79 files changed, 51083 insertions, 5144 deletions
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 330d2a423362..89d70de5e235 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -43,6 +43,7 @@ config INFINIBAND_ADDR_TRANS
 
 source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
 source "drivers/infiniband/hw/ehca/Kconfig"
 source "drivers/infiniband/hw/amso1100/Kconfig"
 source "drivers/infiniband/hw/cxgb3/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 0c4e589d746e..9cc7a47d3e67 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_INFINIBAND) += core/
 obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
 obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
+obj-$(CONFIG_INFINIBAND_QIB) += hw/qib/
 obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
 obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
 obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 05ac36e6acdb..a565af5c2d2e 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,9 @@
 
 #include <rdma/ib_verbs.h>
 
-int ib_device_register_sysfs(struct ib_device *device);
+int ib_device_register_sysfs(struct ib_device *device,
+                             int (*port_callback)(struct ib_device *,
+                                                  u8, struct kobject *));
 void ib_device_unregister_sysfs(struct ib_device *device);
 
 int ib_sysfs_setup(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d1fba4153332..a19effad0811 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -267,7 +267,9 @@ out:
  * callback for each device that is added. @device must be allocated
  * with ib_alloc_device().
  */
-int ib_register_device(struct ib_device *device)
+int ib_register_device(struct ib_device *device,
+                       int (*port_callback)(struct ib_device *,
+                                            u8, struct kobject *))
 {
         int ret;
 
@@ -296,7 +298,7 @@ int ib_register_device(struct ib_device *device)
                 goto out;
         }
 
-        ret = ib_device_register_sysfs(device);
+        ret = ib_device_register_sysfs(device, port_callback);
         if (ret) {
                 printk(KERN_WARNING "Couldn't register device %s with driver model\n",
                        device->name);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 6dc7b77d5d29..ef1304f151dc 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,8 +47,8 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
-int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
 
 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index f901957abc8b..3627300e2a10 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -475,7 +475,9 @@ err:
         return NULL;
 }
 
-static int add_port(struct ib_device *device, int port_num)
+static int add_port(struct ib_device *device, int port_num,
+                    int (*port_callback)(struct ib_device *,
+                                         u8, struct kobject *))
 {
         struct ib_port *p;
         struct ib_port_attr attr;
@@ -522,11 +524,20 @@ static int add_port(struct ib_device *device, int port_num)
         if (ret)
                 goto err_free_pkey;
 
+        if (port_callback) {
+                ret = port_callback(device, port_num, &p->kobj);
+                if (ret)
+                        goto err_remove_pkey;
+        }
+
         list_add_tail(&p->kobj.entry, &device->port_list);
 
         kobject_uevent(&p->kobj, KOBJ_ADD);
         return 0;
 
+err_remove_pkey:
+        sysfs_remove_group(&p->kobj, &p->pkey_group);
+
 err_free_pkey:
         for (i = 0; i < attr.pkey_tbl_len; ++i)
                 kfree(p->pkey_group.attrs[i]);
@@ -754,7 +765,9 @@ static struct attribute_group iw_stats_group = {
         .attrs = iw_proto_stats_attrs,
 };
 
-int ib_device_register_sysfs(struct ib_device *device)
+int ib_device_register_sysfs(struct ib_device *device,
+                             int (*port_callback)(struct ib_device *,
+                                                  u8, struct kobject *))
 {
         struct device *class_dev = &device->dev;
         int ret;
@@ -785,12 +798,12 @@ int ib_device_register_sysfs(struct ib_device *device)
         }
 
         if (device->node_type == RDMA_NODE_IB_SWITCH) {
-                ret = add_port(device, 0);
+                ret = add_port(device, 0, port_callback);
                 if (ret)
                         goto err_put;
         } else {
                 for (i = 1; i <= device->phys_port_cnt; ++i) {
-                        ret = add_port(device, i);
+                        ret = add_port(device, i, port_callback);
                         if (ret)
                                 goto err_put;
                 }
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index c47f618d12e8..aeebc4d37e33 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -865,7 +865,7 @@ int c2_register_device(struct c2_dev *dev)
         dev->ibdev.iwcm->create_listen = c2_service_create;
         dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
 
-        ret = ib_register_device(&dev->ibdev);
+        ret = ib_register_device(&dev->ibdev, NULL);
         if (ret)
                 goto out_free_iwcm;
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 19b1c4a62a23..fca0b4b747e4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1428,7 +1428,7 @@ int iwch_register_device(struct iwch_dev *dev)
         dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
         dev->ibdev.iwcm->get_qp = iwch_get_qp;
 
-        ret = ib_register_device(&dev->ibdev);
+        ret = ib_register_device(&dev->ibdev, NULL);
         if (ret)
                 goto bail1;
 
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index fb1aafcc294f..2447f5295482 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -373,6 +373,7 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                  V_CQE_SWCQE(SW_CQE(hw_cqe)) |
                                  V_CQE_OPCODE(FW_RI_READ_REQ) |
                                  V_CQE_TYPE(1));
+        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
 }
 
 /*
@@ -780,6 +781,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
         /* account for the status page. */
         entries++;
 
+        /* IQ needs one extra entry to differentiate full vs empty. */
+        entries++;
+
         /*
          * entries must be multiple of 16 for HW.
          */
@@ -801,7 +805,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 
         chp->rhp = rhp;
         chp->cq.size--; /* status page */
-        chp->ibcq.cqe = chp->cq.size;
+        chp->ibcq.cqe = chp->cq.size - 1;
         spin_lock_init(&chp->lock);
         atomic_set(&chp->refcnt, 1);
         init_waitqueue_head(&chp->wait);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index be23b5eab13b..d870f9c17c1e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -306,7 +306,8 @@ static void c4iw_remove(struct c4iw_dev *dev)
         PDBG("%s c4iw_dev %p\n", __func__, dev);
         cancel_delayed_work_sync(&dev->db_drop_task);
         list_del(&dev->entry);
-        c4iw_unregister_device(dev);
+        if (dev->registered)
+                c4iw_unregister_device(dev);
         c4iw_rdev_close(&dev->rdev);
         idr_destroy(&dev->cqidr);
         idr_destroy(&dev->qpidr);
@@ -343,12 +344,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
         list_add_tail(&devp->entry, &dev_list);
         mutex_unlock(&dev_mutex);
 
-        if (c4iw_register_device(devp)) {
-                printk(KERN_ERR MOD "Unable to register device\n");
-                mutex_lock(&dev_mutex);
-                c4iw_remove(devp);
-                mutex_unlock(&dev_mutex);
-        }
         if (c4iw_debugfs_root) {
                 devp->debugfs_root = debugfs_create_dir(
                                         pci_name(devp->rdev.lldi.pdev),
@@ -379,9 +374,6 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 
         for (i = 0; i < dev->rdev.lldi.nrxq; i++)
                 PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
-
-        printk(KERN_INFO MOD "Initialized device %s\n",
-               pci_name(dev->rdev.lldi.pdev));
 out:
         return dev;
 }
@@ -471,7 +463,41 @@ nomem:
 
 static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
+        struct c4iw_dev *dev = handle;
+
         PDBG("%s new_state %u\n", __func__, new_state);
+        switch (new_state) {
+        case CXGB4_STATE_UP:
+                printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
+                if (!dev->registered) {
+                        int ret;
+                        ret = c4iw_register_device(dev);
+                        if (ret)
+                                printk(KERN_ERR MOD
+                                       "%s: RDMA registration failed: %d\n",
+                                       pci_name(dev->rdev.lldi.pdev), ret);
+                }
+                break;
+        case CXGB4_STATE_DOWN:
+                printk(KERN_INFO MOD "%s: Down\n",
+                       pci_name(dev->rdev.lldi.pdev));
+                if (dev->registered)
+                        c4iw_unregister_device(dev);
+                break;
+        case CXGB4_STATE_START_RECOVERY:
+                printk(KERN_INFO MOD "%s: Fatal Error\n",
+                       pci_name(dev->rdev.lldi.pdev));
+                if (dev->registered)
+                        c4iw_unregister_device(dev);
+                break;
+        case CXGB4_STATE_DETACH:
+                printk(KERN_INFO MOD "%s: Detach\n",
+                       pci_name(dev->rdev.lldi.pdev));
+                mutex_lock(&dev_mutex);
+                c4iw_remove(dev);
+                mutex_unlock(&dev_mutex);
+                break;
+        }
         return 0;
 }
 
@@ -504,14 +530,12 @@ static void __exit c4iw_exit_module(void)
 {
         struct c4iw_dev *dev, *tmp;
 
-        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
-
         mutex_lock(&dev_mutex);
         list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
                 c4iw_remove(dev);
         }
         mutex_unlock(&dev_mutex);
-
+        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
         c4iw_cm_term();
         debugfs_remove_recursive(c4iw_debugfs_root);
 }
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index a6269981e815..277ab589b44d 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -152,6 +152,7 @@ struct c4iw_dev {
         struct list_head entry;
         struct delayed_work db_drop_task;
         struct dentry *debugfs_root;
+        u8 registered;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e54ff6d25691..7f94da1a2437 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -712,8 +712,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
         php = to_c4iw_pd(pd);
         rhp = php->rhp;
         mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
-        if (!mhp)
+        if (!mhp) {
+                ret = -ENOMEM;
                 goto err;
+        }
 
         mhp->rhp = rhp;
         ret = alloc_pbl(mhp, pbl_depth);
@@ -730,8 +732,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
         mhp->attr.state = 1;
         mmid = (stag) >> 8;
         mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+                ret = -ENOMEM;
                 goto err3;
+        }
 
         PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
         return &(mhp->ibmr);
@@ -755,9 +759,6 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
         dma_addr_t dma_addr;
         int size = sizeof *c4pl + page_list_len * sizeof(u64);
 
-        if (page_list_len > T4_MAX_FR_DEPTH)
-                return ERR_PTR(-EINVAL);
-
         c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
                                   &dma_addr, GFP_KERNEL);
         if (!c4pl)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index dfc49020bb9c..8f645c83a125 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -486,7 +486,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
         dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
         dev->ibdev.iwcm->get_qp = c4iw_get_qp;
 
-        ret = ib_register_device(&dev->ibdev);
+        ret = ib_register_device(&dev->ibdev, NULL);
         if (ret)
                 goto bail1;
 
@@ -496,6 +496,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
                 if (ret)
                         goto bail2;
         }
+        dev->registered = 1;
         return 0;
 bail2:
         ib_unregister_device(&dev->ibdev);
@@ -514,5 +515,6 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
                                    c4iw_class_attributes[i]);
         ib_unregister_device(&dev->ibdev);
         kfree(dev->ibdev.iwcm);
+        dev->registered = 0;
         return;
 }
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 83a01dc0c4c1..0c28ed1eafa6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -572,9 +572,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         err = build_rdma_write(wqe, wr, &len16);
                         break;
                 case IB_WR_RDMA_READ:
+                case IB_WR_RDMA_READ_WITH_INV:
                         fw_opcode = FW_RI_RDMA_READ_WR;
                         swsqe->opcode = FW_RI_READ_REQ;
-                        fw_flags = 0;
+                        if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+                                fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+                        else
+                                fw_flags = 0;
                         err = build_rdma_read(wqe, wr, &len16);
                         if (err)
                                 break;
@@ -588,6 +592,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         err = build_fastreg(wqe, wr, &len16);
                         break;
                 case IB_WR_LOCAL_INV:
+                        if (wr->send_flags & IB_SEND_FENCE)
+                                fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
                         fw_opcode = FW_RI_INV_LSTAG_WR;
                         swsqe->opcode = FW_RI_LOCAL_INV;
                         err = build_inv_stag(wqe, wr, &len16);
@@ -1339,7 +1345,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
         wait_event(qhp->wait, !qhp->ep);
 
         remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-        remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
         atomic_dec(&qhp->refcnt);
         wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
@@ -1442,30 +1447,26 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
         if (ret)
                 goto err2;
 
-        ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
-        if (ret)
-                goto err3;
-
         if (udata) {
                 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
                 if (!mm1) {
                         ret = -ENOMEM;
-                        goto err4;
+                        goto err3;
                 }
                 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                 if (!mm2) {
                         ret = -ENOMEM;
-                        goto err5;
+                        goto err4;
                 }
                 mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
                 if (!mm3) {
                         ret = -ENOMEM;
-                        goto err6;
+                        goto err5;
                 }
                 mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
                 if (!mm4) {
                         ret = -ENOMEM;
-                        goto err7;
+                        goto err6;
                 }
 
                 uresp.qid_mask = rhp->rdev.qpmask;
@@ -1487,7 +1488,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                 spin_unlock(&ucontext->mmap_lock);
                 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                 if (ret)
-                        goto err8;
+                        goto err7;
                 mm1->key = uresp.sq_key;
                 mm1->addr = virt_to_phys(qhp->wq.sq.queue);
                 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
@@ -1511,16 +1512,14 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
              __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
              qhp->wq.sq.qid);
         return &qhp->ibqp;
-err8:
-        kfree(mm4);
 err7:
-        kfree(mm3);
+        kfree(mm4);
 err6:
-        kfree(mm2);
+        kfree(mm3);
 err5:
-        kfree(mm1);
+        kfree(mm2);
 err4:
-        remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
+        kfree(mm1);
 err3:
         remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 err2:
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index d0e8af352408..1057cb96302e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -41,11 +41,13 @@
 #define T4_MAX_NUM_QP (1<<16)
 #define T4_MAX_NUM_CQ (1<<15)
 #define T4_MAX_NUM_PD (1<<15)
-#define T4_MAX_PBL_SIZE 256
-#define T4_MAX_RQ_SIZE 1024
-#define T4_MAX_SQ_SIZE 1024
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
-#define T4_MAX_CQ_DEPTH 8192
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_IQ_SIZE (65520 - 1)
+#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
+#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
+#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
 #define T4_MAX_NUM_STAG (1<<15)
 #define T4_MAX_MR_SIZE (~0ULL - 1)
 #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -79,12 +81,11 @@ struct t4_status_page {
                         sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
                         sizeof(struct fw_ri_immd)))
-#define T4_MAX_FR_DEPTH 255
+#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
 #define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
-#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
-                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_RECV_SGE 4
 
 union t4_wr {
         struct fw_ri_res_wr res;
@@ -434,7 +435,7 @@ struct t4_cq {
         struct c4iw_rdev *rdev;
         u64 ugts;
         size_t memsize;
-        u64 timestamp;
+        __be64 bits_type_ts;
         u32 cqid;
         u16 size; /* including status page */
         u16 cidx;
@@ -449,25 +450,17 @@ struct t4_cq {
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
         u32 val;
-        u16 inc;
-
-        do {
-                /*
-                 * inc must be less the both the max update value -and-
-                 * the size of the CQ.
-                 */
-                inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
-                      CIDXINC_MASK;
-                inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
-                if (inc == cq->cidx_inc)
-                        val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
-                              INGRESSQID(cq->cqid);
-                else
-                        val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
-                              INGRESSQID(cq->cqid);
-                cq->cidx_inc -= inc;
+
+        while (cq->cidx_inc > CIDXINC_MASK) {
+                val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
+                      INGRESSQID(cq->cqid);
                 writel(val, cq->gts);
-        } while (cq->cidx_inc);
+                cq->cidx_inc -= CIDXINC_MASK;
+        }
+        val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
+              INGRESSQID(cq->cqid);
+        writel(val, cq->gts);
+        cq->cidx_inc = 0;
         return 0;
 }
 
@@ -487,7 +480,9 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
-        cq->cidx_inc++;
+        cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
+        if (++cq->cidx_inc == cq->size)
+                cq->cidx_inc = 0;
         if (++cq->cidx == cq->size) {
                 cq->cidx = 0;
                 cq->gen ^= 1;
@@ -501,20 +496,23 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
-        int ret = 0;
-        u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
+        int ret;
+        u16 prev_cidx;
 
-        if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
-                *cqe = &cq->queue[cq->cidx];
-                cq->timestamp = G_CQE_TS(bits_type_ts);
-        } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
-                ret = -EOVERFLOW;
+        if (cq->cidx == 0)
+                prev_cidx = cq->size - 1;
         else
-                ret = -ENODATA;
-        if (ret == -EOVERFLOW) {
-                printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+                prev_cidx = cq->cidx - 1;
+
+        if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
+                ret = -EOVERFLOW;
                 cq->error = 1;
-        }
+                printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+        } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+                *cqe = &cq->queue[cq->cidx];
+                ret = 0;
+        } else
+                ret = -ENODATA;
         return ret;
 }
 
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 252489e88369..ecb51b396c42 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -799,7 +799,7 @@ static int __devinit ehca_probe(struct of_device *dev,
                 goto probe5;
         }
 
-        ret = ib_register_device(&shca->ib_device);
+        ret = ib_register_device(&shca->ib_device, NULL);
         if (ret) {
                 ehca_err(&shca->ib_device,
                          "ib_register_device() failed ret=%i", ret);
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 3c7968f25ec2..1d9bb115cbf6 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,9 +1,11 @@
 config INFINIBAND_IPATH
-        tristate "QLogic InfiniPath Driver"
-        depends on 64BIT && NET
+        tristate "QLogic HTX HCA support"
+        depends on 64BIT && NET && HT_IRQ
         ---help---
-        This is a driver for QLogic InfiniPath host channel adapters,
+        This is a driver for the obsolete QLogic Hyper-Transport
+        IB host channel adapter (model QHT7140),
         including InfiniBand verbs support.  This driver allows these
         devices to be used with both kernel upper level protocols such
         as IP-over-InfiniBand as well as with userspace applications
         (in conjunction with InfiniBand userspace access).
+        For QLogic PCIe QLE based cards, use the QIB driver instead.
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index bf9450061986..fa3df82681df 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -29,13 +29,9 @@ ib_ipath-y := \
         ipath_user_pages.o \
         ipath_user_sdma.o \
         ipath_verbs_mcast.o \
-        ipath_verbs.o \
-        ipath_iba7220.o \
-        ipath_sd7220.o \
-        ipath_sd7220_img.o
+        ipath_verbs.o
 
 ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
 
 ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
 ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 6302626d17f0..21337468c652 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -132,18 +132,13 @@ static int __devinit ipath_init_one(struct pci_dev *,
 
 /* Only needed for registration, nothing else needs this info */
 #define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
 #define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
-#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
 
 /* Number of seconds before our card status check...  */
 #define STATUS_TIMEOUT 60
 
 static const struct pci_device_id ipath_pci_tbl[] = {
         { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
-        { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
-        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
         { 0, }
 };
 
@@ -521,30 +516,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
         /* setup the chip-specific functions, as early as possible. */
         switch (ent->device) {
         case PCI_DEVICE_ID_INFINIPATH_HT:
-#ifdef CONFIG_HT_IRQ
                 ipath_init_iba6110_funcs(dd);
                 break;
-#else
-                ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
-                              "CONFIG_HT_IRQ is not enabled\n", ent->device);
-                return -ENODEV;
-#endif
-        case PCI_DEVICE_ID_INFINIPATH_PE800:
-#ifdef CONFIG_PCI_MSI
-                ipath_init_iba6120_funcs(dd);
-                break;
-#else
-                ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
-                              "CONFIG_PCI_MSI is not enabled\n", ent->device);
-                return -ENODEV;
-#endif
-        case PCI_DEVICE_ID_INFINIPATH_7220:
-#ifndef CONFIG_PCI_MSI
-                ipath_dbg("CONFIG_PCI_MSI is not enabled, "
-                          "using INTx for unit %u\n", dd->ipath_unit);
-#endif
-                ipath_init_iba7220_funcs(dd);
-                break;
+
         default:
                 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
                               "failing\n", ent->device);
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
deleted file mode 100644
index 4b4a30b0dabd..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ /dev/null
@@ -1,1862 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath PCIe chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the QLogic InfiniPath PCI-Express chip.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
-        unsigned long long Revision;
-        unsigned long long Control;
-        unsigned long long PageAlign;
-        unsigned long long PortCnt;
-        unsigned long long DebugPortSelect;
-        unsigned long long Reserved0;
-        unsigned long long SendRegBase;
-        unsigned long long UserRegBase;
-        unsigned long long CounterRegBase;
-        unsigned long long Scratch;
-        unsigned long long Reserved1;
-        unsigned long long Reserved2;
-        unsigned long long IntBlocked;
-        unsigned long long IntMask;
-        unsigned long long IntStatus;
-        unsigned long long IntClear;
-        unsigned long long ErrorMask;
-        unsigned long long ErrorStatus;
-        unsigned long long ErrorClear;
-        unsigned long long HwErrMask;
-        unsigned long long HwErrStatus;
-        unsigned long long HwErrClear;
-        unsigned long long HwDiagCtrl;
-        unsigned long long MDIO;
-        unsigned long long IBCStatus;
-        unsigned long long IBCCtrl;
-        unsigned long long ExtStatus;
-        unsigned long long ExtCtrl;
-        unsigned long long GPIOOut;
-        unsigned long long GPIOMask;
-        unsigned long long GPIOStatus;
-        unsigned long long GPIOClear;
-        unsigned long long RcvCtrl;
-        unsigned long long RcvBTHQP;
-        unsigned long long RcvHdrSize;
-        unsigned long long RcvHdrCnt;
-        unsigned long long RcvHdrEntSize;
-        unsigned long long RcvTIDBase;
-        unsigned long long RcvTIDCnt;
-        unsigned long long RcvEgrBase;
-        unsigned long long RcvEgrCnt;
-        unsigned long long RcvBufBase;
-        unsigned long long RcvBufSize;
-        unsigned long long RxIntMemBase;
-        unsigned long long RxIntMemSize;
-        unsigned long long RcvPartitionKey;
-        unsigned long long Reserved3;
-        unsigned long long RcvPktLEDCnt;
-        unsigned long long Reserved4[8];
-        unsigned long long SendCtrl;
-        unsigned long long SendPIOBufBase;
-        unsigned long long SendPIOSize;
-        unsigned long long SendPIOBufCnt;
-        unsigned long long SendPIOAvailAddr;
-        unsigned long long TxIntMemBase;
-        unsigned long long TxIntMemSize;
-        unsigned long long Reserved5;
-        unsigned long long PCIeRBufTestReg0;
-        unsigned long long PCIeRBufTestReg1;
-        unsigned long long Reserved51[6];
-        unsigned long long SendBufferError;
-        unsigned long long SendBufferErrorCONT1;
-        unsigned long long Reserved6SBE[6];
-        unsigned long long RcvHdrAddr0;
-        unsigned long long RcvHdrAddr1;
-        unsigned long long RcvHdrAddr2;
-        unsigned long long RcvHdrAddr3;
-        unsigned long long RcvHdrAddr4;
-        unsigned long long Reserved7RHA[11];
-        unsigned long long RcvHdrTailAddr0;
-        unsigned long long RcvHdrTailAddr1;
-        unsigned long long RcvHdrTailAddr2;
-        unsigned long long RcvHdrTailAddr3;
-        unsigned long long RcvHdrTailAddr4;
-        unsigned long long Reserved8RHTA[11];
-        unsigned long long Reserved9SW[8];
-        unsigned long long SerdesConfig0;
-        unsigned long long SerdesConfig1;
-        unsigned long long SerdesStatus;
-        unsigned long long XGXSConfig;
-        unsigned long long IBPLLCfg;
-        unsigned long long Reserved10SW2[3];
-        unsigned long long PCIEQ0SerdesConfig0;
-        unsigned long long PCIEQ0SerdesConfig1;
-        unsigned long long PCIEQ0SerdesStatus;
-        unsigned long long Reserved11;
-        unsigned long long PCIEQ1SerdesConfig0;
-        unsigned long long PCIEQ1SerdesConfig1;
-        unsigned long long PCIEQ1SerdesStatus;
-        unsigned long long Reserved12;
-};
-
-struct _infinipath_do_not_use_counters {
-        __u64 LBIntCnt;
-        __u64 LBFlowStallCnt;
-        __u64 Reserved1;
-        __u64 TxUnsupVLErrCnt;
-        __u64 TxDataPktCnt;
-        __u64 TxFlowPktCnt;
-        __u64 TxDwordCnt;
-        __u64 TxLenErrCnt;
-        __u64 TxMaxMinLenErrCnt;
-        __u64 TxUnderrunCnt;
-        __u64 TxFlowStallCnt;
-        __u64 TxDroppedPktCnt;
-        __u64 RxDroppedPktCnt;
-        __u64 RxDataPktCnt;
-        __u64 RxFlowPktCnt;
-        __u64 RxDwordCnt;
-        __u64 RxLenErrCnt;
-        __u64 RxMaxMinLenErrCnt;
-        __u64 RxICRCErrCnt;
-        __u64 RxVCRCErrCnt;
-        __u64 RxFlowCtrlErrCnt;
-        __u64 RxBadFormatCnt;
-        __u64 RxLinkProblemCnt;
-        __u64 RxEBPCnt;
-        __u64 RxLPCRCErrCnt;
-        __u64 RxBufOvflCnt;
-        __u64 RxTIDFullErrCnt;
-        __u64 RxTIDValidErrCnt;
-        __u64 RxPKeyMismatchCnt;
-        __u64 RxP0HdrEgrOvflCnt;
-        __u64 RxP1HdrEgrOvflCnt;
-        __u64 RxP2HdrEgrOvflCnt;
-        __u64 RxP3HdrEgrOvflCnt;
-        __u64 RxP4HdrEgrOvflCnt;
-        __u64 RxP5HdrEgrOvflCnt;
-        __u64 RxP6HdrEgrOvflCnt;
-        __u64 RxP7HdrEgrOvflCnt;
-        __u64 RxP8HdrEgrOvflCnt;
-        __u64 Reserved6;
-        __u64 Reserved7;
-        __u64 IBStatusChangeCnt;
-        __u64 IBLinkErrRecoveryCnt;
-        __u64 IBLinkDownedCnt;
-        __u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
-        struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
-        struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_pe_kregs = {
-        .kr_control = IPATH_KREG_OFFSET(Control),
-        .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
-        .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
-        .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
-        .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
-        .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
-        .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
-        .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
-        .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
-        .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
-        .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
-        .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
-        .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
-        .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
-        .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
-        .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
-        .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
-        .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
-        .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
-        .kr_intclear = IPATH_KREG_OFFSET(IntClear),
-        .kr_intmask = IPATH_KREG_OFFSET(IntMask),
-        .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
-        .kr_mdio = IPATH_KREG_OFFSET(MDIO),
-        .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
-        .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
-        .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
-        .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
-        .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
-        .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
-        .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
-        .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
-        .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
-        .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
-        .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
-        .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
-        .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
-        .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
-        .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
-        .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
-        .kr_revision = IPATH_KREG_OFFSET(Revision),
-        .kr_scratch = IPATH_KREG_OFFSET(Scratch),
-        .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
-        .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
-        .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
-        .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
-        .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
-        .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
-        .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
-        .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
-        .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
-        .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-        .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
-        .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
-        .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
-        .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-        .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
-
-        /*
-         * These should not be used directly via ipath_write_kreg64(),
-         * use them with ipath_write_kreg64_port(),
-         */
-        .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
-        .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
-        /* The rcvpktled register controls one of the debug port signals, so
-         * a packet activity LED can be connected to it. */
-        .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
-        .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
-        .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
-        .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
-        .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
-        .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
-        .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
-        .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
-        .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
-};
-
-static const struct ipath_cregs ipath_pe_cregs = {
-        .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
-        .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
-        .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
-        .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
-        .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
-        .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
-        .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
-        .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
-        .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
-        .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
-        .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
-        .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
-        .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
-        .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
-        .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
-        .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
-        .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
-        .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
-        .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
-        .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
-        .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
-        .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
-        .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
-        .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
-        .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
-        .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
-        .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
-        .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
-        .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
-        .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
-        .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
-        .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
-        .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET 1U
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-
-#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6120_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET 0x5ULL
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
-        (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
-        (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
-#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
-        ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
-#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
-#define INFINIPATH_RT_IS_VALID(tid) \
-        (((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
-        ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
-#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
-#define INFINIPATH_RT_ADDR_SHIFT 10
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* 6120 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
-        INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
-        INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
-        /*
-         * In practice, it's unlikely wthat we'll see PCIe PLL, or bus
-         * parity or memory parity error failures, because most likely we
-         * won't be able to talk to the core of the chip.  Nonetheless, we
-         * might see them, if they are in parts of the PCIe core that aren't
-         * essential.
-         */
-        INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
-        INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
-        INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
-        INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
-        INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
-        INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
-        INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
-                INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
-                << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
-                << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
-                               u32, unsigned long);
-
399 | /* | ||
400 | * On platforms using this chip, and not having ordered WC stores, we | ||
401 | * can get TXE parity errors due to speculative reads to the PIO buffers, | ||
402 | * and this, due to a chip bug can result in (many) false parity error | ||
403 | * reports. So it's a debug print on those, and an info print on systems | ||
404 | * where the speculative reads don't occur. | ||
405 | */ | ||
406 | static void ipath_pe_txe_recover(struct ipath_devdata *dd) | ||
407 | { | ||
408 | if (ipath_unordered_wc()) | ||
409 | ipath_dbg("Recovering from TXE PIO parity error\n"); | ||
410 | else { | ||
411 | ++ipath_stats.sps_txeparity; | ||
412 | dev_info(&dd->pcidev->dev, | ||
413 | "Recovering from TXE PIO parity error\n"); | ||
414 | } | ||
415 | } | ||
416 | |||
417 | |||
418 | /** | ||
419 | * ipath_pe_handle_hwerrors - display hardware errors. | ||
420 | * @dd: the infinipath device | ||
421 | * @msg: the output buffer | ||
422 | * @msgl: the size of the output buffer | ||
423 | * | ||
424 | * Use the same msg buffer as regular errors to avoid excessive | ||
425 | * stack use. Most hardware errors are catastrophic, but for right | ||
426 | * now we'll print them and continue; the buffer is shared with | ||
427 | * ipath_handle_errors() for that reason. | ||
428 | */ | ||
429 | static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, | ||
430 | size_t msgl) | ||
431 | { | ||
432 | ipath_err_t hwerrs; | ||
433 | u32 bits, ctrl; | ||
434 | int isfatal = 0; | ||
435 | char bitsmsg[64]; | ||
436 | int log_idx; | ||
437 | |||
438 | hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); | ||
439 | if (!hwerrs) { | ||
440 | /* | ||
441 | * Better than printing confusing messages. | ||
442 | * This seems to be related to clearing the CRC error, or | ||
443 | * the PLL error, during init. | ||
444 | */ | ||
445 | ipath_cdbg(VERBOSE, "Called but no hardware errors set\n"); | ||
446 | return; | ||
447 | } else if (hwerrs == ~0ULL) { | ||
448 | ipath_dev_err(dd, "Read of hardware error status failed " | ||
449 | "(all bits set); ignoring\n"); | ||
450 | return; | ||
451 | } | ||
452 | ipath_stats.sps_hwerrs++; | ||
453 | |||
454 | /* Always clear the error status register, except MEMBISTFAIL, | ||
455 | * regardless of whether we continue or stop using the chip. | ||
456 | * We want that set so we know it failed, even across driver reload. | ||
457 | * We'll still ignore it in the hwerrmask. We do this partly for | ||
458 | * diagnostics, but also for support */ | ||
459 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, | ||
460 | hwerrs&~INFINIPATH_HWE_MEMBISTFAILED); | ||
461 | |||
462 | hwerrs &= dd->ipath_hwerrmask; | ||
463 | |||
464 | /* We log some errors to EEPROM, check if we have any of those. */ | ||
465 | for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) | ||
466 | if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log) | ||
467 | ipath_inc_eeprom_err(dd, log_idx, 1); | ||
468 | |||
469 | /* | ||
470 | * make sure we get this much out, unless told to be quiet, | ||
471 | * or it's occurred within the last 5 seconds | ||
472 | */ | ||
473 | if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY | | ||
474 | RXE_EAGER_PARITY)) || | ||
475 | (ipath_debug & __IPATH_VERBDBG)) | ||
476 | dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " | ||
477 | "(cleared)\n", (unsigned long long) hwerrs); | ||
478 | dd->ipath_lasthwerror |= hwerrs; | ||
479 | |||
480 | if (hwerrs & ~dd->ipath_hwe_bitsextant) | ||
481 | ipath_dev_err(dd, "hwerror interrupt with unknown errors " | ||
482 | "%llx set\n", (unsigned long long) | ||
483 | (hwerrs & ~dd->ipath_hwe_bitsextant)); | ||
484 | |||
485 | ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); | ||
486 | if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) { | ||
487 | /* | ||
488 | * parity errors in send memory are recoverable, | ||
489 | * just cancel the send (if indicated in sendbuffererror), | ||
490 | * count the occurrence, unfreeze (if no other handled | ||
491 | * hardware error bits are set), and continue. They can | ||
492 | * occur if a processor speculative read is done to the PIO | ||
493 | * buffer while we are sending a packet, for example. | ||
494 | */ | ||
495 | if (hwerrs & TXE_PIO_PARITY) { | ||
496 | ipath_pe_txe_recover(dd); | ||
497 | hwerrs &= ~TXE_PIO_PARITY; | ||
498 | } | ||
499 | if (!hwerrs) { | ||
500 | static u32 freeze_cnt; | ||
501 | |||
502 | freeze_cnt++; | ||
503 | ipath_dbg("Clearing freezemode on ignored or recovered " | ||
504 | "hardware error (%u)\n", freeze_cnt); | ||
505 | ipath_clear_freeze(dd); | ||
506 | } | ||
507 | } | ||
508 | |||
509 | *msg = '\0'; | ||
510 | |||
511 | if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { | ||
512 | strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", | ||
513 | msgl); | ||
514 | /* ignore from now on, so disable until driver reloaded */ | ||
515 | *dd->ipath_statusp |= IPATH_STATUS_HWERROR; | ||
516 | dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; | ||
517 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | ||
518 | dd->ipath_hwerrmask); | ||
519 | } | ||
520 | |||
521 | ipath_format_hwerrors(hwerrs, | ||
522 | ipath_6120_hwerror_msgs, | ||
523 | sizeof(ipath_6120_hwerror_msgs)/ | ||
524 | sizeof(ipath_6120_hwerror_msgs[0]), | ||
525 | msg, msgl); | ||
526 | |||
527 | if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK | ||
528 | << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) { | ||
529 | bits = (u32) ((hwerrs >> | ||
530 | INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) & | ||
531 | INFINIPATH_HWE_PCIEMEMPARITYERR_MASK); | ||
532 | snprintf(bitsmsg, sizeof bitsmsg, | ||
533 | "[PCIe Mem Parity Errs %x] ", bits); | ||
534 | strlcat(msg, bitsmsg, msgl); | ||
535 | } | ||
536 | |||
537 | #define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \ | ||
538 | INFINIPATH_HWE_COREPLL_RFSLIP ) | ||
539 | |||
540 | if (hwerrs & _IPATH_PLL_FAIL) { | ||
541 | snprintf(bitsmsg, sizeof bitsmsg, | ||
542 | "[PLL failed (%llx), InfiniPath hardware unusable]", | ||
543 | (unsigned long long) hwerrs & _IPATH_PLL_FAIL); | ||
544 | strlcat(msg, bitsmsg, msgl); | ||
545 | /* ignore from now on, so disable until driver reloaded */ | ||
546 | dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL); | ||
547 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | ||
548 | dd->ipath_hwerrmask); | ||
549 | } | ||
550 | |||
551 | if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) { | ||
552 | /* | ||
553 | * If it occurs, it is left masked since the external | ||
554 | * interface is unused | ||
555 | */ | ||
556 | dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED; | ||
557 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | ||
558 | dd->ipath_hwerrmask); | ||
559 | } | ||
560 | |||
561 | if (hwerrs) { | ||
562 | /* | ||
563 | * if any set that we aren't ignoring; only | ||
564 | * make the complaint once, in case it's stuck | ||
565 | * or recurring, and we get here multiple | ||
566 | * times. | ||
567 | */ | ||
568 | ipath_dev_err(dd, "%s hardware error\n", msg); | ||
569 | if (dd->ipath_flags & IPATH_INITTED) { | ||
570 | ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); | ||
571 | ipath_setup_pe_setextled(dd, | ||
572 | INFINIPATH_IBCS_L_STATE_DOWN, | ||
573 | INFINIPATH_IBCS_LT_STATE_DISABLED); | ||
574 | ipath_dev_err(dd, "Fatal Hardware Error (freeze " | ||
575 | "mode), no longer usable, SN %.16s\n", | ||
576 | dd->ipath_serial); | ||
577 | isfatal = 1; | ||
578 | } | ||
579 | *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; | ||
580 | /* mark as having had error */ | ||
581 | *dd->ipath_statusp |= IPATH_STATUS_HWERROR; | ||
582 | /* | ||
583 | * mark as not usable, at a minimum until driver | ||
584 | * is reloaded, probably until reboot, since no | ||
585 | * other reset is possible. | ||
586 | */ | ||
587 | dd->ipath_flags &= ~IPATH_INITTED; | ||
588 | } else | ||
589 | *msg = 0; /* recovered from all of them */ | ||
590 | |||
591 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) { | ||
592 | /* | ||
593 | * for /sys status file; if no trailing brace is copied, | ||
594 | * we'll know it was truncated. | ||
595 | */ | ||
596 | snprintf(dd->ipath_freezemsg, dd->ipath_freezelen, | ||
597 | "{%s}", msg); | ||
598 | } | ||
599 | } | ||
600 | |||
601 | /** | ||
602 | * ipath_pe_boardname - fill in the board name | ||
603 | * @dd: the infinipath device | ||
604 | * @name: the output buffer | ||
605 | * @namelen: the size of the output buffer | ||
606 | * | ||
607 | * info is based on the board revision register | ||
608 | */ | ||
609 | static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, | ||
610 | size_t namelen) | ||
611 | { | ||
612 | char *n = NULL; | ||
613 | u8 boardrev = dd->ipath_boardrev; | ||
614 | int ret; | ||
615 | |||
616 | switch (boardrev) { | ||
617 | case 0: | ||
618 | n = "InfiniPath_Emulation"; | ||
619 | break; | ||
620 | case 1: | ||
621 | n = "InfiniPath_QLE7140-Bringup"; | ||
622 | break; | ||
623 | case 2: | ||
624 | n = "InfiniPath_QLE7140"; | ||
625 | break; | ||
626 | case 3: | ||
627 | n = "InfiniPath_QMI7140"; | ||
628 | break; | ||
629 | case 4: | ||
630 | n = "InfiniPath_QEM7140"; | ||
631 | break; | ||
632 | case 5: | ||
633 | n = "InfiniPath_QMH7140"; | ||
634 | break; | ||
635 | case 6: | ||
636 | n = "InfiniPath_QLE7142"; | ||
637 | break; | ||
638 | default: | ||
639 | ipath_dev_err(dd, | ||
640 | "Don't yet know about board with ID %u\n", | ||
641 | boardrev); | ||
642 | snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u", | ||
643 | boardrev); | ||
644 | break; | ||
645 | } | ||
646 | if (n) | ||
647 | snprintf(name, namelen, "%s", n); | ||
648 | |||
649 | if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) { | ||
650 | ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n", | ||
651 | dd->ipath_majrev, dd->ipath_minrev); | ||
652 | ret = 1; | ||
653 | } else { | ||
654 | ret = 0; | ||
655 | if (dd->ipath_minrev >= 2) | ||
656 | dd->ipath_f_put_tid = ipath_pe_put_tid_2; | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * set here, not in ipath_init_*_funcs because we have to do | ||
661 | * it after we can read chip registers. | ||
662 | */ | ||
663 | dd->ipath_ureg_align = | ||
664 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign); | ||
665 | |||
666 | return ret; | ||
667 | } | ||
668 | |||
669 | /** | ||
670 | * ipath_pe_init_hwerrors - enable hardware errors | ||
671 | * @dd: the infinipath device | ||
672 | * | ||
673 | * now that we have finished initializing everything that might reasonably | ||
674 | * cause a hardware error, and cleared those errors bits as they occur, | ||
675 | * we can enable hardware errors in the mask (potentially enabling | ||
676 | * freeze mode), and enable hardware errors as errors (along with | ||
677 | * everything else) in errormask | ||
678 | */ | ||
679 | static void ipath_pe_init_hwerrors(struct ipath_devdata *dd) | ||
680 | { | ||
681 | ipath_err_t val; | ||
682 | u64 extsval; | ||
683 | |||
684 | extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus); | ||
685 | |||
686 | if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) | ||
687 | ipath_dev_err(dd, "MemBIST did not complete!\n"); | ||
688 | if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND) | ||
689 | ipath_dbg("MemBIST corrected\n"); | ||
690 | |||
691 | val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */ | ||
692 | |||
693 | if (!dd->ipath_boardrev) /* no PLL for Emulator */ | ||
694 | val &= ~INFINIPATH_HWE_SERDESPLLFAILED; | ||
695 | |||
696 | if (dd->ipath_minrev < 2) { | ||
697 | /* workaround bug 9460 in internal interface bus parity | ||
698 | * checking. Fixed (HW bug 9490) in Rev2. | ||
699 | */ | ||
700 | val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM; | ||
701 | } | ||
702 | dd->ipath_hwerrmask = val; | ||
703 | } | ||
704 | |||
705 | /** | ||
706 | * ipath_pe_bringup_serdes - bring up the serdes | ||
707 | * @dd: the infinipath device | ||
708 | */ | ||
709 | static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) | ||
710 | { | ||
711 | u64 val, config1, prev_val; | ||
712 | int ret = 0; | ||
713 | |||
714 | ipath_dbg("Trying to bringup serdes\n"); | ||
715 | |||
716 | if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) & | ||
717 | INFINIPATH_HWE_SERDESPLLFAILED) { | ||
718 | ipath_dbg("At start, serdes PLL failed bit set " | ||
719 | "in hwerrstatus, clearing and continuing\n"); | ||
720 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, | ||
721 | INFINIPATH_HWE_SERDESPLLFAILED); | ||
722 | } | ||
723 | |||
724 | dd->ibdeltainprog = 1; | ||
725 | dd->ibsymsnap = | ||
726 | ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
727 | dd->iblnkerrsnap = | ||
728 | ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
729 | |||
730 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); | ||
731 | config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1); | ||
732 | |||
733 | ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, " | ||
734 | "xgxsconfig %llx\n", (unsigned long long) val, | ||
735 | (unsigned long long) config1, (unsigned long long) | ||
736 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); | ||
737 | |||
738 | /* | ||
739 | * Force reset on, also set rxdetect enable. Must do before reading | ||
740 | * serdesstatus at least for simulation, or some of the bits in | ||
741 | * serdes status will come back as undefined and cause simulation | ||
742 | * failures | ||
743 | */ | ||
744 | val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN | ||
745 | | INFINIPATH_SERDC0_L1PWR_DN; | ||
746 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); | ||
747 | /* be sure chip saw it */ | ||
748 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); | ||
749 | udelay(5); /* need pll reset set at least for a bit */ | ||
750 | /* | ||
751 | * after PLL is reset, set the per-lane Resets and TxIdle and | ||
752 | * clear the PLL reset and rxdetect (to get falling edge). | ||
753 | * Leave L1PWR bits set (permanently) | ||
754 | */ | ||
755 | val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL | ||
756 | | INFINIPATH_SERDC0_L1PWR_DN); | ||
757 | val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE; | ||
758 | ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets " | ||
759 | "and txidle (%llx)\n", (unsigned long long) val); | ||
760 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); | ||
761 | /* be sure chip saw it */ | ||
762 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); | ||
763 | /* need PLL reset clear for at least 11 usec before lane | ||
764 | * resets cleared; give it a few more to be sure */ | ||
765 | udelay(15); | ||
766 | val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE); | ||
767 | |||
768 | ipath_cdbg(VERBOSE, "Clearing lane resets and txidle " | ||
769 | "(writing %llx)\n", (unsigned long long) val); | ||
770 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); | ||
771 | /* be sure chip saw it */ | ||
772 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); | ||
773 | |||
774 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); | ||
775 | prev_val = val; | ||
776 | if (val & INFINIPATH_XGXS_RESET) | ||
777 | val &= ~INFINIPATH_XGXS_RESET; | ||
778 | if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & | ||
779 | INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { | ||
780 | /* need to compensate for Tx inversion in partner */ | ||
781 | val &= ~(INFINIPATH_XGXS_RX_POL_MASK << | ||
782 | INFINIPATH_XGXS_RX_POL_SHIFT); | ||
783 | val |= dd->ipath_rx_pol_inv << | ||
784 | INFINIPATH_XGXS_RX_POL_SHIFT; | ||
785 | } | ||
786 | if (val != prev_val) | ||
787 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | ||
788 | |||
789 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); | ||
790 | |||
791 | /* clear current and de-emphasis bits */ | ||
792 | config1 &= ~0x0ffffffff00ULL; | ||
793 | /* set current to 20ma */ | ||
794 | config1 |= 0x00000000000ULL; | ||
795 | /* set de-emphasis to -5.68dB */ | ||
796 | config1 |= 0x0cccc000000ULL; | ||
797 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1); | ||
798 | |||
799 | ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx " | ||
800 | "config1=%llx, sstatus=%llx xgxs=%llx\n", | ||
801 | (unsigned long long) val, (unsigned long long) config1, | ||
802 | (unsigned long long) | ||
803 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus), | ||
804 | (unsigned long long) | ||
805 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); | ||
806 | |||
807 | return ret; | ||
808 | } | ||
809 | |||
810 | /** | ||
811 | * ipath_pe_quiet_serdes - set serdes to txidle | ||
812 | * @dd: the infinipath device | ||
813 | * Called when driver is being unloaded | ||
814 | */ | ||
815 | static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) | ||
816 | { | ||
817 | u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); | ||
818 | |||
819 | if (dd->ibsymdelta || dd->iblnkerrdelta || | ||
820 | dd->ibdeltainprog) { | ||
821 | u64 diagc; | ||
822 | /* enable counter writes */ | ||
823 | diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); | ||
824 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, | ||
825 | diagc | INFINIPATH_DC_COUNTERWREN); | ||
826 | |||
827 | if (dd->ibsymdelta || dd->ibdeltainprog) { | ||
828 | val = ipath_read_creg32(dd, | ||
829 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
830 | if (dd->ibdeltainprog) | ||
831 | val -= val - dd->ibsymsnap; | ||
832 | val -= dd->ibsymdelta; | ||
833 | ipath_write_creg(dd, | ||
834 | dd->ipath_cregs->cr_ibsymbolerrcnt, val); | ||
835 | } | ||
836 | if (dd->iblnkerrdelta || dd->ibdeltainprog) { | ||
837 | val = ipath_read_creg32(dd, | ||
838 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
839 | if (dd->ibdeltainprog) | ||
840 | val -= val - dd->iblnkerrsnap; | ||
841 | val -= dd->iblnkerrdelta; | ||
842 | ipath_write_creg(dd, | ||
843 | dd->ipath_cregs->cr_iblinkerrrecovcnt, val); | ||
844 | } | ||
845 | |||
846 | /* and disable counter writes */ | ||
847 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); | ||
848 | } | ||
849 | val |= INFINIPATH_SERDC0_TXIDLE; | ||
850 | ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n", | ||
851 | (unsigned long long) val); | ||
852 | ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); | ||
853 | } | ||
854 | |||
855 | static int ipath_pe_intconfig(struct ipath_devdata *dd) | ||
856 | { | ||
857 | u32 chiprev; | ||
858 | |||
859 | /* | ||
860 | * If the chip supports added error indication via GPIO pins, | ||
861 | * enable interrupts on those bits so the interrupt routine | ||
862 | * can count the events. Also set flag so interrupt routine | ||
863 | * can know they are expected. | ||
864 | */ | ||
865 | chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT; | ||
866 | if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) { | ||
867 | /* Rev2+ reports extra errors via internal GPIO pins */ | ||
868 | dd->ipath_flags |= IPATH_GPIO_ERRINTRS; | ||
869 | dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK; | ||
870 | ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, | ||
871 | dd->ipath_gpio_mask); | ||
872 | } | ||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | /** | ||
877 | * ipath_setup_pe_setextled - set the state of the two external LEDs | ||
878 | * @dd: the infinipath device | ||
879 | * @lst: the L state | ||
880 | * @ltst: the LT state | ||
881 | |||
882 | * These LEDs indicate the physical and logical state of IB link. | ||
883 | * For this chip (at least with recommended board pinouts), LED1 | ||
884 | * is Yellow (logical state) and LED2 is Green (physical state), | ||
885 | * | ||
886 | * Note: We try to match the Mellanox HCA LED behavior as best | ||
887 | * we can. Green indicates physical link state is OK (something is | ||
888 | * plugged in, and we can train). | ||
889 | * Amber indicates the link is logically up (ACTIVE). | ||
890 | * Mellanox further blinks the amber LED to indicate data packet | ||
891 | * activity, but we have no hardware support for that, so it would | ||
892 | * require waking up every 10-20 msecs and checking the counters | ||
893 | * on the chip, and then turning the LED off if appropriate. That's | ||
894 | * visible overhead, so not something we will do. | ||
895 | * | ||
896 | */ | ||
897 | static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst, | ||
898 | u64 ltst) | ||
899 | { | ||
900 | u64 extctl; | ||
901 | unsigned long flags = 0; | ||
902 | |||
903 | /* the diags use the LED to indicate diag info, so we leave | ||
904 | * the external LED alone when the diags are running */ | ||
905 | if (ipath_diag_inuse) | ||
906 | return; | ||
907 | |||
909 | /* Allow override of LED display, e.g. for locating a system in a rack */ | ||
909 | if (dd->ipath_led_override) { | ||
910 | ltst = (dd->ipath_led_override & IPATH_LED_PHYS) | ||
911 | ? INFINIPATH_IBCS_LT_STATE_LINKUP | ||
912 | : INFINIPATH_IBCS_LT_STATE_DISABLED; | ||
913 | lst = (dd->ipath_led_override & IPATH_LED_LOG) | ||
914 | ? INFINIPATH_IBCS_L_STATE_ACTIVE | ||
915 | : INFINIPATH_IBCS_L_STATE_DOWN; | ||
916 | } | ||
917 | |||
918 | spin_lock_irqsave(&dd->ipath_gpio_lock, flags); | ||
919 | extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | | ||
920 | INFINIPATH_EXTC_LED2PRIPORT_ON); | ||
921 | |||
922 | if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) | ||
923 | extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON; | ||
924 | if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) | ||
925 | extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; | ||
926 | dd->ipath_extctrl = extctl; | ||
927 | ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); | ||
928 | spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags); | ||
929 | } | ||
930 | |||
931 | /** | ||
932 | * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff | ||
933 | * @dd: the infinipath device | ||
934 | * | ||
935 | * This is called during driver unload. | ||
936 | * We do the pci_disable_msi here, not in generic code, because it | ||
937 | * isn't used for the HT chips. If we do end up needing pci_enable_msi | ||
938 | * at some point in the future for HT, we'll move the call back | ||
939 | * into the main init_one code. | ||
940 | */ | ||
941 | static void ipath_setup_pe_cleanup(struct ipath_devdata *dd) | ||
942 | { | ||
943 | dd->ipath_msi_lo = 0; /* just in case unload fails */ | ||
944 | pci_disable_msi(dd->pcidev); | ||
945 | } | ||
946 | |||
947 | static void ipath_6120_pcie_params(struct ipath_devdata *dd) | ||
948 | { | ||
949 | u16 linkstat, speed; | ||
950 | int pos; | ||
951 | |||
952 | pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP); | ||
953 | if (!pos) { | ||
954 | ipath_dev_err(dd, "Can't find PCI Express capability!\n"); | ||
955 | goto bail; | ||
956 | } | ||
957 | |||
958 | pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA, | ||
959 | &linkstat); | ||
960 | /* | ||
961 | * speed is bits 3:0, link width is bits 8:4; | ||
962 | * there are no defines for them in the headers | ||
963 | */ | ||
964 | speed = linkstat & 0xf; | ||
965 | linkstat >>= 4; | ||
966 | linkstat &= 0x1f; | ||
967 | dd->ipath_lbus_width = linkstat; | ||
968 | |||
969 | switch (speed) { | ||
970 | case 1: | ||
971 | dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */ | ||
972 | break; | ||
973 | case 2: | ||
974 | dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */ | ||
975 | break; | ||
976 | default: /* not defined, assume gen1 */ | ||
977 | dd->ipath_lbus_speed = 2500; | ||
978 | break; | ||
979 | } | ||
980 | |||
981 | if (linkstat < 8) | ||
982 | ipath_dev_err(dd, | ||
983 | "PCIe width %u (x8 HCA), performance reduced\n", | ||
984 | linkstat); | ||
985 | else | ||
986 | ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n", | ||
987 | dd->ipath_lbus_speed, linkstat); | ||
988 | |||
989 | if (speed != 1) | ||
990 | ipath_dev_err(dd, | ||
991 | "PCIe linkspeed %u is incorrect; " | ||
992 | "should be 1 (2500)!\n", speed); | ||
993 | bail: | ||
994 | /* fill in string, even on errors */ | ||
995 | snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info), | ||
996 | "PCIe,%uMHz,x%u\n", | ||
997 | dd->ipath_lbus_speed, | ||
998 | dd->ipath_lbus_width); | ||
999 | |||
1000 | return; | ||
1001 | } | ||
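As a hedged illustration of the decode above: a raw Link Status word of 0x0081 (a made-up value) would yield speed code 1 (2.5 GT/s, Gen1) and a negotiated width of x8. These are the standard Current Link Speed and Negotiated Link Width fields of PCI_EXP_LNKSTA; the open-coded masks below simply match what the function does:

	u16 linkstat = 0x0081;			/* hypothetical raw LNKSTA value */
	u16 speed = linkstat & 0xf;		/* bits 3:0: link speed code -> 1 */
	u16 width = (linkstat >> 4) & 0x1f;	/* bits 8:4: negotiated width -> 8 (x8) */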
1002 | |||
1003 | /** | ||
1004 | * ipath_setup_pe_config - setup PCIe config related stuff | ||
1005 | * @dd: the infinipath device | ||
1006 | * @pdev: the PCI device | ||
1007 | * | ||
1008 | * The pci_enable_msi() call will fail on systems with MSI quirks | ||
1009 | * such as those with AMD8131, even if the device of interest is not | ||
1010 | * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed | ||
1011 | * late in 2.6.16). | ||
1012 | * All that can be done is to edit the kernel source to remove the quirk | ||
1013 | * check until that is fixed. | ||
1014 | * We do not need to call enable_msi() for our HyperTransport chip, | ||
1015 | * even though it uses MSI, and we want to avoid the quirk warning, | ||
1016 | * so we call enable_msi only for PCIe. If we do end up needing | ||
1017 | * pci_enable_msi at some point in the future for HT, we'll move the | ||
1018 | * call back into the main init_one code. | ||
1019 | * We save the msi lo and hi values, so we can restore them after | ||
1020 | * chip reset (the kernel PCI infrastructure doesn't yet handle that | ||
1021 | * correctly). | ||
1022 | */ | ||
1023 | static int ipath_setup_pe_config(struct ipath_devdata *dd, | ||
1024 | struct pci_dev *pdev) | ||
1025 | { | ||
1026 | int pos, ret; | ||
1027 | |||
1028 | dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ | ||
1029 | ret = pci_enable_msi(dd->pcidev); | ||
1030 | if (ret) | ||
1031 | ipath_dev_err(dd, "pci_enable_msi failed: %d, " | ||
1032 | "interrupts may not work\n", ret); | ||
1033 | /* continue even if it fails, we may still be OK... */ | ||
1034 | dd->ipath_irq = pdev->irq; | ||
1035 | |||
1036 | if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { | ||
1037 | u16 control; | ||
1038 | pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, | ||
1039 | &dd->ipath_msi_lo); | ||
1040 | pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, | ||
1041 | &dd->ipath_msi_hi); | ||
1042 | pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, | ||
1043 | &control); | ||
1044 | /* now save the data (vector) info */ | ||
1045 | pci_read_config_word(dd->pcidev, | ||
1046 | pos + ((control & PCI_MSI_FLAGS_64BIT) | ||
1047 | ? 12 : 8), | ||
1048 | &dd->ipath_msi_data); | ||
1049 | ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset " | ||
1050 | "0x%x, control=0x%x\n", dd->ipath_msi_data, | ||
1051 | pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8), | ||
1052 | control); | ||
1053 | /* we save the cachelinesize also, although it doesn't | ||
1054 | * really matter */ | ||
1055 | pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, | ||
1056 | &dd->ipath_pci_cacheline); | ||
1057 | } else | ||
1058 | ipath_dev_err(dd, "Can't find MSI capability, " | ||
1059 | "can't save MSI settings for reset\n"); | ||
1060 | |||
1061 | ipath_6120_pcie_params(dd); | ||
1062 | |||
1063 | dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | ||
1064 | dd->ipath_link_speed_supported = IPATH_IB_SDR; | ||
1065 | dd->ipath_link_width_enabled = IB_WIDTH_4X; | ||
1066 | dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported; | ||
1067 | /* these can't change for this chip, so set once */ | ||
1068 | dd->ipath_link_width_active = dd->ipath_link_width_enabled; | ||
1069 | dd->ipath_link_speed_active = dd->ipath_link_speed_enabled; | ||
1070 | return 0; | ||
1071 | } | ||
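For reference (a hedged sketch, not part of the patch): the 8-versus-12 offset used when reading the MSI data word above follows the MSI capability layout, where the Message Data register sits 8 bytes into the capability for 32-bit addressing and 12 bytes in when PCI_MSI_FLAGS_64BIT is set:

	/* hypothetical helper mirroring the offset choice made above */
	static int msi_data_off(u16 msi_control)
	{
		return (msi_control & PCI_MSI_FLAGS_64BIT) ? 12 : 8;
	}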
1072 | |||
1073 | static void ipath_init_pe_variables(struct ipath_devdata *dd) | ||
1074 | { | ||
1075 | /* | ||
1076 | * setup the register offsets, since they are different for each | ||
1077 | * chip | ||
1078 | */ | ||
1079 | dd->ipath_kregs = &ipath_pe_kregs; | ||
1080 | dd->ipath_cregs = &ipath_pe_cregs; | ||
1081 | |||
1082 | /* | ||
1083 | * bits for selecting i2c direction and values, | ||
1084 | * used for I2C serial flash | ||
1085 | */ | ||
1086 | dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; | ||
1087 | dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; | ||
1088 | dd->ipath_gpio_sda = IPATH_GPIO_SDA; | ||
1089 | dd->ipath_gpio_scl = IPATH_GPIO_SCL; | ||
1090 | |||
1091 | /* | ||
1092 | * Fill in data for field-values that change in newer chips. | ||
1093 | * We dynamically specify only the mask for LINKTRAININGSTATE | ||
1094 | * and only the shift for LINKSTATE, as they are the only ones | ||
1095 | * that change. Also precalculate the 3 link states of interest | ||
1096 | * and the combined mask. | ||
1097 | */ | ||
1098 | dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT; | ||
1099 | dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK; | ||
1100 | dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK << | ||
1101 | dd->ibcs_ls_shift) | dd->ibcs_lts_mask; | ||
1102 | dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP << | ||
1103 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | | ||
1104 | (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift); | ||
1105 | dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP << | ||
1106 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | | ||
1107 | (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift); | ||
1108 | dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP << | ||
1109 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | | ||
1110 | (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift); | ||
1111 | |||
1112 | /* | ||
1113 | * Fill in data for ibcc field-values that change in newer chips. | ||
1114 | * We dynamically specify only the mask for LINKINITCMD | ||
1115 | * and only the shift for LINKCMD and MAXPKTLEN, as they are | ||
1116 | * the only ones that change. | ||
1117 | */ | ||
1118 | dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK; | ||
1119 | dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT; | ||
1120 | dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT; | ||
1121 | |||
1122 | /* Fill in shifts for RcvCtrl. */ | ||
1123 | dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT; | ||
1124 | dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT; | ||
1125 | dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT; | ||
1126 | dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */ | ||
1127 | |||
1128 | /* variables for sanity checking interrupt and errors */ | ||
1129 | dd->ipath_hwe_bitsextant = | ||
1130 | (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << | ||
1131 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | | ||
1132 | (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << | ||
1133 | INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | | ||
1134 | (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK << | ||
1135 | INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) | | ||
1136 | INFINIPATH_HWE_PCIE1PLLFAILED | | ||
1137 | INFINIPATH_HWE_PCIE0PLLFAILED | | ||
1138 | INFINIPATH_HWE_PCIEPOISONEDTLP | | ||
1139 | INFINIPATH_HWE_PCIECPLTIMEOUT | | ||
1140 | INFINIPATH_HWE_PCIEBUSPARITYXTLH | | ||
1141 | INFINIPATH_HWE_PCIEBUSPARITYXADM | | ||
1142 | INFINIPATH_HWE_PCIEBUSPARITYRADM | | ||
1143 | INFINIPATH_HWE_MEMBISTFAILED | | ||
1144 | INFINIPATH_HWE_COREPLL_FBSLIP | | ||
1145 | INFINIPATH_HWE_COREPLL_RFSLIP | | ||
1146 | INFINIPATH_HWE_SERDESPLLFAILED | | ||
1147 | INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | | ||
1148 | INFINIPATH_HWE_IBCBUSFRSPCPARITYERR; | ||
1149 | dd->ipath_i_bitsextant = | ||
1150 | (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | | ||
1151 | (INFINIPATH_I_RCVAVAIL_MASK << | ||
1152 | INFINIPATH_I_RCVAVAIL_SHIFT) | | ||
1153 | INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | | ||
1154 | INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO; | ||
1155 | dd->ipath_e_bitsextant = | ||
1156 | INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | | ||
1157 | INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | | ||
1158 | INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | | ||
1159 | INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR | | ||
1160 | INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP | | ||
1161 | INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION | | ||
1162 | INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | | ||
1163 | INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN | | ||
1164 | INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK | | ||
1165 | INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN | | ||
1166 | INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN | | ||
1167 | INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT | | ||
1168 | INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | | ||
1169 | INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED | | ||
1170 | INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET | | ||
1171 | INFINIPATH_E_HARDWARE; | ||
1172 | |||
1173 | dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; | ||
1174 | dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; | ||
1175 | dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT; | ||
1176 | dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT; | ||
1177 | |||
1178 | /* | ||
1179 | * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. | ||
1180 | * 2 is Some Misc, 3 is reserved for future. | ||
1181 | */ | ||
1182 | dd->ipath_eep_st_masks[0].hwerrs_to_log = | ||
1183 | INFINIPATH_HWE_TXEMEMPARITYERR_MASK << | ||
1184 | INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT; | ||
1185 | |||
1186 | /* Ignore errors in PIO/PBC on systems with unordered write-combining */ | ||
1187 | if (ipath_unordered_wc()) | ||
1188 | dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY; | ||
1189 | |||
1190 | dd->ipath_eep_st_masks[1].hwerrs_to_log = | ||
1191 | INFINIPATH_HWE_RXEMEMPARITYERR_MASK << | ||
1192 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; | ||
1193 | |||
1194 | dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET; | ||
1195 | dd->delay_mult = 2; /* SDR, 4X, can't change */ | ||
1196 | } | ||
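A minimal sketch of how the precalculated shift, mask, and link-state patterns set up above might be used; this helper is hypothetical and assumes an IBCStatus value has already been read into ibcs:

	static int ipath_pe_link_is_active(struct ipath_devdata *dd, u64 ibcs)
	{
		/* dd->ib_active combines LT_STATE_LINKUP and L_STATE_ACTIVE */
		return (ibcs & dd->ibcs_mask) == dd->ib_active;
	}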
1197 | |||
1198 | /* setup the MSI stuff again after a reset. I'd like to just call | ||
1199 | * pci_enable_msi() and request_irq() again, but when I do that, | ||
1200 | * the MSI enable bit doesn't get set in the command word, and | ||
1201 | * we switch to a different interrupt vector, which is confusing, | ||
1202 | * so I instead just do it all inline. Perhaps we can somehow tie this | ||
1203 | * into the PCIe hotplug support at some point. | ||
1204 | * Note, because I'm doing it all here, I don't call pci_disable_msi() | ||
1205 | * or free_irq() at the start of ipath_setup_pe_reset(). | ||
1206 | */ | ||
1207 | static int ipath_reinit_msi(struct ipath_devdata *dd) | ||
1208 | { | ||
1209 | int pos; | ||
1210 | u16 control; | ||
1211 | int ret; | ||
1212 | |||
1213 | if (!dd->ipath_msi_lo) { | ||
1214 | dev_info(&dd->pcidev->dev, "Can't restore MSI config, " | ||
1215 | "initial setup failed?\n"); | ||
1216 | ret = 0; | ||
1217 | goto bail; | ||
1218 | } | ||
1219 | |||
1220 | if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { | ||
1221 | ipath_dev_err(dd, "Can't find MSI capability, " | ||
1222 | "can't restore MSI settings\n"); | ||
1223 | ret = 0; | ||
1224 | goto bail; | ||
1225 | } | ||
1226 | ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n", | ||
1227 | dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO); | ||
1228 | pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, | ||
1229 | dd->ipath_msi_lo); | ||
1230 | ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n", | ||
1231 | dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI); | ||
1232 | pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, | ||
1233 | dd->ipath_msi_hi); | ||
1234 | pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control); | ||
1235 | if (!(control & PCI_MSI_FLAGS_ENABLE)) { | ||
1236 | ipath_cdbg(VERBOSE, "MSI control at off %x was %x, " | ||
1237 | "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS, | ||
1238 | control, control | PCI_MSI_FLAGS_ENABLE); | ||
1239 | control |= PCI_MSI_FLAGS_ENABLE; | ||
1240 | pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, | ||
1241 | control); | ||
1242 | } | ||
1243 | /* now rewrite the data (vector) info */ | ||
1244 | pci_write_config_word(dd->pcidev, pos + | ||
1245 | ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8), | ||
1246 | dd->ipath_msi_data); | ||
1247 | /* we restore the cachelinesize also, although it doesn't really | ||
1248 | * matter */ | ||
1249 | pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, | ||
1250 | dd->ipath_pci_cacheline); | ||
1251 | /* and now set the pci master bit again */ | ||
1252 | pci_set_master(dd->pcidev); | ||
1253 | ret = 1; | ||
1254 | |||
1255 | bail: | ||
1256 | return ret; | ||
1257 | } | ||
1258 | |||
1259 | /* This routine sleeps, so it can only be called from user context, not | ||
1260 | * from interrupt context. If we need interrupt context, we can split | ||
1261 | * it into two routines. | ||
1262 | */ | ||
1263 | static int ipath_setup_pe_reset(struct ipath_devdata *dd) | ||
1264 | { | ||
1265 | u64 val; | ||
1266 | int i; | ||
1267 | int ret; | ||
1268 | u16 cmdval; | ||
1269 | |||
1270 | pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval); | ||
1271 | |||
1272 | /* Use ERROR so it shows up in logs, etc. */ | ||
1273 | ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); | ||
1274 | /* keep chip from being accessed in a few places */ | ||
1275 | dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); | ||
1276 | val = dd->ipath_control | INFINIPATH_C_RESET; | ||
1277 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); | ||
1278 | mb(); | ||
1279 | |||
1280 | for (i = 1; i <= 5; i++) { | ||
1281 | int r; | ||
1282 | /* allow MBIST, etc. to complete; longer on each retry. | ||
1283 | * We sometimes get machine checks from bus timeout if no | ||
1284 | * response, so for now, make it *really* long. | ||
1285 | */ | ||
1286 | msleep(1000 + (1 + i) * 2000); | ||
1287 | if ((r = | ||
1288 | pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, | ||
1289 | dd->ipath_pcibar0))) | ||
1290 | ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", | ||
1291 | r); | ||
1292 | if ((r = | ||
1293 | pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, | ||
1294 | dd->ipath_pcibar1))) | ||
1295 | ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", | ||
1296 | r); | ||
1297 | /* now re-enable memory access */ | ||
1298 | pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval); | ||
1299 | if ((r = pci_enable_device(dd->pcidev))) | ||
1300 | ipath_dev_err(dd, "pci_enable_device failed after " | ||
1301 | "reset: %d\n", r); | ||
1302 | /* | ||
1303 | * whether it fully enabled or not, mark as present, | ||
1304 | * again (but not INITTED) | ||
1305 | */ | ||
1306 | dd->ipath_flags |= IPATH_PRESENT; | ||
1307 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); | ||
1308 | if (val == dd->ipath_revision) { | ||
1309 | ipath_cdbg(VERBOSE, "Got matching revision " | ||
1310 | "register %llx on try %d\n", | ||
1311 | (unsigned long long) val, i); | ||
1312 | ret = ipath_reinit_msi(dd); | ||
1313 | goto bail; | ||
1314 | } | ||
1315 | /* Probably getting -1 back */ | ||
1316 | ipath_dbg("Didn't get expected revision register, " | ||
1317 | "got %llx, try %d\n", (unsigned long long) val, | ||
1318 | i + 1); | ||
1319 | } | ||
1320 | ret = 0; /* failed */ | ||
1321 | |||
1322 | bail: | ||
1323 | if (ret) | ||
1324 | ipath_6120_pcie_params(dd); | ||
1325 | return ret; | ||
1326 | } | ||
1327 | |||
1328 | /** | ||
1329 | * ipath_pe_put_tid - write a TID in chip | ||
1330 | * @dd: the infinipath device | ||
1331 | * @tidptr: pointer to the expected TID (in chip) to update | ||
1332 | * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected | ||
1333 | * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing | ||
1334 | * | ||
1335 | * This exists as a separate routine to allow for special locking etc. | ||
1336 | * It's used for both the full cleanup on exit, as well as the normal | ||
1337 | * setup and teardown. | ||
1338 | */ | ||
1339 | static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, | ||
1340 | u32 type, unsigned long pa) | ||
1341 | { | ||
1342 | u32 __iomem *tidp32 = (u32 __iomem *)tidptr; | ||
1343 | unsigned long flags = 0; /* keep gcc quiet */ | ||
1344 | int tidx; | ||
1345 | spinlock_t *tidlockp; | ||
1346 | |||
1347 | if (!dd->ipath_kregbase) | ||
1348 | return; | ||
1349 | |||
1350 | if (pa != dd->ipath_tidinvalid) { | ||
1351 | if (pa & ((1U << 11) - 1)) { | ||
1352 | dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " | ||
1353 | "not 2KB aligned!\n", pa); | ||
1354 | return; | ||
1355 | } | ||
1356 | pa >>= 11; | ||
1357 | /* paranoia check */ | ||
1358 | if (pa & ~INFINIPATH_RT_ADDR_MASK) | ||
1359 | ipath_dev_err(dd, | ||
1360 | "BUG: Physical page address 0x%lx " | ||
1361 | "has bits set in 31-29\n", pa); | ||
1362 | |||
1363 | if (type == RCVHQ_RCV_TYPE_EAGER) | ||
1364 | pa |= dd->ipath_tidtemplate; | ||
1365 | else /* for now, always full 4KB page */ | ||
1366 | pa |= 2 << 29; | ||
1367 | } | ||
1368 | |||
1369 | /* | ||
1370 | * Workaround chip bug 9437 by writing the scratch register | ||
1371 | * before and after the TID, and with an io write barrier. | ||
1372 | * We use a spinlock around the writes, so they can't intermix | ||
1373 | * with other TID (eager or expected) writes (the chip bug | ||
1374 | * is triggered by back to back TID writes). Unfortunately, this | ||
1375 | * call can be done from interrupt level for the port 0 eager TIDs, | ||
1376 | * so we have to use irqsave locks. | ||
1377 | */ | ||
1378 | /* | ||
1379 | * Assumes tidptr always > ipath_egrtidbase | ||
1380 | * if type == RCVHQ_RCV_TYPE_EAGER. | ||
1381 | */ | ||
1382 | tidx = tidptr - dd->ipath_egrtidbase; | ||
1383 | |||
1384 | tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt) | ||
1385 | ? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock; | ||
1386 | spin_lock_irqsave(tidlockp, flags); | ||
1387 | ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf); | ||
1388 | writel(pa, tidp32); | ||
1389 | ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef); | ||
1390 | mmiowb(); | ||
1391 | spin_unlock_irqrestore(tidlockp, flags); | ||
1392 | } | ||
1393 | |||
1394 | /** | ||
1395 | * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher | ||
1396 | * @dd: the infinipath device | ||
1397 | * @tidptr: pointer to the expected TID (in chip) to update | ||
1398 | * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected | ||
1399 | * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing | ||
1400 | * | ||
1401 | * This exists as a separate routine to allow for selection of the | ||
1402 | * appropriate "flavor". The static calls in cleanup just use the | ||
1403 | * revision-agnostic form, as they are not performance critical. | ||
1404 | */ | ||
1405 | static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr, | ||
1406 | u32 type, unsigned long pa) | ||
1407 | { | ||
1408 | u32 __iomem *tidp32 = (u32 __iomem *)tidptr; | ||
1409 | u32 tidx; | ||
1410 | |||
1411 | if (!dd->ipath_kregbase) | ||
1412 | return; | ||
1413 | |||
1414 | if (pa != dd->ipath_tidinvalid) { | ||
1415 | if (pa & ((1U << 11) - 1)) { | ||
1416 | dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " | ||
1417 | "not 2KB aligned!\n", pa); | ||
1418 | return; | ||
1419 | } | ||
1420 | pa >>= 11; | ||
1421 | /* paranoia check */ | ||
1422 | if (pa & ~INFINIPATH_RT_ADDR_MASK) | ||
1423 | ipath_dev_err(dd, | ||
1424 | "BUG: Physical page address 0x%lx " | ||
1425 | "has bits set in 31-29\n", pa); | ||
1426 | |||
1427 | if (type == RCVHQ_RCV_TYPE_EAGER) | ||
1428 | pa |= dd->ipath_tidtemplate; | ||
1429 | else /* for now, always full 4KB page */ | ||
1430 | pa |= 2 << 29; | ||
1431 | } | ||
1432 | tidx = tidptr - dd->ipath_egrtidbase; | ||
1433 | writel(pa, tidp32); | ||
1434 | mmiowb(); | ||
1435 | } | ||
1436 | |||
1437 | |||
1438 | /** | ||
1439 | * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager | ||
1440 | * @dd: the infinipath device | ||
1441 | * @port: the port | ||
1442 | * | ||
1443 | * clear all TID entries for a port, expected and eager. | ||
1444 | * Used from ipath_close(). On this chip, TIDs are only 32 bits, | ||
1445 | * not 64, but they are still on 64 bit boundaries, so tidbase | ||
1446 | * is declared as u64 * for the pointer math, even though we write 32 bits | ||
1447 | */ | ||
1448 | static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port) | ||
1449 | { | ||
1450 | u64 __iomem *tidbase; | ||
1451 | unsigned long tidinv; | ||
1452 | int i; | ||
1453 | |||
1454 | if (!dd->ipath_kregbase) | ||
1455 | return; | ||
1456 | |||
1457 | ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port); | ||
1458 | |||
1459 | tidinv = dd->ipath_tidinvalid; | ||
1460 | tidbase = (u64 __iomem *) | ||
1461 | ((char __iomem *)(dd->ipath_kregbase) + | ||
1462 | dd->ipath_rcvtidbase + | ||
1463 | port * dd->ipath_rcvtidcnt * sizeof(*tidbase)); | ||
1464 | |||
1465 | for (i = 0; i < dd->ipath_rcvtidcnt; i++) | ||
1466 | dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, | ||
1467 | tidinv); | ||
1468 | |||
1469 | tidbase = (u64 __iomem *) | ||
1470 | ((char __iomem *)(dd->ipath_kregbase) + | ||
1471 | dd->ipath_rcvegrbase + | ||
1472 | port * dd->ipath_rcvegrcnt * sizeof(*tidbase)); | ||
1473 | |||
1474 | for (i = 0; i < dd->ipath_rcvegrcnt; i++) | ||
1475 | dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, | ||
1476 | tidinv); | ||
1477 | } | ||
1478 | |||
1479 | /** | ||
1480 | * ipath_pe_tidtemplate - setup constants for TID updates | ||
1481 | * @dd: the infinipath device | ||
1482 | * | ||
1483 | * We setup stuff that we use a lot, to avoid calculating each time | ||
1484 | */ | ||
1485 | static void ipath_pe_tidtemplate(struct ipath_devdata *dd) | ||
1486 | { | ||
1487 | u32 egrsize = dd->ipath_rcvegrbufsize; | ||
1488 | |||
1489 | /* For now, we always allocate 4KB buffers (at init) so we can | ||
1490 | * receive max size packets. We may want a module parameter to | ||
1491 | * specify 2KB or 4KB and/or make it per port instead of per device | ||
1492 | * for those who want to reduce memory footprint. Note that the | ||
1493 | * ipath_rcvhdrentsize size must be large enough to hold the largest | ||
1494 | * IB header (currently 96 bytes) that we expect to handle (plus of | ||
1495 | * course the 2 dwords of RHF). | ||
1496 | */ | ||
1497 | if (egrsize == 2048) | ||
1498 | dd->ipath_tidtemplate = 1U << 29; | ||
1499 | else if (egrsize == 4096) | ||
1500 | dd->ipath_tidtemplate = 2U << 29; | ||
1501 | else { | ||
1502 | egrsize = 4096; | ||
1503 | dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize " | ||
1504 | "%u, using %u\n", dd->ipath_rcvegrbufsize, | ||
1505 | egrsize); | ||
1506 | dd->ipath_tidtemplate = 2U << 29; | ||
1507 | } | ||
1508 | dd->ipath_tidinvalid = 0; | ||
1509 | } | ||
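To make the template encoding concrete, a hedged example of the 32-bit word that ipath_pe_put_tid() ends up writing for a 4 KB eager buffer; the physical address is hypothetical:

	unsigned long pa = 0x12345000UL;	/* hypothetical, 2 KB aligned */
	u32 chipword;

	pa >>= 11;				/* the chip stores the address in 2 KB units */
	chipword = pa | (2U << 29);		/* bits 30:29 = 2 selects a 4 KB buffer */
	/* chipword == 0x4002468a; writel(chipword, tidp32) would store it */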
1510 | |||
1511 | static int ipath_pe_early_init(struct ipath_devdata *dd) | ||
1512 | { | ||
1513 | dd->ipath_flags |= IPATH_4BYTE_TID; | ||
1514 | if (ipath_unordered_wc()) | ||
1515 | dd->ipath_flags |= IPATH_PIO_FLUSH_WC; | ||
1516 | |||
1517 | /* | ||
1518 | * For openfabrics, we need to be able to handle an IB header of | ||
1519 | * 24 dwords. HT chip has arbitrary sized receive buffers, so we | ||
1520 | * made them the same size as the PIO buffers. This chip does not | ||
1521 | * handle arbitrary size buffers, so we need the header large enough | ||
1522 | * to handle the largest IB header, but still have room for a 2KB MTU | ||
1523 | * standard IB packet. | ||
1524 | */ | ||
1525 | dd->ipath_rcvhdrentsize = 24; | ||
1526 | dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; | ||
1527 | dd->ipath_rhf_offset = 0; | ||
1528 | dd->ipath_egrtidbase = (u64 __iomem *) | ||
1529 | ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase); | ||
1530 | |||
1531 | dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048; | ||
1532 | /* | ||
1533 | * the min() check here is currently a nop, but it may not always | ||
1534 | * be, depending on just how we do ipath_rcvegrbufsize | ||
1535 | */ | ||
1536 | dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k : | ||
1537 | dd->ipath_piosize2k, | ||
1538 | dd->ipath_rcvegrbufsize + | ||
1539 | (dd->ipath_rcvhdrentsize << 2)); | ||
1540 | dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; | ||
1541 | |||
1542 | /* | ||
1543 | * We can request a receive interrupt for 1 or | ||
1544 | * more packets from current offset. For now, we set this | ||
1545 | * up for a single packet. | ||
1546 | */ | ||
1547 | dd->ipath_rhdrhead_intr_off = 1ULL<<32; | ||
1548 | |||
1549 | ipath_get_eeprom_info(dd); | ||
1550 | |||
1551 | return 0; | ||
1552 | } | ||
1553 | |||
1554 | int __attribute__((weak)) ipath_unordered_wc(void) | ||
1555 | { | ||
1556 | return 0; | ||
1557 | } | ||
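The weak default above can be overridden by an architecture-specific source file. A hedged sketch of what such an override might look like on x86-64; the vendor test is illustrative, not necessarily the driver's actual check:

	/* e.g. in an x86-64-only file; requires <asm/processor.h> */
	int ipath_unordered_wc(void)
	{
		/* write-combining store ordering differs by CPU vendor */
		return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
	}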
1558 | |||
1559 | /** | ||
1560 | * ipath_init_pe_get_base_info - set chip-specific flags for user code | ||
1561 | * @pd: the infinipath port | ||
1562 | * @kbase: ipath_base_info pointer | ||
1563 | * | ||
1564 | * We set the PCIE flag because the lower bandwidth on PCIe vs | ||
1565 | * HyperTransport can affect some user packet algorithms. | ||
1566 | */ | ||
1567 | static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase) | ||
1568 | { | ||
1569 | struct ipath_base_info *kinfo = kbase; | ||
1570 | struct ipath_devdata *dd; | ||
1571 | |||
1572 | if (ipath_unordered_wc()) { | ||
1573 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER; | ||
1574 | ipath_cdbg(PROC, "Intel processor, forcing WC order\n"); | ||
1575 | } | ||
1576 | else | ||
1577 | ipath_cdbg(PROC, "Not Intel processor, WC ordered\n"); | ||
1578 | |||
1579 | if (pd == NULL) | ||
1580 | goto done; | ||
1581 | |||
1582 | dd = pd->port_dd; | ||
1583 | |||
1584 | done: | ||
1585 | kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE | | ||
1586 | IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED; | ||
1587 | return 0; | ||
1588 | } | ||
1589 | |||
1590 | static void ipath_pe_free_irq(struct ipath_devdata *dd) | ||
1591 | { | ||
1592 | free_irq(dd->ipath_irq, dd); | ||
1593 | dd->ipath_irq = 0; | ||
1594 | } | ||
1595 | |||
1596 | |||
1597 | static struct ipath_message_header * | ||
1598 | ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr) | ||
1599 | { | ||
1600 | return (struct ipath_message_header *) | ||
1601 | &rhf_addr[sizeof(u64) / sizeof(u32)]; | ||
1602 | } | ||
1603 | |||
1604 | static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports) | ||
1605 | { | ||
1606 | dd->ipath_portcnt = | ||
1607 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); | ||
1608 | dd->ipath_p0_rcvegrcnt = | ||
1609 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); | ||
1610 | } | ||
1611 | |||
1612 | static void ipath_pe_read_counters(struct ipath_devdata *dd, | ||
1613 | struct infinipath_counters *cntrs) | ||
1614 | { | ||
1615 | cntrs->LBIntCnt = | ||
1616 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt)); | ||
1617 | cntrs->LBFlowStallCnt = | ||
1618 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt)); | ||
1619 | cntrs->TxSDmaDescCnt = 0; | ||
1620 | cntrs->TxUnsupVLErrCnt = | ||
1621 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt)); | ||
1622 | cntrs->TxDataPktCnt = | ||
1623 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt)); | ||
1624 | cntrs->TxFlowPktCnt = | ||
1625 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt)); | ||
1626 | cntrs->TxDwordCnt = | ||
1627 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt)); | ||
1628 | cntrs->TxLenErrCnt = | ||
1629 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt)); | ||
1630 | cntrs->TxMaxMinLenErrCnt = | ||
1631 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt)); | ||
1632 | cntrs->TxUnderrunCnt = | ||
1633 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt)); | ||
1634 | cntrs->TxFlowStallCnt = | ||
1635 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt)); | ||
1636 | cntrs->TxDroppedPktCnt = | ||
1637 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt)); | ||
1638 | cntrs->RxDroppedPktCnt = | ||
1639 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt)); | ||
1640 | cntrs->RxDataPktCnt = | ||
1641 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt)); | ||
1642 | cntrs->RxFlowPktCnt = | ||
1643 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt)); | ||
1644 | cntrs->RxDwordCnt = | ||
1645 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt)); | ||
1646 | cntrs->RxLenErrCnt = | ||
1647 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt)); | ||
1648 | cntrs->RxMaxMinLenErrCnt = | ||
1649 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt)); | ||
1650 | cntrs->RxICRCErrCnt = | ||
1651 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt)); | ||
1652 | cntrs->RxVCRCErrCnt = | ||
1653 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt)); | ||
1654 | cntrs->RxFlowCtrlErrCnt = | ||
1655 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt)); | ||
1656 | cntrs->RxBadFormatCnt = | ||
1657 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt)); | ||
1658 | cntrs->RxLinkProblemCnt = | ||
1659 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt)); | ||
1660 | cntrs->RxEBPCnt = | ||
1661 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt)); | ||
1662 | cntrs->RxLPCRCErrCnt = | ||
1663 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt)); | ||
1664 | cntrs->RxBufOvflCnt = | ||
1665 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt)); | ||
1666 | cntrs->RxTIDFullErrCnt = | ||
1667 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt)); | ||
1668 | cntrs->RxTIDValidErrCnt = | ||
1669 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt)); | ||
1670 | cntrs->RxPKeyMismatchCnt = | ||
1671 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt)); | ||
1672 | cntrs->RxP0HdrEgrOvflCnt = | ||
1673 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt)); | ||
1674 | cntrs->RxP1HdrEgrOvflCnt = | ||
1675 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt)); | ||
1676 | cntrs->RxP2HdrEgrOvflCnt = | ||
1677 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt)); | ||
1678 | cntrs->RxP3HdrEgrOvflCnt = | ||
1679 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt)); | ||
1680 | cntrs->RxP4HdrEgrOvflCnt = | ||
1681 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt)); | ||
1682 | cntrs->RxP5HdrEgrOvflCnt = 0; | ||
1683 | cntrs->RxP6HdrEgrOvflCnt = 0; | ||
1684 | cntrs->RxP7HdrEgrOvflCnt = 0; | ||
1685 | cntrs->RxP8HdrEgrOvflCnt = 0; | ||
1686 | cntrs->RxP9HdrEgrOvflCnt = 0; | ||
1687 | cntrs->RxP10HdrEgrOvflCnt = 0; | ||
1688 | cntrs->RxP11HdrEgrOvflCnt = 0; | ||
1689 | cntrs->RxP12HdrEgrOvflCnt = 0; | ||
1690 | cntrs->RxP13HdrEgrOvflCnt = 0; | ||
1691 | cntrs->RxP14HdrEgrOvflCnt = 0; | ||
1692 | cntrs->RxP15HdrEgrOvflCnt = 0; | ||
1693 | cntrs->RxP16HdrEgrOvflCnt = 0; | ||
1694 | cntrs->IBStatusChangeCnt = | ||
1695 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt)); | ||
1696 | cntrs->IBLinkErrRecoveryCnt = | ||
1697 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt)); | ||
1698 | cntrs->IBLinkDownedCnt = | ||
1699 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt)); | ||
1700 | cntrs->IBSymbolErrCnt = | ||
1701 | ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt)); | ||
1702 | cntrs->RxVL15DroppedPktCnt = 0; | ||
1703 | cntrs->RxOtherLocalPhyErrCnt = 0; | ||
1704 | cntrs->PcieRetryBufDiagQwordCnt = 0; | ||
1705 | cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs; | ||
1706 | cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs; | ||
1707 | cntrs->RxVlErrCnt = 0; | ||
1708 | cntrs->RxDlidFltrCnt = 0; | ||
1709 | } | ||
1710 | |||
1711 | |||
1712 | /* no interrupt fallback for these chips */ | ||
1713 | static int ipath_pe_nointr_fallback(struct ipath_devdata *dd) | ||
1714 | { | ||
1715 | return 0; | ||
1716 | } | ||
1717 | |||
1718 | |||
1719 | /* | ||
1720 | * reset the XGXS (between serdes and IBC). Slightly less intrusive | ||
1721 | * than resetting the IBC or external link state, and useful in some | ||
1722 | * cases to cause some retraining. To do this right, we reset IBC | ||
1723 | * as well. | ||
1724 | */ | ||
1725 | static void ipath_pe_xgxs_reset(struct ipath_devdata *dd) | ||
1726 | { | ||
1727 | u64 val, prev_val; | ||
1728 | |||
1729 | prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); | ||
1730 | val = prev_val | INFINIPATH_XGXS_RESET; | ||
1731 | prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */ | ||
1732 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, | ||
1733 | dd->ipath_control & ~INFINIPATH_C_LINKENABLE); | ||
1734 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | ||
1735 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | ||
1736 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val); | ||
1737 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, | ||
1738 | dd->ipath_control); | ||
1739 | } | ||
1740 | |||
1741 | |||
1742 | static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which) | ||
1743 | { | ||
1744 | int ret; | ||
1745 | |||
1746 | switch (which) { | ||
1747 | case IPATH_IB_CFG_LWID: | ||
1748 | ret = dd->ipath_link_width_active; | ||
1749 | break; | ||
1750 | case IPATH_IB_CFG_SPD: | ||
1751 | ret = dd->ipath_link_speed_active; | ||
1752 | break; | ||
1753 | case IPATH_IB_CFG_LWID_ENB: | ||
1754 | ret = dd->ipath_link_width_enabled; | ||
1755 | break; | ||
1756 | case IPATH_IB_CFG_SPD_ENB: | ||
1757 | ret = dd->ipath_link_speed_enabled; | ||
1758 | break; | ||
1759 | default: | ||
1760 | ret = -ENOTSUPP; | ||
1761 | break; | ||
1762 | } | ||
1763 | return ret; | ||
1764 | } | ||
1765 | |||
1766 | |||
1767 | /* we assume range checking is already done, if needed */ | ||
1768 | static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val) | ||
1769 | { | ||
1770 | int ret = 0; | ||
1771 | |||
1772 | if (which == IPATH_IB_CFG_LWID_ENB) | ||
1773 | dd->ipath_link_width_enabled = val; | ||
1774 | else if (which == IPATH_IB_CFG_SPD_ENB) | ||
1775 | dd->ipath_link_speed_enabled = val; | ||
1776 | else | ||
1777 | ret = -ENOTSUPP; | ||
1778 | return ret; | ||
1779 | } | ||
1780 | |||
1781 | static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b) | ||
1782 | { | ||
1783 | } | ||
1784 | |||
1785 | |||
1786 | static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | ||
1787 | { | ||
1788 | if (ibup) { | ||
1789 | if (dd->ibdeltainprog) { | ||
1790 | dd->ibdeltainprog = 0; | ||
1791 | dd->ibsymdelta += | ||
1792 | ipath_read_creg32(dd, | ||
1793 | dd->ipath_cregs->cr_ibsymbolerrcnt) - | ||
1794 | dd->ibsymsnap; | ||
1795 | dd->iblnkerrdelta += | ||
1796 | ipath_read_creg32(dd, | ||
1797 | dd->ipath_cregs->cr_iblinkerrrecovcnt) - | ||
1798 | dd->iblnkerrsnap; | ||
1799 | } | ||
1800 | } else { | ||
1801 | dd->ipath_lli_counter = 0; | ||
1802 | if (!dd->ibdeltainprog) { | ||
1803 | dd->ibdeltainprog = 1; | ||
1804 | dd->ibsymsnap = | ||
1805 | ipath_read_creg32(dd, | ||
1806 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
1807 | dd->iblnkerrsnap = | ||
1808 | ipath_read_creg32(dd, | ||
1809 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
1810 | } | ||
1811 | } | ||
1812 | |||
1813 | ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs), | ||
1814 | ipath_ib_linktrstate(dd, ibcs)); | ||
1815 | return 0; | ||
1816 | } | ||
1817 | |||
1818 | |||
1819 | /** | ||
1820 | * ipath_init_iba6120_funcs - set up the chip-specific function pointers | ||
1821 | * @dd: the infinipath device | ||
1822 | * | ||
1823 | * This is global, and is called directly at init to set up the | ||
1824 | * chip-specific function pointers for later use. | ||
1825 | */ | ||
1826 | void ipath_init_iba6120_funcs(struct ipath_devdata *dd) | ||
1827 | { | ||
1828 | dd->ipath_f_intrsetup = ipath_pe_intconfig; | ||
1829 | dd->ipath_f_bus = ipath_setup_pe_config; | ||
1830 | dd->ipath_f_reset = ipath_setup_pe_reset; | ||
1831 | dd->ipath_f_get_boardname = ipath_pe_boardname; | ||
1832 | dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors; | ||
1833 | dd->ipath_f_early_init = ipath_pe_early_init; | ||
1834 | dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors; | ||
1835 | dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes; | ||
1836 | dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes; | ||
1837 | dd->ipath_f_clear_tids = ipath_pe_clear_tids; | ||
1838 | /* | ||
1839 | * _f_put_tid may get changed after we read the chip revision, | ||
1840 | * but we start with the safe version for all revs | ||
1841 | */ | ||
1842 | dd->ipath_f_put_tid = ipath_pe_put_tid; | ||
1843 | dd->ipath_f_cleanup = ipath_setup_pe_cleanup; | ||
1844 | dd->ipath_f_setextled = ipath_setup_pe_setextled; | ||
1845 | dd->ipath_f_get_base_info = ipath_pe_get_base_info; | ||
1846 | dd->ipath_f_free_irq = ipath_pe_free_irq; | ||
1847 | dd->ipath_f_tidtemplate = ipath_pe_tidtemplate; | ||
1848 | dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback; | ||
1849 | dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset; | ||
1850 | dd->ipath_f_get_msgheader = ipath_pe_get_msgheader; | ||
1851 | dd->ipath_f_config_ports = ipath_pe_config_ports; | ||
1852 | dd->ipath_f_read_counters = ipath_pe_read_counters; | ||
1853 | dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg; | ||
1854 | dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg; | ||
1855 | dd->ipath_f_config_jint = ipath_pe_config_jint; | ||
1856 | dd->ipath_f_ib_updown = ipath_pe_ib_updown; | ||
1857 | |||
1858 | |||
1859 | /* initialize chip-specific variables */ | ||
1860 | ipath_init_pe_variables(dd); | ||
1861 | } | ||
1862 | |||
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c deleted file mode 100644 index 34b778ed97fc..000000000000 --- a/drivers/infiniband/hw/ipath/ipath_iba7220.c +++ /dev/null | |||
@@ -1,2631 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | /* | ||
34 | * This file contains all of the code that is specific to the | ||
35 | * InfiniPath 7220 chip (except that specific to the SerDes) | ||
36 | */ | ||
37 | |||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/pci.h> | ||
40 | #include <linux/sched.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/io.h> | ||
43 | #include <rdma/ib_verbs.h> | ||
44 | |||
45 | #include "ipath_kernel.h" | ||
46 | #include "ipath_registers.h" | ||
47 | #include "ipath_7220.h" | ||
48 | |||
49 | static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64); | ||
50 | |||
51 | static unsigned ipath_compat_ddr_negotiate = 1; | ||
52 | |||
53 | module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint, | ||
54 | S_IWUSR | S_IRUGO); | ||
55 | MODULE_PARM_DESC(compat_ddr_negotiate, | ||
56 | "Attempt pre-IBTA 1.2 DDR speed negotiation"); | ||
57 | |||
58 | static unsigned ipath_sdma_fetch_arb = 1; | ||
59 | module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO); | ||
60 | MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration"); | ||
61 | |||
62 | /* | ||
63 | * This file contains almost all the chip-specific register information and | ||
64 | * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the | ||
65 | * exception of SerDes support, which is in ipath_sd7220.c. | ||
66 | * | ||
67 | * This lists the InfiniPath registers, in the actual chip layout. | ||
68 | * This structure should never be directly accessed. | ||
69 | */ | ||
70 | struct _infinipath_do_not_use_kernel_regs { | ||
71 | unsigned long long Revision; | ||
72 | unsigned long long Control; | ||
73 | unsigned long long PageAlign; | ||
74 | unsigned long long PortCnt; | ||
75 | unsigned long long DebugPortSelect; | ||
76 | unsigned long long DebugSigsIntSel; /* was Reserved0;*/ | ||
77 | unsigned long long SendRegBase; | ||
78 | unsigned long long UserRegBase; | ||
79 | unsigned long long CounterRegBase; | ||
80 | unsigned long long Scratch; | ||
81 | unsigned long long EEPROMAddrCmd; /* was Reserved1; */ | ||
82 | unsigned long long EEPROMData; /* was Reserved2; */ | ||
83 | unsigned long long IntBlocked; | ||
84 | unsigned long long IntMask; | ||
85 | unsigned long long IntStatus; | ||
86 | unsigned long long IntClear; | ||
87 | unsigned long long ErrorMask; | ||
88 | unsigned long long ErrorStatus; | ||
89 | unsigned long long ErrorClear; | ||
90 | unsigned long long HwErrMask; | ||
91 | unsigned long long HwErrStatus; | ||
92 | unsigned long long HwErrClear; | ||
93 | unsigned long long HwDiagCtrl; | ||
94 | unsigned long long MDIO; | ||
95 | unsigned long long IBCStatus; | ||
96 | unsigned long long IBCCtrl; | ||
97 | unsigned long long ExtStatus; | ||
98 | unsigned long long ExtCtrl; | ||
99 | unsigned long long GPIOOut; | ||
100 | unsigned long long GPIOMask; | ||
101 | unsigned long long GPIOStatus; | ||
102 | unsigned long long GPIOClear; | ||
103 | unsigned long long RcvCtrl; | ||
104 | unsigned long long RcvBTHQP; | ||
105 | unsigned long long RcvHdrSize; | ||
106 | unsigned long long RcvHdrCnt; | ||
107 | unsigned long long RcvHdrEntSize; | ||
108 | unsigned long long RcvTIDBase; | ||
109 | unsigned long long RcvTIDCnt; | ||
110 | unsigned long long RcvEgrBase; | ||
111 | unsigned long long RcvEgrCnt; | ||
112 | unsigned long long RcvBufBase; | ||
113 | unsigned long long RcvBufSize; | ||
114 | unsigned long long RxIntMemBase; | ||
115 | unsigned long long RxIntMemSize; | ||
116 | unsigned long long RcvPartitionKey; | ||
117 | unsigned long long RcvQPMulticastPort; | ||
118 | unsigned long long RcvPktLEDCnt; | ||
119 | unsigned long long IBCDDRCtrl; | ||
120 | unsigned long long HRTBT_GUID; | ||
121 | unsigned long long IB_SDTEST_IF_TX; | ||
122 | unsigned long long IB_SDTEST_IF_RX; | ||
123 | unsigned long long IBCDDRCtrl2; | ||
124 | unsigned long long IBCDDRStatus; | ||
125 | unsigned long long JIntReload; | ||
126 | unsigned long long IBNCModeCtrl; | ||
127 | unsigned long long SendCtrl; | ||
128 | unsigned long long SendBufBase; | ||
129 | unsigned long long SendBufSize; | ||
130 | unsigned long long SendBufCnt; | ||
131 | unsigned long long SendAvailAddr; | ||
132 | unsigned long long TxIntMemBase; | ||
133 | unsigned long long TxIntMemSize; | ||
134 | unsigned long long SendDmaBase; | ||
135 | unsigned long long SendDmaLenGen; | ||
136 | unsigned long long SendDmaTail; | ||
137 | unsigned long long SendDmaHead; | ||
138 | unsigned long long SendDmaHeadAddr; | ||
139 | unsigned long long SendDmaBufMask0; | ||
140 | unsigned long long SendDmaBufMask1; | ||
141 | unsigned long long SendDmaBufMask2; | ||
142 | unsigned long long SendDmaStatus; | ||
143 | unsigned long long SendBufferError; | ||
144 | unsigned long long SendBufferErrorCONT1; | ||
145 | unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */ | ||
146 | unsigned long long Reserved6L[2]; | ||
147 | unsigned long long AvailUpdCount; | ||
148 | unsigned long long RcvHdrAddr0; | ||
149 | unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */ | ||
150 | unsigned long long Reserved7hdtl; /* Align next to 300 */ | ||
151 | unsigned long long RcvHdrTailAddr0; /* 300, like others */ | ||
152 | unsigned long long RcvHdrTailAddrs[16]; | ||
153 | unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */ | ||
154 | unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */ | ||
155 | unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */ | ||
156 | unsigned long long Reserved10sds; /* was SerdesStatus on */ | ||
157 | unsigned long long XGXSConfig; | ||
158 | unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */ | ||
159 | unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */ | ||
160 | unsigned long long EEPAddrCmd; | ||
161 | unsigned long long EEPData; | ||
162 | unsigned long long PcieEpbAccCtl; | ||
163 | unsigned long long PcieEpbTransCtl; | ||
164 | unsigned long long EfuseCtl; /* E-Fuse control */ | ||
165 | unsigned long long EfuseData[4]; | ||
166 | unsigned long long ProcMon; | ||
167 | /* this chip moves following two from previous 200, 208 */ | ||
168 | unsigned long long PCIeRBufTestReg0; | ||
169 | unsigned long long PCIeRBufTestReg1; | ||
170 | /* added for this chip */ | ||
171 | unsigned long long PCIeRBufTestReg2; | ||
172 | unsigned long long PCIeRBufTestReg3; | ||
173 | /* added for this chip, debug only */ | ||
174 | unsigned long long SPC_JTAG_ACCESS_REG; | ||
175 | unsigned long long LAControlReg; | ||
176 | unsigned long long GPIODebugSelReg; | ||
177 | unsigned long long DebugPortValueReg; | ||
178 | /* added for this chip, DMA */ | ||
179 | unsigned long long SendDmaBufUsed[3]; | ||
180 | unsigned long long SendDmaReqTagUsed; | ||
181 | /* | ||
182 | * added for this chip, EFUSE: note that these program 64-bit | ||
183 | * words 2 and 3 */ | ||
184 | unsigned long long efuse_pgm_data[2]; | ||
185 | unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */ | ||
186 | /* we have 30 regs for DDS and RXEQ in IB SERDES */ | ||
187 | unsigned long long SerDesDDSRXEQ[30]; | ||
188 | unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */ | ||
189 | /* added for LA debug support */ | ||
190 | unsigned long long LAMemory[32]; | ||
191 | }; | ||
192 | |||
193 | struct _infinipath_do_not_use_counters { | ||
194 | __u64 LBIntCnt; | ||
195 | __u64 LBFlowStallCnt; | ||
196 | __u64 TxSDmaDescCnt; /* was Reserved1 */ | ||
197 | __u64 TxUnsupVLErrCnt; | ||
198 | __u64 TxDataPktCnt; | ||
199 | __u64 TxFlowPktCnt; | ||
200 | __u64 TxDwordCnt; | ||
201 | __u64 TxLenErrCnt; | ||
202 | __u64 TxMaxMinLenErrCnt; | ||
203 | __u64 TxUnderrunCnt; | ||
204 | __u64 TxFlowStallCnt; | ||
205 | __u64 TxDroppedPktCnt; | ||
206 | __u64 RxDroppedPktCnt; | ||
207 | __u64 RxDataPktCnt; | ||
208 | __u64 RxFlowPktCnt; | ||
209 | __u64 RxDwordCnt; | ||
210 | __u64 RxLenErrCnt; | ||
211 | __u64 RxMaxMinLenErrCnt; | ||
212 | __u64 RxICRCErrCnt; | ||
213 | __u64 RxVCRCErrCnt; | ||
214 | __u64 RxFlowCtrlErrCnt; | ||
215 | __u64 RxBadFormatCnt; | ||
216 | __u64 RxLinkProblemCnt; | ||
217 | __u64 RxEBPCnt; | ||
218 | __u64 RxLPCRCErrCnt; | ||
219 | __u64 RxBufOvflCnt; | ||
220 | __u64 RxTIDFullErrCnt; | ||
221 | __u64 RxTIDValidErrCnt; | ||
222 | __u64 RxPKeyMismatchCnt; | ||
223 | __u64 RxP0HdrEgrOvflCnt; | ||
224 | __u64 RxP1HdrEgrOvflCnt; | ||
225 | __u64 RxP2HdrEgrOvflCnt; | ||
226 | __u64 RxP3HdrEgrOvflCnt; | ||
227 | __u64 RxP4HdrEgrOvflCnt; | ||
228 | __u64 RxP5HdrEgrOvflCnt; | ||
229 | __u64 RxP6HdrEgrOvflCnt; | ||
230 | __u64 RxP7HdrEgrOvflCnt; | ||
231 | __u64 RxP8HdrEgrOvflCnt; | ||
232 | __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */ | ||
233 | __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */ | ||
234 | __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */ | ||
235 | __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */ | ||
236 | __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */ | ||
237 | __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */ | ||
238 | __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */ | ||
239 | __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */ | ||
240 | __u64 IBStatusChangeCnt; | ||
241 | __u64 IBLinkErrRecoveryCnt; | ||
242 | __u64 IBLinkDownedCnt; | ||
243 | __u64 IBSymbolErrCnt; | ||
244 | /* The following are new for IBA7220 */ | ||
245 | __u64 RxVL15DroppedPktCnt; | ||
246 | __u64 RxOtherLocalPhyErrCnt; | ||
247 | __u64 PcieRetryBufDiagQwordCnt; | ||
248 | __u64 ExcessBufferOvflCnt; | ||
249 | __u64 LocalLinkIntegrityErrCnt; | ||
250 | __u64 RxVlErrCnt; | ||
251 | __u64 RxDlidFltrCnt; | ||
252 | __u64 Reserved8[7]; | ||
253 | __u64 PSStat; | ||
254 | __u64 PSStart; | ||
255 | __u64 PSInterval; | ||
256 | __u64 PSRcvDataCount; | ||
257 | __u64 PSRcvPktsCount; | ||
258 | __u64 PSXmitDataCount; | ||
259 | __u64 PSXmitPktsCount; | ||
260 | __u64 PSXmitWaitCount; | ||
261 | }; | ||
262 | |||
263 | #define IPATH_KREG_OFFSET(field) (offsetof( \ | ||
264 | struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64)) | ||
265 | #define IPATH_CREG_OFFSET(field) (offsetof( \ | ||
266 | struct _infinipath_do_not_use_counters, field) / sizeof(u64)) | ||
267 | |||
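As a minimal, self-contained sketch of what these two offset macros compute (the struct, field, and macro names below are illustrative stand-ins, not driver code): each register is one 64-bit word, so dividing a field's byte offset by sizeof(u64) yields its register index.

#include <stddef.h>

struct example_regs {
	unsigned long long Revision;	/* byte offset 0  -> index 0 */
	unsigned long long Control;	/* byte offset 8  -> index 1 */
	unsigned long long PageAlign;	/* byte offset 16 -> index 2 */
};

#define EXAMPLE_REG_OFFSET(field) \
	(offsetof(struct example_regs, field) / sizeof(unsigned long long))

/* EXAMPLE_REG_OFFSET(Control) == 1, i.e. the second 64-bit register */
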
268 | static const struct ipath_kregs ipath_7220_kregs = { | ||
269 | .kr_control = IPATH_KREG_OFFSET(Control), | ||
270 | .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase), | ||
271 | .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect), | ||
272 | .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear), | ||
273 | .kr_errormask = IPATH_KREG_OFFSET(ErrorMask), | ||
274 | .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus), | ||
275 | .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl), | ||
276 | .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus), | ||
277 | .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear), | ||
278 | .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask), | ||
279 | .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut), | ||
280 | .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus), | ||
281 | .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl), | ||
282 | .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear), | ||
283 | .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask), | ||
284 | .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus), | ||
285 | .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl), | ||
286 | .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus), | ||
287 | .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked), | ||
288 | .kr_intclear = IPATH_KREG_OFFSET(IntClear), | ||
289 | .kr_intmask = IPATH_KREG_OFFSET(IntMask), | ||
290 | .kr_intstatus = IPATH_KREG_OFFSET(IntStatus), | ||
291 | .kr_mdio = IPATH_KREG_OFFSET(MDIO), | ||
292 | .kr_pagealign = IPATH_KREG_OFFSET(PageAlign), | ||
293 | .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey), | ||
294 | .kr_portcnt = IPATH_KREG_OFFSET(PortCnt), | ||
295 | .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP), | ||
296 | .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase), | ||
297 | .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize), | ||
298 | .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl), | ||
299 | .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase), | ||
300 | .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt), | ||
301 | .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt), | ||
302 | .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize), | ||
303 | .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize), | ||
304 | .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase), | ||
305 | .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize), | ||
306 | .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase), | ||
307 | .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt), | ||
308 | .kr_revision = IPATH_KREG_OFFSET(Revision), | ||
309 | .kr_scratch = IPATH_KREG_OFFSET(Scratch), | ||
310 | .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError), | ||
311 | .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl), | ||
312 | .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr), | ||
313 | .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase), | ||
314 | .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt), | ||
315 | .kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize), | ||
316 | .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase), | ||
317 | .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase), | ||
318 | .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize), | ||
319 | .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase), | ||
320 | |||
321 | .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig), | ||
322 | |||
323 | /* send dma related regs */ | ||
324 | .kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase), | ||
325 | .kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen), | ||
326 | .kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail), | ||
327 | .kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead), | ||
328 | .kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr), | ||
329 | .kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0), | ||
330 | .kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1), | ||
331 | .kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2), | ||
332 | .kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus), | ||
333 | |||
334 | /* SerDes related regs */ | ||
335 | .kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl), | ||
336 | .kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl), | ||
337 | .kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg), | ||
338 | .kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl), | ||
339 | .kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl), | ||
340 | .kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ), | ||
341 | |||
342 | /* | ||
343 | * These should not be used directly via ipath_read_kreg64(), | ||
344 | * use them with ipath_read_kreg64_port() | ||
345 | */ | ||
346 | .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), | ||
347 | .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), | ||
348 | |||
349 | /* | ||
350 | * The rcvpktled register controls one of the debug port signals, so | ||
351 | * a packet activity LED can be connected to it. | ||
352 | */ | ||
353 | .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt), | ||
354 | .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0), | ||
355 | .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1), | ||
356 | |||
357 | .kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID), | ||
358 | .kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl), | ||
359 | .kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus), | ||
360 | .kr_jintreload = IPATH_KREG_OFFSET(JIntReload) | ||
361 | }; | ||
362 | |||
363 | static const struct ipath_cregs ipath_7220_cregs = { | ||
364 | .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt), | ||
365 | .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt), | ||
366 | .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt), | ||
367 | .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt), | ||
368 | .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt), | ||
369 | .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt), | ||
370 | .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt), | ||
371 | .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt), | ||
372 | .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt), | ||
373 | .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt), | ||
374 | .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt), | ||
375 | .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt), | ||
376 | .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt), | ||
377 | .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt), | ||
378 | .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt), | ||
379 | .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt), | ||
380 | .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt), | ||
381 | .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt), | ||
382 | .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt), | ||
383 | .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt), | ||
384 | .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt), | ||
385 | .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt), | ||
386 | .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt), | ||
387 | .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt), | ||
388 | .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt), | ||
389 | .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt), | ||
390 | .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt), | ||
391 | .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt), | ||
392 | .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt), | ||
393 | .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt), | ||
394 | .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt), | ||
395 | .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt), | ||
396 | .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt), | ||
397 | .cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt), | ||
398 | .cr_rxotherlocalphyerrcnt = | ||
399 | IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt), | ||
400 | .cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt), | ||
401 | .cr_locallinkintegrityerrcnt = | ||
402 | IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt), | ||
403 | .cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt), | ||
404 | .cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt), | ||
405 | .cr_psstat = IPATH_CREG_OFFSET(PSStat), | ||
406 | .cr_psstart = IPATH_CREG_OFFSET(PSStart), | ||
407 | .cr_psinterval = IPATH_CREG_OFFSET(PSInterval), | ||
408 | .cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount), | ||
409 | .cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount), | ||
410 | .cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount), | ||
411 | .cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount), | ||
412 | .cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount), | ||
413 | }; | ||
414 | |||
415 | /* kr_control bits */ | ||
416 | #define INFINIPATH_C_RESET (1U<<7) | ||
417 | |||
418 | /* kr_intstatus, kr_intclear, kr_intmask bits */ | ||
419 | #define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1) | ||
420 | #define INFINIPATH_I_RCVURG_SHIFT 32 | ||
421 | #define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1) | ||
422 | #define INFINIPATH_I_RCVAVAIL_SHIFT 0 | ||
423 | #define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27) | ||
424 | |||
425 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ | ||
426 | #define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL | ||
427 | #define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0 | ||
428 | #define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL | ||
429 | #define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL | ||
430 | #define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL | ||
431 | #define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL | ||
432 | #define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL | ||
433 | #define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL | ||
434 | #define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL | ||
435 | #define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL | ||
436 | #define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL | ||
437 | #define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL | ||
438 | /* specific to this chip */ | ||
439 | #define INFINIPATH_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL | ||
440 | #define INFINIPATH_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL | ||
441 | #define INFINIPATH_HWE_SDMAMEMREADERR 0x0000000010000000ULL | ||
442 | #define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL | ||
443 | #define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL | ||
444 | #define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL | ||
445 | #define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL | ||
446 | #define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL | ||
447 | #define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL | ||
448 | #define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL | ||
449 | #define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL | ||
450 | #define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL | ||
451 | |||
452 | #define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F | ||
453 | #define IBA7220_IBCS_LINKSTATE_SHIFT 5 | ||
454 | #define IBA7220_IBCS_LINKSPEED_SHIFT 8 | ||
455 | #define IBA7220_IBCS_LINKWIDTH_SHIFT 9 | ||
456 | |||
457 | #define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL | ||
458 | #define IBA7220_IBCC_LINKCMD_SHIFT 19 | ||
459 | #define IBA7220_IBCC_MAXPKTLEN_SHIFT 21 | ||
460 | |||
461 | /* kr_ibcddrctrl bits */ | ||
462 | #define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL | ||
463 | #define IBA7220_IBC_DLIDLMC_SHIFT 32 | ||
464 | #define IBA7220_IBC_HRTBT_MASK 3 | ||
465 | #define IBA7220_IBC_HRTBT_SHIFT 16 | ||
466 | #define IBA7220_IBC_HRTBT_ENB 0x10000UL | ||
467 | #define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8) | ||
468 | #define IBA7220_IBC_LREV_MASK 1 | ||
469 | #define IBA7220_IBC_LREV_SHIFT 8 | ||
470 | #define IBA7220_IBC_RXPOL_MASK 1 | ||
471 | #define IBA7220_IBC_RXPOL_SHIFT 7 | ||
472 | #define IBA7220_IBC_WIDTH_SHIFT 5 | ||
473 | #define IBA7220_IBC_WIDTH_MASK 0x3 | ||
474 | #define IBA7220_IBC_WIDTH_1X_ONLY (0<<IBA7220_IBC_WIDTH_SHIFT) | ||
475 | #define IBA7220_IBC_WIDTH_4X_ONLY (1<<IBA7220_IBC_WIDTH_SHIFT) | ||
476 | #define IBA7220_IBC_WIDTH_AUTONEG (2<<IBA7220_IBC_WIDTH_SHIFT) | ||
477 | #define IBA7220_IBC_SPEED_AUTONEG (1<<1) | ||
478 | #define IBA7220_IBC_SPEED_SDR (1<<2) | ||
479 | #define IBA7220_IBC_SPEED_DDR (1<<3) | ||
480 | #define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7<<1) | ||
481 | #define IBA7220_IBC_IBTA_1_2_MASK (1) | ||
482 | |||
483 | /* kr_ibcddrstatus */ | ||
484 | /* link latency shift is 0, don't bother defining */ | ||
485 | #define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff | ||
486 | |||
487 | /* kr_extstatus bits */ | ||
488 | #define INFINIPATH_EXTS_FREQSEL 0x2 | ||
489 | #define INFINIPATH_EXTS_SERDESSEL 0x4 | ||
490 | #define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 | ||
491 | #define INFINIPATH_EXTS_MEMBIST_DISABLED 0x0000000000008000 | ||
492 | |||
493 | /* kr_xgxsconfig bits */ | ||
494 | #define INFINIPATH_XGXS_RESET 0x5ULL | ||
495 | #define INFINIPATH_XGXS_FC_SAFE (1ULL<<63) | ||
496 | |||
497 | /* kr_rcvpktledcnt */ | ||
498 | #define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */ | ||
499 | #define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */ | ||
500 | |||
501 | #define _IPATH_GPIO_SDA_NUM 1 | ||
502 | #define _IPATH_GPIO_SCL_NUM 0 | ||
503 | |||
504 | #define IPATH_GPIO_SDA (1ULL << \ | ||
505 | (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) | ||
506 | #define IPATH_GPIO_SCL (1ULL << \ | ||
507 | (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) | ||
508 | |||
509 | #define IBA7220_R_INTRAVAIL_SHIFT 17 | ||
510 | #define IBA7220_R_TAILUPD_SHIFT 35 | ||
511 | #define IBA7220_R_PORTCFG_SHIFT 36 | ||
512 | |||
513 | #define INFINIPATH_JINT_PACKETSHIFT 16 | ||
514 | #define INFINIPATH_JINT_DEFAULT_IDLE_TICKS 0 | ||
515 | #define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0 | ||
516 | |||
517 | #define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ | ||
518 | |||
519 | /* | ||
520 | * The size bits give us 2^N, in KB units. 0 marks the entry as | ||
521 | * invalid, and 7 is reserved. We currently use only 2KB and 4KB. | ||
522 | */ | ||
523 | #define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */ | ||
524 | #define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */ | ||
525 | #define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */ | ||
526 | #define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ | ||
527 | |||
528 | #define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */ | ||
529 | |||
530 | static char int_type[16] = "auto"; | ||
531 | module_param_string(interrupt_type, int_type, sizeof(int_type), 0444); | ||
532 | MODULE_PARM_DESC(int_type, " interrupt_type=auto|force_msi|force_intx"); | ||
533 | |||
534 | /* packet rate matching delay; chip has support */ | ||
535 | static u8 rate_to_delay[2][2] = { | ||
536 | /* 1x, 4x */ | ||
537 | { 8, 2 }, /* SDR */ | ||
538 | { 4, 1 } /* DDR */ | ||
539 | }; | ||
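The table is indexed by speed (row) and width (column), per the comments above. A hypothetical lookup helper, shown only to illustrate the indexing (the actual users of rate_to_delay are not in this hunk):

static inline u8 example_rate_delay(int is_ddr, int is_4x)
{
	/* e.g. SDR 4x -> rate_to_delay[0][1] == 2 */
	return rate_to_delay[is_ddr ? 1 : 0][is_4x ? 1 : 0];
}
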
540 | |||
541 | /* 7220 specific hardware errors... */ | ||
542 | static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = { | ||
543 | INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"), | ||
544 | INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"), | ||
545 | /* | ||
546 | * In practice, it's unlikely that we'll see PCIe PLL, or bus | ||
547 | * parity or memory parity error failures, because most likely we | ||
548 | * won't be able to talk to the core of the chip. Nonetheless, we | ||
549 | * might see them, if they are in parts of the PCIe core that aren't | ||
550 | * essential. | ||
551 | */ | ||
552 | INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"), | ||
553 | INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"), | ||
554 | INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"), | ||
555 | INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"), | ||
556 | INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"), | ||
557 | INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"), | ||
558 | INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), | ||
559 | INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl header queue"), | ||
560 | INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl data queue"), | ||
561 | INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"), | ||
562 | INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"), | ||
563 | INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT, | ||
564 | "PCIe serdes Q0 no clock"), | ||
565 | INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT, | ||
566 | "PCIe serdes Q1 no clock"), | ||
567 | INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT, | ||
568 | "PCIe serdes Q2 no clock"), | ||
569 | INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT, | ||
570 | "PCIe serdes Q3 no clock"), | ||
571 | INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR, | ||
572 | "DDS RXEQ memory parity"), | ||
573 | INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"), | ||
574 | INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR, | ||
575 | "PCIe uC oct0 memory parity"), | ||
576 | INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR, | ||
577 | "PCIe uC oct1 memory parity"), | ||
578 | }; | ||
579 | |||
580 | static void autoneg_work(struct work_struct *); | ||
581 | |||
582 | /* | ||
583 | * the offset is different for different configured port numbers, since | ||
584 | * port0 is fixed in size, but others can vary. Make it a function to | ||
585 | * make the issue more obvious. | ||
586 | */ | ||
587 | static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port) | ||
588 | { | ||
589 | return port ? dd->ipath_p0_rcvegrcnt + | ||
590 | (port-1) * dd->ipath_rcvegrcnt : 0; | ||
591 | } | ||
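A worked example of the index arithmetic above, with hypothetical counts (port 0 has its own fixed eager-TID count; every other port uses the common per-port count):

/* Assume dd->ipath_p0_rcvegrcnt == 2048 and dd->ipath_rcvegrcnt == 4096:
 *   port_egrtid_idx(dd, 0) == 0
 *   port_egrtid_idx(dd, 1) == 2048                    (right after port 0)
 *   port_egrtid_idx(dd, 2) == 2048 + 1 * 4096 == 6144
 */
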
592 | |||
593 | static void ipath_7220_txe_recover(struct ipath_devdata *dd) | ||
594 | { | ||
595 | ++ipath_stats.sps_txeparity; | ||
596 | |||
597 | dev_info(&dd->pcidev->dev, | ||
598 | "Recovering from TXE PIO parity error\n"); | ||
599 | ipath_disarm_senderrbufs(dd); | ||
600 | } | ||
601 | |||
602 | |||
603 | /** | ||
604 | * ipath_7220_handle_hwerrors - display hardware errors. | ||
605 | * @dd: the infinipath device | ||
606 | * @msg: the output buffer | ||
607 | * @msgl: the size of the output buffer | ||
608 | * | ||
609 | * We reuse the same message buffer as ipath_handle_errors() to | ||
610 | * avoid excessive stack use. Most hardware errors are | ||
611 | * catastrophic, but for right now, we'll print them and | ||
612 | * continue. | ||
613 | */ | ||
614 | static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg, | ||
615 | size_t msgl) | ||
616 | { | ||
617 | ipath_err_t hwerrs; | ||
618 | u32 bits, ctrl; | ||
619 | int isfatal = 0; | ||
620 | char bitsmsg[64]; | ||
621 | int log_idx; | ||
622 | |||
623 | hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); | ||
624 | if (!hwerrs) { | ||
625 | /* | ||
626 | * Better than printing confusing messages. | ||
627 | * This seems to be related to clearing the crc error, or | ||
628 | * the pll error during init. | ||
629 | */ | ||
630 | ipath_cdbg(VERBOSE, "Called but no hardware errors set\n"); | ||
631 | goto bail; | ||
632 | } else if (hwerrs == ~0ULL) { | ||
633 | ipath_dev_err(dd, "Read of hardware error status failed " | ||
634 | "(all bits set); ignoring\n"); | ||
635 | goto bail; | ||
636 | } | ||
637 | ipath_stats.sps_hwerrs++; | ||
638 | |||
639 | /* | ||
640 | * Always clear the error status register, except MEMBISTFAIL, | ||
641 | * regardless of whether we continue or stop using the chip. | ||
642 | * We want that set so we know it failed, even across driver reload. | ||
643 | * We'll still ignore it in the hwerrmask. We do this partly for | ||
644 | * diagnostics, but also for support. | ||
645 | */ | ||
646 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, | ||
647 | hwerrs & ~INFINIPATH_HWE_MEMBISTFAILED); | ||
648 | |||
649 | hwerrs &= dd->ipath_hwerrmask; | ||
650 | |||
651 | /* We log some errors to EEPROM, check if we have any of those. */ | ||
652 | for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) | ||
653 | if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log) | ||
654 | ipath_inc_eeprom_err(dd, log_idx, 1); | ||
655 | /* | ||
656 | * Make sure we get this much out, unless told to be quiet, | ||
657 | * or it's occurred within the last 5 seconds. | ||
658 | */ | ||
659 | if ((hwerrs & ~(dd->ipath_lasthwerror | | ||
660 | ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
661 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
662 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) || | ||
663 | (ipath_debug & __IPATH_VERBDBG)) | ||
664 | dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " | ||
665 | "(cleared)\n", (unsigned long long) hwerrs); | ||
666 | dd->ipath_lasthwerror |= hwerrs; | ||
667 | |||
668 | if (hwerrs & ~dd->ipath_hwe_bitsextant) | ||
669 | ipath_dev_err(dd, "hwerror interrupt with unknown errors " | ||
670 | "%llx set\n", (unsigned long long) | ||
671 | (hwerrs & ~dd->ipath_hwe_bitsextant)); | ||
672 | |||
673 | if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) | ||
674 | ipath_sd7220_clr_ibpar(dd); | ||
675 | |||
676 | ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); | ||
677 | if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) { | ||
678 | /* | ||
679 | * Parity errors in send memory are recoverable by h/w | ||
680 | * just do housekeeping, exit freeze mode and continue. | ||
681 | */ | ||
682 | if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
683 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
684 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) { | ||
685 | ipath_7220_txe_recover(dd); | ||
686 | hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | | ||
687 | INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) | ||
688 | << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT); | ||
689 | } | ||
690 | if (hwerrs) { | ||
691 | /* | ||
692 | * If any bits are set that we aren't ignoring, only make the | ||
693 | * complaint once, in case it's stuck or recurring and we get | ||
694 | * here multiple times. | ||
695 | * Force the link down, so the switch knows, and the | ||
696 | * LEDs are turned off. | ||
697 | */ | ||
698 | if (dd->ipath_flags & IPATH_INITTED) { | ||
699 | ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); | ||
700 | ipath_setup_7220_setextled(dd, | ||
701 | INFINIPATH_IBCS_L_STATE_DOWN, | ||
702 | INFINIPATH_IBCS_LT_STATE_DISABLED); | ||
703 | ipath_dev_err(dd, "Fatal Hardware Error " | ||
704 | "(freeze mode), no longer" | ||
705 | " usable, SN %.16s\n", | ||
706 | dd->ipath_serial); | ||
707 | isfatal = 1; | ||
708 | } | ||
709 | /* | ||
710 | * Mark as having had an error for driver, and also | ||
711 | * for /sys and status word mapped to user programs. | ||
712 | * This marks unit as not usable, until reset. | ||
713 | */ | ||
714 | *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; | ||
715 | *dd->ipath_statusp |= IPATH_STATUS_HWERROR; | ||
716 | dd->ipath_flags &= ~IPATH_INITTED; | ||
717 | } else { | ||
718 | ipath_dbg("Clearing freezemode on ignored or " | ||
719 | "recovered hardware error\n"); | ||
720 | ipath_clear_freeze(dd); | ||
721 | } | ||
722 | } | ||
723 | |||
724 | *msg = '\0'; | ||
725 | |||
726 | if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { | ||
727 | strlcat(msg, "[Memory BIST test failed, " | ||
728 | "InfiniPath hardware unusable]", msgl); | ||
729 | /* ignore from now on, so disable until driver reloaded */ | ||
730 | *dd->ipath_statusp |= IPATH_STATUS_HWERROR; | ||
731 | dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; | ||
732 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | ||
733 | dd->ipath_hwerrmask); | ||
734 | } | ||
735 | |||
736 | ipath_format_hwerrors(hwerrs, | ||
737 | ipath_7220_hwerror_msgs, | ||
738 | ARRAY_SIZE(ipath_7220_hwerror_msgs), | ||
739 | msg, msgl); | ||
740 | |||
741 | if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK | ||
742 | << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) { | ||
743 | bits = (u32) ((hwerrs >> | ||
744 | INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) & | ||
745 | INFINIPATH_HWE_PCIEMEMPARITYERR_MASK); | ||
746 | snprintf(bitsmsg, sizeof bitsmsg, | ||
747 | "[PCIe Mem Parity Errs %x] ", bits); | ||
748 | strlcat(msg, bitsmsg, msgl); | ||
749 | } | ||
750 | |||
751 | #define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \ | ||
752 | INFINIPATH_HWE_COREPLL_RFSLIP) | ||
753 | |||
754 | if (hwerrs & _IPATH_PLL_FAIL) { | ||
755 | snprintf(bitsmsg, sizeof bitsmsg, | ||
756 | "[PLL failed (%llx), InfiniPath hardware unusable]", | ||
757 | (unsigned long long) hwerrs & _IPATH_PLL_FAIL); | ||
758 | strlcat(msg, bitsmsg, msgl); | ||
759 | /* ignore from now on, so disable until driver reloaded */ | ||
760 | dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL); | ||
761 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | ||
762 | dd->ipath_hwerrmask); | ||
763 | } | ||
764 | |||
765 | if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) { | ||
766 | /* | ||
767 | * If it occurs, it is left masked since the external | ||
768 | * interface is unused. | ||
769 | */ | ||
770 | dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED; | ||
771 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | ||
772 | dd->ipath_hwerrmask); | ||
773 | } | ||
774 | |||
775 | ipath_dev_err(dd, "%s hardware error\n", msg); | ||
776 | /* | ||
777 | * For the /sys status file. If no trailing } is copied, we'll | ||
778 | * know it was truncated. | ||
779 | */ | ||
780 | if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) | ||
781 | snprintf(dd->ipath_freezemsg, dd->ipath_freezelen, | ||
782 | "{%s}", msg); | ||
783 | bail:; | ||
784 | } | ||
785 | |||
786 | /** | ||
787 | * ipath_7220_boardname - fill in the board name | ||
788 | * @dd: the infinipath device | ||
789 | * @name: the output buffer | ||
790 | * @namelen: the size of the output buffer | ||
791 | * | ||
792 | * info is based on the board revision register | ||
793 | */ | ||
794 | static int ipath_7220_boardname(struct ipath_devdata *dd, char *name, | ||
795 | size_t namelen) | ||
796 | { | ||
797 | char *n = NULL; | ||
798 | u8 boardrev = dd->ipath_boardrev; | ||
799 | int ret; | ||
800 | |||
801 | if (boardrev == 15) { | ||
802 | /* | ||
803 | * Emulator sometimes comes up all-ones, rather than zero. | ||
804 | */ | ||
805 | boardrev = 0; | ||
806 | dd->ipath_boardrev = boardrev; | ||
807 | } | ||
808 | switch (boardrev) { | ||
809 | case 0: | ||
810 | n = "InfiniPath_7220_Emulation"; | ||
811 | break; | ||
812 | case 1: | ||
813 | n = "InfiniPath_QLE7240"; | ||
814 | break; | ||
815 | case 2: | ||
816 | n = "InfiniPath_QLE7280"; | ||
817 | break; | ||
818 | case 3: | ||
819 | n = "InfiniPath_QLE7242"; | ||
820 | break; | ||
821 | case 4: | ||
822 | n = "InfiniPath_QEM7240"; | ||
823 | break; | ||
824 | case 5: | ||
825 | n = "InfiniPath_QMI7240"; | ||
826 | break; | ||
827 | case 6: | ||
828 | n = "InfiniPath_QMI7264"; | ||
829 | break; | ||
830 | case 7: | ||
831 | n = "InfiniPath_QMH7240"; | ||
832 | break; | ||
833 | case 8: | ||
834 | n = "InfiniPath_QME7240"; | ||
835 | break; | ||
836 | case 9: | ||
837 | n = "InfiniPath_QLE7250"; | ||
838 | break; | ||
839 | case 10: | ||
840 | n = "InfiniPath_QLE7290"; | ||
841 | break; | ||
842 | case 11: | ||
843 | n = "InfiniPath_QEM7250"; | ||
844 | break; | ||
845 | case 12: | ||
846 | n = "InfiniPath_QLE-Bringup"; | ||
847 | break; | ||
848 | default: | ||
849 | ipath_dev_err(dd, | ||
850 | "Don't yet know about board with ID %u\n", | ||
851 | boardrev); | ||
852 | snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u", | ||
853 | boardrev); | ||
854 | break; | ||
855 | } | ||
856 | if (n) | ||
857 | snprintf(name, namelen, "%s", n); | ||
858 | |||
859 | if (dd->ipath_majrev != 5 || !dd->ipath_minrev || | ||
860 | dd->ipath_minrev > 2) { | ||
861 | ipath_dev_err(dd, "Unsupported InfiniPath hardware " | ||
862 | "revision %u.%u!\n", | ||
863 | dd->ipath_majrev, dd->ipath_minrev); | ||
864 | ret = 1; | ||
865 | } else if (dd->ipath_minrev == 1 && | ||
866 | !(dd->ipath_flags & IPATH_INITTED)) { | ||
867 | /* Rev1 chips are prototype. Complain at init, but allow use */ | ||
868 | ipath_dev_err(dd, "Unsupported hardware " | ||
869 | "revision %u.%u, Contact support@qlogic.com\n", | ||
870 | dd->ipath_majrev, dd->ipath_minrev); | ||
871 | ret = 0; | ||
872 | } else | ||
873 | ret = 0; | ||
874 | |||
875 | /* | ||
876 | * Set here not in ipath_init_*_funcs because we have to do | ||
877 | * it after we can read chip registers. | ||
878 | */ | ||
879 | dd->ipath_ureg_align = 0x10000; /* 64KB alignment */ | ||
880 | |||
881 | return ret; | ||
882 | } | ||
883 | |||
884 | /** | ||
885 | * ipath_7220_init_hwerrors - enable hardware errors | ||
886 | * @dd: the infinipath device | ||
887 | * | ||
888 | * now that we have finished initializing everything that might reasonably | ||
889 | * cause a hardware error, and cleared those errors bits as they occur, | ||
890 | * we can enable hardware errors in the mask (potentially enabling | ||
891 | * freeze mode), and enable hardware errors as errors (along with | ||
892 | * everything else) in errormask | ||
893 | */ | ||
894 | static void ipath_7220_init_hwerrors(struct ipath_devdata *dd) | ||
895 | { | ||
896 | ipath_err_t val; | ||
897 | u64 extsval; | ||
898 | |||
899 | extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus); | ||
900 | |||
901 | if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST | | ||
902 | INFINIPATH_EXTS_MEMBIST_DISABLED))) | ||
903 | ipath_dev_err(dd, "MemBIST did not complete!\n"); | ||
904 | if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED) | ||
905 | dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n"); | ||
906 | |||
907 | val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */ | ||
908 | |||
909 | if (!dd->ipath_boardrev) /* no PLL for Emulator */ | ||
910 | val &= ~INFINIPATH_HWE_SERDESPLLFAILED; | ||
911 | |||
912 | if (dd->ipath_minrev == 1) | ||
913 | val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */ | ||
914 | |||
915 | val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR; | ||
916 | dd->ipath_hwerrmask = val; | ||
917 | |||
918 | /* | ||
919 | * special trigger "error" is for debugging purposes. It | ||
920 | * works around a processor/chipset problem. The error | ||
921 | * interrupt allows us to count occurrences, but we don't | ||
922 | * want to pay the overhead for normal use. Emulation only. | ||
923 | */ | ||
924 | if (!dd->ipath_boardrev) | ||
925 | dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER; | ||
926 | } | ||
927 | |||
928 | /* | ||
929 | * All detailed interaction with the SerDes has been moved to ipath_sd7220.c | ||
930 | * | ||
931 | * The portion of IBA7220-specific bringup_serdes() that actually deals with | ||
932 | * registers and memory within the SerDes itself is ipath_sd7220_init(). | ||
933 | */ | ||
934 | |||
935 | /** | ||
936 | * ipath_7220_bringup_serdes - bring up the serdes | ||
937 | * @dd: the infinipath device | ||
938 | */ | ||
939 | static int ipath_7220_bringup_serdes(struct ipath_devdata *dd) | ||
940 | { | ||
941 | int ret = 0; | ||
942 | u64 val, prev_val, guid; | ||
943 | int was_reset; /* Note whether uC was reset */ | ||
944 | |||
945 | ipath_dbg("Trying to bringup serdes\n"); | ||
946 | |||
947 | if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) & | ||
948 | INFINIPATH_HWE_SERDESPLLFAILED) { | ||
949 | ipath_dbg("At start, serdes PLL failed bit set " | ||
950 | "in hwerrstatus, clearing and continuing\n"); | ||
951 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, | ||
952 | INFINIPATH_HWE_SERDESPLLFAILED); | ||
953 | } | ||
954 | |||
955 | dd->ibdeltainprog = 1; | ||
956 | dd->ibsymsnap = | ||
957 | ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
958 | dd->iblnkerrsnap = | ||
959 | ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
960 | |||
961 | if (!dd->ipath_ibcddrctrl) { | ||
962 | /* not on re-init after reset */ | ||
963 | dd->ipath_ibcddrctrl = | ||
964 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl); | ||
965 | |||
966 | if (dd->ipath_link_speed_enabled == | ||
967 | (IPATH_IB_SDR | IPATH_IB_DDR)) | ||
968 | dd->ipath_ibcddrctrl |= | ||
969 | IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
970 | IBA7220_IBC_IBTA_1_2_MASK; | ||
971 | else | ||
972 | dd->ipath_ibcddrctrl |= | ||
973 | dd->ipath_link_speed_enabled == IPATH_IB_DDR | ||
974 | ? IBA7220_IBC_SPEED_DDR : | ||
975 | IBA7220_IBC_SPEED_SDR; | ||
976 | if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X | | ||
977 | IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)) | ||
978 | dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG; | ||
979 | else | ||
980 | dd->ipath_ibcddrctrl |= | ||
981 | dd->ipath_link_width_enabled == IB_WIDTH_4X | ||
982 | ? IBA7220_IBC_WIDTH_4X_ONLY : | ||
983 | IBA7220_IBC_WIDTH_1X_ONLY; | ||
984 | |||
985 | /* always enable these on driver reload, not sticky */ | ||
986 | dd->ipath_ibcddrctrl |= | ||
987 | IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT; | ||
988 | dd->ipath_ibcddrctrl |= | ||
989 | IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; | ||
990 | /* | ||
991 | * automatic lane reversal detection for receive | ||
992 | * doesn't work correctly in rev 1, so disable it | ||
993 | * on that rev, otherwise enable (disabling not | ||
994 | * sticky across reload for >rev1) | ||
995 | */ | ||
996 | if (dd->ipath_minrev == 1) | ||
997 | dd->ipath_ibcddrctrl &= | ||
998 | ~IBA7220_IBC_LANE_REV_SUPPORTED; | ||
999 | else | ||
1000 | dd->ipath_ibcddrctrl |= | ||
1001 | IBA7220_IBC_LANE_REV_SUPPORTED; | ||
1002 | } | ||
1003 | |||
1004 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl, | ||
1005 | dd->ipath_ibcddrctrl); | ||
1006 | |||
1007 | ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0ULL); | ||
1008 | |||
1009 | /* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */ | ||
1010 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl); | ||
1011 | /* remember if uC was in Reset or not, for dactrim */ | ||
1012 | was_reset = (val & 1); | ||
1013 | ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n", | ||
1014 | was_reset ? "Asserted" : "Negated", (unsigned long long) | ||
1015 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); | ||
1016 | |||
1017 | if (dd->ipath_boardrev) { | ||
1018 | /* | ||
1019 | * Hardware is not emulator, and may have been reset. Init it. | ||
1020 | * Below will release reset, but needs to know if chip was | ||
1021 | * originally in reset, to only trim DACs on first time | ||
1022 | * after chip reset or powercycle (not driver reload) | ||
1023 | */ | ||
1024 | ret = ipath_sd7220_init(dd, was_reset); | ||
1025 | } | ||
1026 | |||
1027 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); | ||
1028 | prev_val = val; | ||
1029 | val |= INFINIPATH_XGXS_FC_SAFE; | ||
1030 | if (val != prev_val) { | ||
1031 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | ||
1032 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | ||
1033 | } | ||
1034 | if (val & INFINIPATH_XGXS_RESET) | ||
1035 | val &= ~INFINIPATH_XGXS_RESET; | ||
1036 | if (val != prev_val) | ||
1037 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | ||
1038 | |||
1039 | ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n", | ||
1040 | (unsigned long long) | ||
1041 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig), | ||
1042 | (unsigned long long) prev_val); | ||
1043 | |||
1044 | guid = be64_to_cpu(dd->ipath_guid); | ||
1045 | |||
1046 | if (!guid) { | ||
1047 | /* have to have something, so use likely unique tsc */ | ||
1048 | guid = get_cycles(); | ||
1049 | ipath_dbg("No GUID for heartbeat, faking %llx\n", | ||
1050 | (unsigned long long)guid); | ||
1051 | } else | ||
1052 | ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", | ||
1053 | (unsigned long long) guid); | ||
1054 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid); | ||
1055 | return ret; | ||
1056 | } | ||
1057 | |||
1058 | static void ipath_7220_config_jint(struct ipath_devdata *dd, | ||
1059 | u16 idle_ticks, u16 max_packets) | ||
1060 | { | ||
1061 | |||
1062 | /* | ||
1063 | * We can request a receive interrupt for 1 or more packets | ||
1064 | * from current offset. | ||
1065 | */ | ||
1066 | if (idle_ticks == 0 || max_packets == 0) | ||
1067 | /* interrupt after one packet if no mitigation */ | ||
1068 | dd->ipath_rhdrhead_intr_off = | ||
1069 | 1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT; | ||
1070 | else | ||
1071 | /* Turn off RcvHdrHead interrupts if using mitigation */ | ||
1072 | dd->ipath_rhdrhead_intr_off = 0ULL; | ||
1073 | |||
1074 | /* refresh kernel RcvHdrHead registers... */ | ||
1075 | ipath_write_ureg(dd, ur_rcvhdrhead, | ||
1076 | dd->ipath_rhdrhead_intr_off | | ||
1077 | dd->ipath_pd[0]->port_head, 0); | ||
1078 | |||
1079 | dd->ipath_jint_max_packets = max_packets; | ||
1080 | dd->ipath_jint_idle_ticks = idle_ticks; | ||
1081 | ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload, | ||
1082 | ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) | | ||
1083 | idle_ticks); | ||
1084 | } | ||
1085 | |||
1086 | /** | ||
1087 | * ipath_7220_quiet_serdes - set serdes to txidle | ||
1088 | * @dd: the infinipath device | ||
1089 | * Called when driver is being unloaded | ||
1090 | */ | ||
1091 | static void ipath_7220_quiet_serdes(struct ipath_devdata *dd) | ||
1092 | { | ||
1093 | u64 val; | ||
1094 | if (dd->ibsymdelta || dd->iblnkerrdelta || | ||
1095 | dd->ibdeltainprog) { | ||
1096 | u64 diagc; | ||
1097 | /* enable counter writes */ | ||
1098 | diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); | ||
1099 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, | ||
1100 | diagc | INFINIPATH_DC_COUNTERWREN); | ||
1101 | |||
1102 | if (dd->ibsymdelta || dd->ibdeltainprog) { | ||
1103 | val = ipath_read_creg32(dd, | ||
1104 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
1105 | if (dd->ibdeltainprog) | ||
1106 | val -= val - dd->ibsymsnap; | ||
1107 | val -= dd->ibsymdelta; | ||
1108 | ipath_write_creg(dd, | ||
1109 | dd->ipath_cregs->cr_ibsymbolerrcnt, val); | ||
1110 | } | ||
1111 | if (dd->iblnkerrdelta || dd->ibdeltainprog) { | ||
1112 | val = ipath_read_creg32(dd, | ||
1113 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
1114 | if (dd->ibdeltainprog) | ||
1115 | val -= val - dd->iblnkerrsnap; | ||
1116 | val -= dd->iblnkerrdelta; | ||
1117 | ipath_write_creg(dd, | ||
1118 | dd->ipath_cregs->cr_iblinkerrrecovcnt, val); | ||
1119 | } | ||
1120 | |||
1121 | /* and disable counter writes */ | ||
1122 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); | ||
1123 | } | ||
1124 | |||
1125 | dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; | ||
1126 | wake_up(&dd->ipath_autoneg_wait); | ||
1127 | cancel_delayed_work(&dd->ipath_autoneg_work); | ||
1128 | flush_scheduled_work(); | ||
1129 | ipath_shutdown_relock_poll(dd); | ||
1130 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); | ||
1131 | val |= INFINIPATH_XGXS_RESET; | ||
1132 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | ||
1133 | } | ||
1134 | |||
1135 | static int ipath_7220_intconfig(struct ipath_devdata *dd) | ||
1136 | { | ||
1137 | ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks, | ||
1138 | dd->ipath_jint_max_packets); | ||
1139 | return 0; | ||
1140 | } | ||
1141 | |||
1142 | /** | ||
1143 | * ipath_setup_7220_setextled - set the state of the two external LEDs | ||
1144 | * @dd: the infinipath device | ||
1145 | * @lst: the L state | ||
1146 | * @ltst: the LT state | ||
1147 | * | ||
1148 | * These LEDs indicate the physical and logical state of IB link. | ||
1149 | * For this chip (at least with recommended board pinouts), LED1 | ||
1150 | * is Yellow (logical state) and LED2 is Green (physical state). | ||
1151 | * | ||
1152 | * Note: We try to match the Mellanox HCA LED behavior as best | ||
1153 | * we can. Green indicates physical link state is OK (something is | ||
1154 | * plugged in, and we can train). | ||
1155 | * Amber indicates the link is logically up (ACTIVE). | ||
1156 | * Mellanox further blinks the amber LED to indicate data packet | ||
1157 | * activity, but we have no hardware support for that, so it would | ||
1158 | * require waking up every 10-20 msecs and checking the counters | ||
1159 | * on the chip, and then turning the LED off if appropriate. That's | ||
1160 | * visible overhead, so not something we will do. | ||
1161 | * | ||
1162 | */ | ||
1163 | static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst, | ||
1164 | u64 ltst) | ||
1165 | { | ||
1166 | u64 extctl, ledblink = 0; | ||
1167 | unsigned long flags = 0; | ||
1168 | |||
1169 | /* the diags use the LED to indicate diag info, so we leave | ||
1170 | * the external LED alone when the diags are running */ | ||
1171 | if (ipath_diag_inuse) | ||
1172 | return; | ||
1173 | |||
1174 | /* Allow override of LED display for, e.g. Locating system in rack */ | ||
1175 | if (dd->ipath_led_override) { | ||
1176 | ltst = (dd->ipath_led_override & IPATH_LED_PHYS) | ||
1177 | ? INFINIPATH_IBCS_LT_STATE_LINKUP | ||
1178 | : INFINIPATH_IBCS_LT_STATE_DISABLED; | ||
1179 | lst = (dd->ipath_led_override & IPATH_LED_LOG) | ||
1180 | ? INFINIPATH_IBCS_L_STATE_ACTIVE | ||
1181 | : INFINIPATH_IBCS_L_STATE_DOWN; | ||
1182 | } | ||
1183 | |||
1184 | spin_lock_irqsave(&dd->ipath_gpio_lock, flags); | ||
1185 | extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | | ||
1186 | INFINIPATH_EXTC_LED2PRIPORT_ON); | ||
1187 | if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) { | ||
1188 | extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; | ||
1189 | /* | ||
1190 | * counts are in chip clock (4ns) periods. | ||
1191 | * This is about 66.6 ms on and | ||
1192 | * 187.5 ms off, with packets rcvd. | ||
1193 | */ | ||
1194 | ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT) | ||
1195 | | ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT); | ||
1196 | } | ||
1197 | if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) | ||
1198 | extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON; | ||
1199 | dd->ipath_extctrl = extctl; | ||
1200 | ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); | ||
1201 | spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags); | ||
1202 | |||
1203 | if (ledblink) /* blink the LED on packet receive */ | ||
1204 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt, | ||
1205 | ledblink); | ||
1206 | } | ||
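/*
 * Editor's note (illustrative sketch, not part of the driver): the blink
 * durations above are programmed in 4 ns chip-clock periods, so a time in
 * microseconds converts as usecs * 1000 / 4.  A hypothetical helper that
 * builds the kr_rcvpktledcnt value could look like:
 *
 *	static u64 ledblink_value(u32 on_usecs, u32 off_usecs)
 *	{
 *		u64 on = (u64)on_usecs * 1000 / 4;
 *		u64 off = (u64)off_usecs * 1000 / 4;
 *
 *		return (on << IBA7220_LEDBLINK_ON_SHIFT) |
 *			(off << IBA7220_LEDBLINK_OFF_SHIFT);
 *	}
 *
 * ledblink_value(66600, 187500) reproduces the constant computed above.
 */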
1207 | |||
1208 | /* | ||
1209 | * Similar to pci_intx(pdev, 1), except that we make sure | ||
1210 | * msi is off... | ||
1211 | */ | ||
1212 | static void ipath_enable_intx(struct pci_dev *pdev) | ||
1213 | { | ||
1214 | u16 cw, new; | ||
1215 | int pos; | ||
1216 | |||
1217 | /* first, turn on INTx */ | ||
1218 | pci_read_config_word(pdev, PCI_COMMAND, &cw); | ||
1219 | new = cw & ~PCI_COMMAND_INTX_DISABLE; | ||
1220 | if (new != cw) | ||
1221 | pci_write_config_word(pdev, PCI_COMMAND, new); | ||
1222 | |||
1223 | /* then turn off MSI */ | ||
1224 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
1225 | if (pos) { | ||
1226 | pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw); | ||
1227 | new = cw & ~PCI_MSI_FLAGS_ENABLE; | ||
1228 | if (new != cw) | ||
1229 | pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new); | ||
1230 | } | ||
1231 | } | ||
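/*
 * Editor's note (assumption, not the driver's actual call sequence): on
 * kernels where the generic PCI helpers suffice, the same effect could be
 * obtained with
 *
 *	pci_disable_msi(pdev);
 *	pci_intx(pdev, 1);
 *
 * The driver open-codes the config space writes above; see
 * ipath_7220_nomsi() and ipath_7220_intr_fallback() for the
 * free_irq()/pci_disable_msi() ordering caveat on newer kernels.
 */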
1232 | |||
1233 | static int ipath_msi_enabled(struct pci_dev *pdev) | ||
1234 | { | ||
1235 | int pos, ret = 0; | ||
1236 | |||
1237 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
1238 | if (pos) { | ||
1239 | u16 cw; | ||
1240 | |||
1241 | pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw); | ||
1242 | ret = !!(cw & PCI_MSI_FLAGS_ENABLE); | ||
1243 | } | ||
1244 | return ret; | ||
1245 | } | ||
1246 | |||
1247 | /* | ||
1248 | * disable msi interrupt if enabled, and clear the flag. | ||
1249 | * flag is used primarily for the fallback to INTx, but | ||
1250 | * is also used in reinit after reset as a flag. | ||
1251 | */ | ||
1252 | static void ipath_7220_nomsi(struct ipath_devdata *dd) | ||
1253 | { | ||
1254 | dd->ipath_msi_lo = 0; | ||
1255 | |||
1256 | if (ipath_msi_enabled(dd->pcidev)) { | ||
1257 | /* | ||
1258 | * free, but don't zero; later kernels require | ||
1259 | * it be freed before disable_msi, so the intx | ||
1260 | * setup has to request it again. | ||
1261 | */ | ||
1262 | if (dd->ipath_irq) | ||
1263 | free_irq(dd->ipath_irq, dd); | ||
1264 | pci_disable_msi(dd->pcidev); | ||
1265 | } | ||
1266 | } | ||
1267 | |||
1268 | /* | ||
1269 | * ipath_setup_7220_cleanup - clean up any per-chip chip-specific stuff | ||
1270 | * @dd: the infinipath device | ||
1271 | * | ||
1272 | * Nothing but msi interrupt cleanup for now. | ||
1273 | * | ||
1274 | * This is called during driver unload. | ||
1275 | */ | ||
1276 | static void ipath_setup_7220_cleanup(struct ipath_devdata *dd) | ||
1277 | { | ||
1278 | ipath_7220_nomsi(dd); | ||
1279 | } | ||
1280 | |||
1281 | |||
1282 | static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev) | ||
1283 | { | ||
1284 | u16 linkstat, minwidth, speed; | ||
1285 | int pos; | ||
1286 | |||
1287 | pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP); | ||
1288 | if (!pos) { | ||
1289 | ipath_dev_err(dd, "Can't find PCI Express capability!\n"); | ||
1290 | goto bail; | ||
1291 | } | ||
1292 | |||
1293 | pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA, | ||
1294 | &linkstat); | ||
1295 | /* | ||
1296 | * speed is bits 3:0, link width is bits 9:4 (we keep the low 5 bits); | ||
1297 | * no defines for them in headers | ||
1298 | */ | ||
1299 | speed = linkstat & 0xf; | ||
1300 | linkstat >>= 4; | ||
1301 | linkstat &= 0x1f; | ||
1302 | dd->ipath_lbus_width = linkstat; | ||
1303 | switch (boardrev) { | ||
1304 | case 0: | ||
1305 | case 2: | ||
1306 | case 10: | ||
1307 | case 12: | ||
1308 | minwidth = 16; /* x16 capable boards */ | ||
1309 | break; | ||
1310 | default: | ||
1311 | minwidth = 8; /* x8 capable boards */ | ||
1312 | break; | ||
1313 | } | ||
1314 | |||
1315 | switch (speed) { | ||
1316 | case 1: | ||
1317 | dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */ | ||
1318 | break; | ||
1319 | case 2: | ||
1320 | dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */ | ||
1321 | break; | ||
1322 | default: /* not defined, assume gen1 */ | ||
1323 | dd->ipath_lbus_speed = 2500; | ||
1324 | break; | ||
1325 | } | ||
1326 | |||
1327 | if (linkstat < minwidth) | ||
1328 | ipath_dev_err(dd, | ||
1329 | "PCIe width %u (x%u HCA), performance " | ||
1330 | "reduced\n", linkstat, minwidth); | ||
1331 | else | ||
1332 | ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n", | ||
1333 | dd->ipath_lbus_speed, linkstat, minwidth); | ||
1334 | |||
1335 | if (speed != 1) | ||
1336 | ipath_dev_err(dd, | ||
1337 | "PCIe linkspeed %u is incorrect; " | ||
1338 | "should be 1 (2500)!\n", speed); | ||
1339 | |||
1340 | bail: | ||
1341 | /* fill in string, even on errors */ | ||
1342 | snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info), | ||
1343 | "PCIe,%uMHz,x%u\n", | ||
1344 | dd->ipath_lbus_speed, | ||
1345 | dd->ipath_lbus_width); | ||
1346 | return; | ||
1347 | } | ||
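/*
 * Editor's note (illustrative, relies on the standard PCIe Link Status
 * layout): the decode above extracts Current Link Speed (bits 3:0) and
 * Negotiated Link Width (bits 9:4) of PCI_EXP_LNKSTA, i.e.
 *
 *	u16 lnksta, speed, width;
 *
 *	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);
 *	speed = lnksta & 0xf;          encoding: 1 = 2.5 GT/s (Gen1), 2 = 5 GT/s (Gen2)
 *	width = (lnksta >> 4) & 0x1f;  negotiated lane count (up to x16 here)
 */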
1348 | |||
1349 | |||
1350 | /** | ||
1351 | * ipath_setup_7220_config - setup PCIe config related stuff | ||
1352 | * @dd: the infinipath device | ||
1353 | * @pdev: the PCI device | ||
1354 | * | ||
1355 | * The pci_enable_msi() call will fail on systems with MSI quirks | ||
1356 | * such as those with the AMD8131, even if the device of interest is not | ||
1357 | * attached to that device (true of the 2.6.13 - 2.6.15 kernels at least; | ||
1358 | * fixed late in 2.6.16). | ||
1359 | * All that can be done is to edit the kernel source to remove the quirk | ||
1360 | * check until that is fixed. | ||
1361 | * We do not need to call pci_enable_msi() for our HyperTransport chip, | ||
1362 | * even though it uses MSI, and we want to avoid the quirk warning, so | ||
1363 | * we call enable_msi only for PCIe. If we do end up needing | ||
1364 | * pci_enable_msi at some point in the future for HT, we'll move the | ||
1365 | * call back into the main init_one code. | ||
1366 | * We save the msi lo and hi values, so we can restore them after | ||
1367 | * chip reset (the kernel PCI infrastructure doesn't yet handle that | ||
1368 | * correctly). | ||
1369 | */ | ||
1370 | static int ipath_setup_7220_config(struct ipath_devdata *dd, | ||
1371 | struct pci_dev *pdev) | ||
1372 | { | ||
1373 | int pos, ret = -1; | ||
1374 | u32 boardrev; | ||
1375 | |||
1376 | dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ | ||
1377 | |||
1378 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
1379 | if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto")) | ||
1380 | ret = pci_enable_msi(pdev); | ||
1381 | if (ret) { | ||
1382 | if (!strcmp(int_type, "force_msi")) { | ||
1383 | ipath_dev_err(dd, "pci_enable_msi failed: %d, " | ||
1384 | "force_msi is on, so not continuing.\n", | ||
1385 | ret); | ||
1386 | return ret; | ||
1387 | } | ||
1388 | |||
1389 | ipath_enable_intx(pdev); | ||
1390 | if (!strcmp(int_type, "auto")) | ||
1391 | ipath_dev_err(dd, "pci_enable_msi failed: %d, " | ||
1392 | "falling back to INTx\n", ret); | ||
1393 | } else if (pos) { | ||
1394 | u16 control; | ||
1395 | pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, | ||
1396 | &dd->ipath_msi_lo); | ||
1397 | pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, | ||
1398 | &dd->ipath_msi_hi); | ||
1399 | pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, | ||
1400 | &control); | ||
1401 | /* now save the data (vector) info */ | ||
1402 | pci_read_config_word(pdev, | ||
1403 | pos + ((control & PCI_MSI_FLAGS_64BIT) | ||
1404 | ? PCI_MSI_DATA_64 : | ||
1405 | PCI_MSI_DATA_32), | ||
1406 | &dd->ipath_msi_data); | ||
1407 | } else | ||
1408 | ipath_dev_err(dd, "Can't find MSI capability, " | ||
1409 | "can't save MSI settings for reset\n"); | ||
1410 | |||
1411 | dd->ipath_irq = pdev->irq; | ||
1412 | |||
1413 | /* | ||
1414 | * We save the cachelinesize also, although it doesn't | ||
1415 | * really matter. | ||
1416 | */ | ||
1417 | pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, | ||
1418 | &dd->ipath_pci_cacheline); | ||
1419 | |||
1420 | /* | ||
1421 | * this function is called early, before ipath_boardrev is set. Can't | ||
1422 | * use ipath_read_kreg64() yet, too early in init, so use readq() | ||
1423 | */ | ||
1424 | boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision]) | ||
1425 | >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK; | ||
1426 | |||
1427 | ipath_7220_pcie_params(dd, boardrev); | ||
1428 | |||
1429 | dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA | | ||
1430 | IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE; | ||
1431 | dd->ipath_pioupd_thresh = 4U; /* set default update threshold */ | ||
1432 | return 0; | ||
1433 | } | ||
1434 | |||
1435 | static void ipath_init_7220_variables(struct ipath_devdata *dd) | ||
1436 | { | ||
1437 | /* | ||
1438 | * setup the register offsets, since they are different for each | ||
1439 | * chip | ||
1440 | */ | ||
1441 | dd->ipath_kregs = &ipath_7220_kregs; | ||
1442 | dd->ipath_cregs = &ipath_7220_cregs; | ||
1443 | |||
1444 | /* | ||
1445 | * bits for selecting i2c direction and values, | ||
1446 | * used for I2C serial flash | ||
1447 | */ | ||
1448 | dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; | ||
1449 | dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; | ||
1450 | dd->ipath_gpio_sda = IPATH_GPIO_SDA; | ||
1451 | dd->ipath_gpio_scl = IPATH_GPIO_SCL; | ||
1452 | |||
1453 | /* | ||
1454 | * Fill in data for field-values that change in IBA7220. | ||
1455 | * We dynamically specify only the mask for LINKTRAININGSTATE | ||
1456 | * and only the shift for LINKSTATE, as they are the only ones | ||
1457 | * that change. Also precalculate the 3 link states of interest | ||
1458 | * and the combined mask. | ||
1459 | */ | ||
1460 | dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT; | ||
1461 | dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK; | ||
1462 | dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK << | ||
1463 | dd->ibcs_ls_shift) | dd->ibcs_lts_mask; | ||
1464 | dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP << | ||
1465 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | | ||
1466 | (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift); | ||
1467 | dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP << | ||
1468 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | | ||
1469 | (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift); | ||
1470 | dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP << | ||
1471 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | | ||
1472 | (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift); | ||
1473 | |||
1474 | /* | ||
1475 | * Fill in data for ibcc field-values that change in IBA7220. | ||
1476 | * We dynamically specify only the mask for LINKINITCMD | ||
1477 | * and only the shift for LINKCMD and MAXPKTLEN, as they are | ||
1478 | * the only ones that change. | ||
1479 | */ | ||
1480 | dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK; | ||
1481 | dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT; | ||
1482 | dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT; | ||
1483 | |||
1484 | /* Fill in shifts for RcvCtrl. */ | ||
1485 | dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT; | ||
1486 | dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT; | ||
1487 | dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT; | ||
1488 | dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT; | ||
1489 | |||
1490 | /* variables for sanity checking interrupt and errors */ | ||
1491 | dd->ipath_hwe_bitsextant = | ||
1492 | (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << | ||
1493 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | | ||
1494 | (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << | ||
1495 | INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | | ||
1496 | (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK << | ||
1497 | INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) | | ||
1498 | INFINIPATH_HWE_PCIE1PLLFAILED | | ||
1499 | INFINIPATH_HWE_PCIE0PLLFAILED | | ||
1500 | INFINIPATH_HWE_PCIEPOISONEDTLP | | ||
1501 | INFINIPATH_HWE_PCIECPLTIMEOUT | | ||
1502 | INFINIPATH_HWE_PCIEBUSPARITYXTLH | | ||
1503 | INFINIPATH_HWE_PCIEBUSPARITYXADM | | ||
1504 | INFINIPATH_HWE_PCIEBUSPARITYRADM | | ||
1505 | INFINIPATH_HWE_MEMBISTFAILED | | ||
1506 | INFINIPATH_HWE_COREPLL_FBSLIP | | ||
1507 | INFINIPATH_HWE_COREPLL_RFSLIP | | ||
1508 | INFINIPATH_HWE_SERDESPLLFAILED | | ||
1509 | INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | | ||
1510 | INFINIPATH_HWE_IBCBUSFRSPCPARITYERR | | ||
1511 | INFINIPATH_HWE_PCIECPLDATAQUEUEERR | | ||
1512 | INFINIPATH_HWE_PCIECPLHDRQUEUEERR | | ||
1513 | INFINIPATH_HWE_SDMAMEMREADERR | | ||
1514 | INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED | | ||
1515 | INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT | | ||
1516 | INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT | | ||
1517 | INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT | | ||
1518 | INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT | | ||
1519 | INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR | | ||
1520 | INFINIPATH_HWE_IB_UC_MEMORYPARITYERR | | ||
1521 | INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR | | ||
1522 | INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR; | ||
1523 | dd->ipath_i_bitsextant = | ||
1524 | INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED | | ||
1525 | (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | | ||
1526 | (INFINIPATH_I_RCVAVAIL_MASK << | ||
1527 | INFINIPATH_I_RCVAVAIL_SHIFT) | | ||
1528 | INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | | ||
1529 | INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO | | ||
1530 | INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE; | ||
1531 | dd->ipath_e_bitsextant = | ||
1532 | INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | | ||
1533 | INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | | ||
1534 | INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | | ||
1535 | INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR | | ||
1536 | INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP | | ||
1537 | INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION | | ||
1538 | INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | | ||
1539 | INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN | | ||
1540 | INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK | | ||
1541 | INFINIPATH_E_SENDSPECIALTRIGGER | | ||
1542 | INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN | | ||
1543 | INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN | | ||
1544 | INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT | | ||
1545 | INFINIPATH_E_SDROPPEDDATAPKT | | ||
1546 | INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | | ||
1547 | INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE | | ||
1548 | INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | | ||
1549 | INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | | ||
1550 | INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | | ||
1551 | INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | | ||
1552 | INFINIPATH_E_SDMAUNEXPDATA | | ||
1553 | INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR | | ||
1554 | INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE | | ||
1555 | INFINIPATH_E_SDMADESCADDRMISALIGN | | ||
1556 | INFINIPATH_E_INVALIDEEPCMD; | ||
1557 | |||
1558 | dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; | ||
1559 | dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; | ||
1560 | dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT; | ||
1561 | dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT; | ||
1562 | dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED | ||
1563 | | IPATH_HAS_LINK_LATENCY; | ||
1564 | |||
1565 | /* | ||
1566 | * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. | ||
1567 | * 2 is some miscellaneous errors, and 3 is reserved for future use. | ||
1568 | */ | ||
1569 | dd->ipath_eep_st_masks[0].hwerrs_to_log = | ||
1570 | INFINIPATH_HWE_TXEMEMPARITYERR_MASK << | ||
1571 | INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT; | ||
1572 | |||
1573 | dd->ipath_eep_st_masks[1].hwerrs_to_log = | ||
1574 | INFINIPATH_HWE_RXEMEMPARITYERR_MASK << | ||
1575 | INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; | ||
1576 | |||
1577 | dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET; | ||
1578 | |||
1579 | ipath_linkrecovery = 0; | ||
1580 | |||
1581 | init_waitqueue_head(&dd->ipath_autoneg_wait); | ||
1582 | INIT_DELAYED_WORK(&dd->ipath_autoneg_work, autoneg_work); | ||
1583 | |||
1584 | dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | ||
1585 | dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR; | ||
1586 | |||
1587 | dd->ipath_link_width_enabled = dd->ipath_link_width_supported; | ||
1588 | dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported; | ||
1589 | /* | ||
1590 | * set the initial values to reasonable default, will be set | ||
1591 | * for real when link is up. | ||
1592 | */ | ||
1593 | dd->ipath_link_width_active = IB_WIDTH_4X; | ||
1594 | dd->ipath_link_speed_active = IPATH_IB_SDR; | ||
1595 | dd->delay_mult = rate_to_delay[0][1]; | ||
1596 | } | ||
1597 | |||
1598 | |||
1599 | /* | ||
1600 | * Setup the MSI stuff again after a reset. I'd like to just call | ||
1601 | * pci_enable_msi() and request_irq() again, but when I do that, | ||
1602 | * the MSI enable bit doesn't get set in the command word, and | ||
1603 | * we switch to a different interrupt vector, which is confusing, | ||
1604 | * so instead I just do it all inline. Perhaps this can somehow be tied | ||
1605 | * into the PCIe hotplug support at some point. | ||
1606 | * Note, because I'm doing it all here, I don't call pci_disable_msi() | ||
1607 | * or free_irq() at the start of ipath_setup_7220_reset(). | ||
1608 | */ | ||
1609 | static int ipath_reinit_msi(struct ipath_devdata *dd) | ||
1610 | { | ||
1611 | int ret = 0; | ||
1612 | |||
1613 | int pos; | ||
1614 | u16 control; | ||
1615 | if (!dd->ipath_msi_lo) /* Using intX, or init problem */ | ||
1616 | goto bail; | ||
1617 | |||
1618 | pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); | ||
1619 | if (!pos) { | ||
1620 | ipath_dev_err(dd, "Can't find MSI capability, " | ||
1621 | "can't restore MSI settings\n"); | ||
1622 | goto bail; | ||
1623 | } | ||
1624 | ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n", | ||
1625 | dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO); | ||
1626 | pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, | ||
1627 | dd->ipath_msi_lo); | ||
1628 | ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n", | ||
1629 | dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI); | ||
1630 | pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, | ||
1631 | dd->ipath_msi_hi); | ||
1632 | pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control); | ||
1633 | if (!(control & PCI_MSI_FLAGS_ENABLE)) { | ||
1634 | ipath_cdbg(VERBOSE, "MSI control at off %x was %x, " | ||
1635 | "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS, | ||
1636 | control, control | PCI_MSI_FLAGS_ENABLE); | ||
1637 | control |= PCI_MSI_FLAGS_ENABLE; | ||
1638 | pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, | ||
1639 | control); | ||
1640 | } | ||
1641 | /* now rewrite the data (vector) info */ | ||
1642 | pci_write_config_word(dd->pcidev, pos + | ||
1643 | ((control & PCI_MSI_FLAGS_64BIT) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32), | ||
1644 | dd->ipath_msi_data); | ||
1645 | ret = 1; | ||
1646 | |||
1647 | bail: | ||
1648 | if (!ret) { | ||
1649 | ipath_dbg("Using INTx, MSI disabled or not configured\n"); | ||
1650 | ipath_enable_intx(dd->pcidev); | ||
1651 | ret = 1; | ||
1652 | } | ||
1653 | /* | ||
1654 | * We restore the cachelinesize also, although it doesn't really | ||
1655 | * matter. | ||
1656 | */ | ||
1657 | pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, | ||
1658 | dd->ipath_pci_cacheline); | ||
1659 | /* and now set the pci master bit again */ | ||
1660 | pci_set_master(dd->pcidev); | ||
1661 | |||
1662 | return ret; | ||
1663 | } | ||
1664 | |||
1665 | /* | ||
1666 | * This routine sleeps, so it can only be called from user context, not | ||
1667 | * from interrupt context. If we need interrupt context, we can split | ||
1668 | * it into two routines. | ||
1669 | */ | ||
1670 | static int ipath_setup_7220_reset(struct ipath_devdata *dd) | ||
1671 | { | ||
1672 | u64 val; | ||
1673 | int i; | ||
1674 | int ret; | ||
1675 | u16 cmdval; | ||
1676 | |||
1677 | pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval); | ||
1678 | |||
1679 | /* Use dev_err so it shows up in logs, etc. */ | ||
1680 | ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); | ||
1681 | |||
1682 | /* keep chip from being accessed in a few places */ | ||
1683 | dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT); | ||
1684 | val = dd->ipath_control | INFINIPATH_C_RESET; | ||
1685 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); | ||
1686 | mb(); | ||
1687 | |||
1688 | for (i = 1; i <= 5; i++) { | ||
1689 | int r; | ||
1690 | |||
1691 | /* | ||
1692 | * Allow MBIST, etc. to complete; longer on each retry. | ||
1693 | * We sometimes get machine checks from bus timeout if no | ||
1694 | * response, so for now, make it *really* long. | ||
1695 | */ | ||
1696 | msleep(1000 + (1 + i) * 2000); | ||
1697 | r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, | ||
1698 | dd->ipath_pcibar0); | ||
1699 | if (r) | ||
1700 | ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r); | ||
1701 | r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, | ||
1702 | dd->ipath_pcibar1); | ||
1703 | if (r) | ||
1704 | ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r); | ||
1705 | /* now re-enable memory access */ | ||
1706 | pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval); | ||
1707 | r = pci_enable_device(dd->pcidev); | ||
1708 | if (r) | ||
1709 | ipath_dev_err(dd, "pci_enable_device failed after " | ||
1710 | "reset: %d\n", r); | ||
1711 | /* | ||
1712 | * whether it fully enabled or not, mark as present, | ||
1713 | * again (but not INITTED) | ||
1714 | */ | ||
1715 | dd->ipath_flags |= IPATH_PRESENT; | ||
1716 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); | ||
1717 | if (val == dd->ipath_revision) { | ||
1718 | ipath_cdbg(VERBOSE, "Got matching revision " | ||
1719 | "register %llx on try %d\n", | ||
1720 | (unsigned long long) val, i); | ||
1721 | ret = ipath_reinit_msi(dd); | ||
1722 | goto bail; | ||
1723 | } | ||
1724 | /* Probably getting -1 back */ | ||
1725 | ipath_dbg("Didn't get expected revision register, " | ||
1726 | "got %llx, try %d\n", (unsigned long long) val, | ||
1727 | i + 1); | ||
1728 | } | ||
1729 | ret = 0; /* failed */ | ||
1730 | |||
1731 | bail: | ||
1732 | if (ret) | ||
1733 | ipath_7220_pcie_params(dd, dd->ipath_boardrev); | ||
1734 | |||
1735 | return ret; | ||
1736 | } | ||
1737 | |||
1738 | /** | ||
1739 | * ipath_7220_put_tid - write a TID to the chip | ||
1740 | * @dd: the infinipath device | ||
1741 | * @tidptr: pointer to the expected TID (in chip) to update | ||
1742 | * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED | ||
1743 | * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing | ||
1744 | * | ||
1745 | * This exists as a separate routine to allow for selection of the | ||
1746 | * appropriate "flavor". The static calls in cleanup just use the | ||
1747 | * revision-agnostic form, as they are not performance critical. | ||
1748 | */ | ||
1749 | static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, | ||
1750 | u32 type, unsigned long pa) | ||
1751 | { | ||
1752 | if (pa != dd->ipath_tidinvalid) { | ||
1753 | u64 chippa = pa >> IBA7220_TID_PA_SHIFT; | ||
1754 | |||
1755 | /* paranoia checks */ | ||
1756 | if (pa != (chippa << IBA7220_TID_PA_SHIFT)) { | ||
1757 | dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " | ||
1758 | "not 2KB aligned!\n", pa); | ||
1759 | return; | ||
1760 | } | ||
1761 | if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { | ||
1762 | ipath_dev_err(dd, | ||
1763 | "BUG: Physical page address 0x%lx " | ||
1764 | "larger than supported\n", pa); | ||
1765 | return; | ||
1766 | } | ||
1767 | |||
1768 | if (type == RCVHQ_RCV_TYPE_EAGER) | ||
1769 | chippa |= dd->ipath_tidtemplate; | ||
1770 | else /* for now, always full 4KB page */ | ||
1771 | chippa |= IBA7220_TID_SZ_4K; | ||
1772 | writeq(chippa, tidptr); | ||
1773 | } else | ||
1774 | writeq(pa, tidptr); | ||
1775 | mmiowb(); | ||
1776 | } | ||
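/*
 * Editor's note (worked example, address chosen for illustration): with
 * IBA7220_TID_PA_SHIFT implied to be 11 by the 2KB alignment check above,
 * a buffer at physical address 0x12345800 is stored as
 *
 *	chippa = 0x12345800 >> 11 = 0x2468b
 *
 * with the size bits (dd->ipath_tidtemplate for eager TIDs, or
 * IBA7220_TID_SZ_4K for expected TIDs) OR'd in before the writeq().
 */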
1777 | |||
1778 | /** | ||
1779 | * ipath_7220_clear_tids - clear all TID entries for a port, expected and eager | ||
1780 | * @dd: the infinipath device | ||
1781 | * @port: the port | ||
1782 | * | ||
1783 | * clear all TID entries for a port, expected and eager. | ||
1784 | * Used from ipath_close(). On this chip, TIDs are only 32 bits, | ||
1785 | * not 64, but they are still on 64 bit boundaries, so tidbase | ||
1786 | * is declared as u64 * for the pointer math, even though we write 32 bits | ||
1787 | */ | ||
1788 | static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port) | ||
1789 | { | ||
1790 | u64 __iomem *tidbase; | ||
1791 | unsigned long tidinv; | ||
1792 | int i; | ||
1793 | |||
1794 | if (!dd->ipath_kregbase) | ||
1795 | return; | ||
1796 | |||
1797 | ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port); | ||
1798 | |||
1799 | tidinv = dd->ipath_tidinvalid; | ||
1800 | tidbase = (u64 __iomem *) | ||
1801 | ((char __iomem *)(dd->ipath_kregbase) + | ||
1802 | dd->ipath_rcvtidbase + | ||
1803 | port * dd->ipath_rcvtidcnt * sizeof(*tidbase)); | ||
1804 | |||
1805 | for (i = 0; i < dd->ipath_rcvtidcnt; i++) | ||
1806 | ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, | ||
1807 | tidinv); | ||
1808 | |||
1809 | tidbase = (u64 __iomem *) | ||
1810 | ((char __iomem *)(dd->ipath_kregbase) + | ||
1811 | dd->ipath_rcvegrbase + port_egrtid_idx(dd, port) | ||
1812 | * sizeof(*tidbase)); | ||
1813 | |||
1814 | for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--) | ||
1815 | ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER, | ||
1816 | tidinv); | ||
1817 | } | ||
1818 | |||
1819 | /** | ||
1820 | * ipath_7220_tidtemplate - setup constants for TID updates | ||
1821 | * @dd: the infinipath device | ||
1822 | * | ||
1823 | * We setup stuff that we use a lot, to avoid calculating each time | ||
1824 | */ | ||
1825 | static void ipath_7220_tidtemplate(struct ipath_devdata *dd) | ||
1826 | { | ||
1827 | /* For now, we always allocate 4KB buffers (at init) so we can | ||
1828 | * receive max size packets. We may want a module parameter to | ||
1829 | * specify 2KB or 4KB and/or make it per port instead of per device | ||
1830 | * for those who want to reduce memory footprint. Note that the | ||
1831 | * ipath_rcvhdrentsize size must be large enough to hold the largest | ||
1832 | * IB header (currently 96 bytes) that we expect to handle (plus of | ||
1833 | * course the 2 dwords of RHF). | ||
1834 | */ | ||
1835 | if (dd->ipath_rcvegrbufsize == 2048) | ||
1836 | dd->ipath_tidtemplate = IBA7220_TID_SZ_2K; | ||
1837 | else if (dd->ipath_rcvegrbufsize == 4096) | ||
1838 | dd->ipath_tidtemplate = IBA7220_TID_SZ_4K; | ||
1839 | else { | ||
1840 | dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize " | ||
1841 | "%u, using %u\n", dd->ipath_rcvegrbufsize, | ||
1842 | 4096); | ||
1843 | dd->ipath_tidtemplate = IBA7220_TID_SZ_4K; | ||
1844 | } | ||
1845 | dd->ipath_tidinvalid = 0; | ||
1846 | } | ||
1847 | |||
1848 | static int ipath_7220_early_init(struct ipath_devdata *dd) | ||
1849 | { | ||
1850 | u32 i, s; | ||
1851 | |||
1852 | if (strcmp(int_type, "auto") && | ||
1853 | strcmp(int_type, "force_msi") && | ||
1854 | strcmp(int_type, "force_intx")) { | ||
1855 | ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting " | ||
1856 | "auto, force_msi or force_intx\n", int_type); | ||
1857 | return -EINVAL; | ||
1858 | } | ||
1859 | |||
1860 | /* | ||
1861 | * Control[4] has been added to change the arbitration within | ||
1862 | * the SDMA engine between favoring data fetches over descriptor | ||
1863 | * fetches. ipath_sdma_fetch_arb==0 gives data fetches priority. | ||
1864 | */ | ||
1865 | if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1)) | ||
1866 | dd->ipath_control |= 1<<4; | ||
1867 | |||
1868 | dd->ipath_flags |= IPATH_4BYTE_TID; | ||
1869 | |||
1870 | /* | ||
1871 | * For openfabrics, we need to be able to handle an IB header of | ||
1872 | * 24 dwords. The HT chip has arbitrarily sized receive buffers, so we | ||
1873 | * made them the same size as the PIO buffers. This chip does not | ||
1874 | * handle arbitrary size buffers, so we need the header large enough | ||
1875 | * to handle largest IB header, but still have room for a 2KB MTU | ||
1876 | * standard IB packet. | ||
1877 | */ | ||
1878 | dd->ipath_rcvhdrentsize = 24; | ||
1879 | dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; | ||
1880 | dd->ipath_rhf_offset = | ||
1881 | dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32); | ||
1882 | |||
1883 | dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048; | ||
1884 | /* | ||
1885 | * the min() check here is currently a nop, but it may not always | ||
1886 | * be, depending on just how we do ipath_rcvegrbufsize | ||
1887 | */ | ||
1888 | dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k : | ||
1889 | dd->ipath_piosize2k, | ||
1890 | dd->ipath_rcvegrbufsize + | ||
1891 | (dd->ipath_rcvhdrentsize << 2)); | ||
1892 | dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; | ||
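	/*
	 * Editor's note (illustrative arithmetic): with rcvhdrentsize of
	 * 24 dwords, rhf_offset = 24 - sizeof(u64)/sizeof(u32) = 22 dwords.
	 * With 4KB MTU enabled, rcvegrbufsize = 4096 and ibmaxlen becomes
	 * min(piosize4k, 4096 + (24 << 2)) = min(piosize4k, 4192).
	 */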
1893 | |||
1894 | ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS, | ||
1895 | INFINIPATH_JINT_DEFAULT_MAX_PACKETS); | ||
1896 | |||
1897 | if (dd->ipath_boardrev) /* no eeprom on emulator */ | ||
1898 | ipath_get_eeprom_info(dd); | ||
1899 | |||
1900 | /* start of code to check and print procmon */ | ||
1901 | s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon)); | ||
1902 | s &= ~(1U<<31); /* clear done bit */ | ||
1903 | s |= 1U<<14; /* clear counter (write 1 to clear) */ | ||
1904 | ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s); | ||
1905 | /* make sure clear_counter low long enough before start */ | ||
1906 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | ||
1907 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | ||
1908 | |||
1909 | s &= ~(1U<<14); /* allow counter to count (before starting) */ | ||
1910 | ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s); | ||
1911 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | ||
1912 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | ||
1913 | s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon)); | ||
1914 | |||
1915 | s |= 1U<<15; /* start the counter */ | ||
1916 | s &= ~(1U<<31); /* clear done bit */ | ||
1917 | s &= ~0x7ffU; /* clear frequency bits */ | ||
1918 | s |= 0xe29; /* set frequency bits, in case cleared */ | ||
1919 | ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s); | ||
1920 | |||
1921 | s = 0; | ||
1922 | for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) { | ||
1923 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | ||
1924 | s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon)); | ||
1925 | } | ||
1926 | if (!(s&(1U<<31))) | ||
1927 | ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s); | ||
1928 | else | ||
1929 | ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff); | ||
1930 | |||
1931 | return 0; | ||
1932 | } | ||
1933 | |||
1934 | /** | ||
1935 | * ipath_7220_get_base_info - set chip-specific flags for user code | ||
1936 | * @pd: the infinipath port | ||
1937 | * @kbase: ipath_base_info pointer | ||
1938 | * | ||
1939 | * We set the PCIE flag because the lower bandwidth on PCIe vs | ||
1940 | * HyperTransport can affect some user packet algorithms. | ||
1941 | */ | ||
1942 | static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase) | ||
1943 | { | ||
1944 | struct ipath_base_info *kinfo = kbase; | ||
1945 | |||
1946 | kinfo->spi_runtime_flags |= | ||
1947 | IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL | | ||
1948 | IPATH_RUNTIME_SDMA; | ||
1949 | |||
1950 | return 0; | ||
1951 | } | ||
1952 | |||
1953 | static void ipath_7220_free_irq(struct ipath_devdata *dd) | ||
1954 | { | ||
1955 | free_irq(dd->ipath_irq, dd); | ||
1956 | dd->ipath_irq = 0; | ||
1957 | } | ||
1958 | |||
1959 | static struct ipath_message_header * | ||
1960 | ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr) | ||
1961 | { | ||
1962 | u32 offset = ipath_hdrget_offset(rhf_addr); | ||
1963 | |||
1964 | return (struct ipath_message_header *) | ||
1965 | (rhf_addr - dd->ipath_rhf_offset + offset); | ||
1966 | } | ||
1967 | |||
1968 | static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports) | ||
1969 | { | ||
1970 | u32 nchipports; | ||
1971 | |||
1972 | nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); | ||
1973 | if (!cfgports) { | ||
1974 | int ncpus = num_online_cpus(); | ||
1975 | |||
1976 | if (ncpus <= 4) | ||
1977 | dd->ipath_portcnt = 5; | ||
1978 | else if (ncpus <= 8) | ||
1979 | dd->ipath_portcnt = 9; | ||
1980 | if (dd->ipath_portcnt) | ||
1981 | ipath_dbg("Auto-configured for %u ports, %d cpus " | ||
1982 | "online\n", dd->ipath_portcnt, ncpus); | ||
1983 | } else if (cfgports <= nchipports) | ||
1984 | dd->ipath_portcnt = cfgports; | ||
1985 | if (!dd->ipath_portcnt) /* none of the above, set to max */ | ||
1986 | dd->ipath_portcnt = nchipports; | ||
1987 | /* | ||
1988 | * chip can be configured for 5, 9, or 17 ports, and choice | ||
1989 | * affects number of eager TIDs per port (1K, 2K, 4K). | ||
1990 | */ | ||
1991 | if (dd->ipath_portcnt > 9) | ||
1992 | dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT; | ||
1993 | else if (dd->ipath_portcnt > 5) | ||
1994 | dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT; | ||
1995 | /* else configure for default 5 receive ports */ | ||
1996 | ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, | ||
1997 | dd->ipath_rcvctrl); | ||
1998 | dd->ipath_p0_rcvegrcnt = 2048; /* always */ | ||
1999 | if (dd->ipath_flags & IPATH_HAS_SEND_DMA) | ||
2000 | dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */ | ||
2001 | } | ||
2002 | |||
2003 | |||
2004 | static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which) | ||
2005 | { | ||
2006 | int lsb, ret = 0; | ||
2007 | u64 maskr; /* right-justified mask */ | ||
2008 | |||
2009 | switch (which) { | ||
2010 | case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ | ||
2011 | lsb = IBA7220_IBC_HRTBT_SHIFT; | ||
2012 | maskr = IBA7220_IBC_HRTBT_MASK; | ||
2013 | break; | ||
2014 | |||
2015 | case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */ | ||
2016 | ret = dd->ipath_link_width_enabled; | ||
2017 | goto done; | ||
2018 | |||
2019 | case IPATH_IB_CFG_LWID: /* Get currently active Link-width */ | ||
2020 | ret = dd->ipath_link_width_active; | ||
2021 | goto done; | ||
2022 | |||
2023 | case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ | ||
2024 | ret = dd->ipath_link_speed_enabled; | ||
2025 | goto done; | ||
2026 | |||
2027 | case IPATH_IB_CFG_SPD: /* Get current Link spd */ | ||
2028 | ret = dd->ipath_link_speed_active; | ||
2029 | goto done; | ||
2030 | |||
2031 | case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ | ||
2032 | lsb = IBA7220_IBC_RXPOL_SHIFT; | ||
2033 | maskr = IBA7220_IBC_RXPOL_MASK; | ||
2034 | break; | ||
2035 | |||
2036 | case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ | ||
2037 | lsb = IBA7220_IBC_LREV_SHIFT; | ||
2038 | maskr = IBA7220_IBC_LREV_MASK; | ||
2039 | break; | ||
2040 | |||
2041 | case IPATH_IB_CFG_LINKLATENCY: | ||
2042 | ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus) | ||
2043 | & IBA7220_DDRSTAT_LINKLAT_MASK; | ||
2044 | goto done; | ||
2045 | |||
2046 | default: | ||
2047 | ret = -ENOTSUPP; | ||
2048 | goto done; | ||
2049 | } | ||
2050 | ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr); | ||
2051 | done: | ||
2052 | return ret; | ||
2053 | } | ||
2054 | |||
2055 | static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val) | ||
2056 | { | ||
2057 | int lsb, ret = 0, setforce = 0; | ||
2058 | u64 maskr; /* right-justified mask */ | ||
2059 | |||
2060 | switch (which) { | ||
2061 | case IPATH_IB_CFG_LIDLMC: | ||
2062 | /* | ||
2063 | * Set LID and LMC. Combined to avoid possible hazard | ||
2064 | * caller puts LMC in 16MSbits, DLID in 16LSbits of val | ||
2065 | */ | ||
2066 | lsb = IBA7220_IBC_DLIDLMC_SHIFT; | ||
2067 | maskr = IBA7220_IBC_DLIDLMC_MASK; | ||
2068 | break; | ||
2069 | |||
2070 | case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ | ||
2071 | if (val & IPATH_IB_HRTBT_ON && | ||
2072 | (dd->ipath_flags & IPATH_NO_HRTBT)) | ||
2073 | goto bail; | ||
2074 | lsb = IBA7220_IBC_HRTBT_SHIFT; | ||
2075 | maskr = IBA7220_IBC_HRTBT_MASK; | ||
2076 | break; | ||
2077 | |||
2078 | case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */ | ||
2079 | /* | ||
2080 | * As with speed, only write the actual register if | ||
2081 | * the link is currently down, otherwise takes effect | ||
2082 | * on next link change. | ||
2083 | */ | ||
2084 | dd->ipath_link_width_enabled = val; | ||
2085 | if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) != | ||
2086 | IPATH_LINKDOWN) | ||
2087 | goto bail; | ||
2088 | /* | ||
2089 | * We set the IPATH_IB_FORCE_NOTIFY bit so updown | ||
2090 | * will get called because we want to update | ||
2091 | * link_width_active, and the change may not take | ||
2092 | * effect for some time (if we are in POLL), so this | ||
2093 | * flag will force the updown routine to be called | ||
2094 | * on the next ibstatuschange down interrupt, even | ||
2095 | * if it's not a down->up transition. | ||
2096 | */ | ||
2097 | val--; /* convert from IB to chip */ | ||
2098 | maskr = IBA7220_IBC_WIDTH_MASK; | ||
2099 | lsb = IBA7220_IBC_WIDTH_SHIFT; | ||
2100 | setforce = 1; | ||
2101 | dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY; | ||
2102 | break; | ||
2103 | |||
2104 | case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */ | ||
2105 | /* | ||
2106 | * If we turn off IB1.2, need to preset SerDes defaults, | ||
2107 | * but not right now. Set a flag for the next time | ||
2108 | * we command the link down. As with width, only write the | ||
2109 | * actual register if the link is currently down, otherwise | ||
2110 | * takes effect on next link change. Since setting is being | ||
2111 | * explicitly requested (via MAD or sysfs), clear autoneg | ||
2112 | * failure status if speed autoneg is enabled. | ||
2113 | */ | ||
2114 | dd->ipath_link_speed_enabled = val; | ||
2115 | if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK && | ||
2116 | !(val & (val - 1))) | ||
2117 | dd->ipath_presets_needed = 1; | ||
2118 | if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) != | ||
2119 | IPATH_LINKDOWN) | ||
2120 | goto bail; | ||
2121 | /* | ||
2122 | * We set the IPATH_IB_FORCE_NOTIFY bit so updown | ||
2123 | * will get called because we want to update | ||
2124 | * link_speed_active, and the change may not take | ||
2125 | * effect for some time (if we are in POLL), so this | ||
2126 | * flag will force the updown routine to be called | ||
2127 | * on the next ibstatuschange down interrupt, even | ||
2128 | * if it's not a down->up transition. When setting | ||
2129 | * speed autoneg, clear AUTONEG_FAILED. | ||
2130 | */ | ||
2131 | if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) { | ||
2132 | val = IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
2133 | IBA7220_IBC_IBTA_1_2_MASK; | ||
2134 | dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED; | ||
2135 | } else | ||
2136 | val = val == IPATH_IB_DDR ? IBA7220_IBC_SPEED_DDR | ||
2137 | : IBA7220_IBC_SPEED_SDR; | ||
2138 | maskr = IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
2139 | IBA7220_IBC_IBTA_1_2_MASK; | ||
2140 | lsb = 0; /* speed bits are low bits */ | ||
2141 | setforce = 1; | ||
2142 | break; | ||
2143 | |||
2144 | case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ | ||
2145 | lsb = IBA7220_IBC_RXPOL_SHIFT; | ||
2146 | maskr = IBA7220_IBC_RXPOL_MASK; | ||
2147 | break; | ||
2148 | |||
2149 | case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ | ||
2150 | lsb = IBA7220_IBC_LREV_SHIFT; | ||
2151 | maskr = IBA7220_IBC_LREV_MASK; | ||
2152 | break; | ||
2153 | |||
2154 | default: | ||
2155 | ret = -ENOTSUPP; | ||
2156 | goto bail; | ||
2157 | } | ||
2158 | dd->ipath_ibcddrctrl &= ~(maskr << lsb); | ||
2159 | dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb); | ||
2160 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl, | ||
2161 | dd->ipath_ibcddrctrl); | ||
2162 | if (setforce) | ||
2163 | dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY; | ||
2164 | bail: | ||
2165 | return ret; | ||
2166 | } | ||
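/*
 * Editor's note (hypothetical helpers, for illustration only): both
 * ipath_7220_get_ib_cfg() and ipath_7220_set_ib_cfg() use the same
 * right-justified mask convention, equivalent to
 *
 *	static u64 ibc_get_field(u64 reg, u64 maskr, int lsb)
 *	{
 *		return (reg >> lsb) & maskr;
 *	}
 *
 *	static u64 ibc_set_field(u64 reg, u64 maskr, int lsb, u64 val)
 *	{
 *		reg &= ~(maskr << lsb);
 *		reg |= (val & maskr) << lsb;
 *		return reg;
 *	}
 *
 * i.e. the read-modify-write done on dd->ipath_ibcddrctrl above.
 */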
2167 | |||
2168 | static void ipath_7220_read_counters(struct ipath_devdata *dd, | ||
2169 | struct infinipath_counters *cntrs) | ||
2170 | { | ||
2171 | u64 *counters = (u64 *) cntrs; | ||
2172 | int i; | ||
2173 | |||
2174 | for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++) | ||
2175 | counters[i] = ipath_snap_cntr(dd, i); | ||
2176 | } | ||
2177 | |||
2178 | /* if we are using MSI, try to fallback to INTx */ | ||
2179 | static int ipath_7220_intr_fallback(struct ipath_devdata *dd) | ||
2180 | { | ||
2181 | if (dd->ipath_msi_lo) { | ||
2182 | dev_info(&dd->pcidev->dev, "MSI interrupt not detected," | ||
2183 | " trying INTx interrupts\n"); | ||
2184 | ipath_7220_nomsi(dd); | ||
2185 | ipath_enable_intx(dd->pcidev); | ||
2186 | /* | ||
2187 | * some newer kernels require free_irq before disable_msi, | ||
2188 | * and irq can be changed during disable and intx enable | ||
2189 | * and we need to therefore use the pcidev->irq value, | ||
2190 | * not our saved MSI value. | ||
2191 | */ | ||
2192 | dd->ipath_irq = dd->pcidev->irq; | ||
2193 | if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED, | ||
2194 | IPATH_DRV_NAME, dd)) | ||
2195 | ipath_dev_err(dd, | ||
2196 | "Could not re-request_irq for INTx\n"); | ||
2197 | return 1; | ||
2198 | } | ||
2199 | return 0; | ||
2200 | } | ||
2201 | |||
2202 | /* | ||
2203 | * reset the XGXS (between serdes and IBC). Slightly less intrusive | ||
2204 | * than resetting the IBC or external link state, and useful in some | ||
2205 | * cases to cause some retraining. To do this right, we reset IBC | ||
2206 | * as well. | ||
2207 | */ | ||
2208 | static void ipath_7220_xgxs_reset(struct ipath_devdata *dd) | ||
2209 | { | ||
2210 | u64 val, prev_val; | ||
2211 | |||
2212 | prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); | ||
2213 | val = prev_val | INFINIPATH_XGXS_RESET; | ||
2214 | prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */ | ||
2215 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, | ||
2216 | dd->ipath_control & ~INFINIPATH_C_LINKENABLE); | ||
2217 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); | ||
2218 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | ||
2219 | ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val); | ||
2220 | ipath_write_kreg(dd, dd->ipath_kregs->kr_control, | ||
2221 | dd->ipath_control); | ||
2222 | } | ||
2223 | |||
2224 | |||
2225 | /* Still needs cleanup, too much hardwired stuff */ | ||
2226 | static void autoneg_send(struct ipath_devdata *dd, | ||
2227 | u32 *hdr, u32 dcnt, u32 *data) | ||
2228 | { | ||
2229 | int i; | ||
2230 | u64 cnt; | ||
2231 | u32 __iomem *piobuf; | ||
2232 | u32 pnum; | ||
2233 | |||
2234 | i = 0; | ||
2235 | cnt = 7 + dcnt + 1; /* 7 dword header, dcnt dwords data, 1 dword icrc */ | ||
2236 | while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) { | ||
2237 | if (i++ > 15) { | ||
2238 | ipath_dbg("Couldn't get pio buffer for send\n"); | ||
2239 | return; | ||
2240 | } | ||
2241 | udelay(2); | ||
2242 | } | ||
2243 | if (dd->ipath_flags&IPATH_HAS_PBC_CNT) | ||
2244 | cnt |= 0x80000000UL<<32; /* mark as VL15 */ | ||
2245 | writeq(cnt, piobuf); | ||
2246 | ipath_flush_wc(); | ||
2247 | __iowrite32_copy(piobuf + 2, hdr, 7); | ||
2248 | __iowrite32_copy(piobuf + 9, data, dcnt); | ||
2249 | ipath_flush_wc(); | ||
2250 | } | ||
2251 | |||
2252 | /* | ||
2253 | * _start packet gets sent twice at start, _done gets sent twice at end | ||
2254 | */ | ||
2255 | static void ipath_autoneg_send(struct ipath_devdata *dd, int which) | ||
2256 | { | ||
2257 | static u32 swapped; | ||
2258 | u32 dw, i, hcnt, dcnt, *data; | ||
2259 | static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba }; | ||
2260 | static u32 madpayload_start[0x40] = { | ||
2261 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | ||
2262 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | ||
2263 | 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */ | ||
2264 | }; | ||
2265 | static u32 madpayload_done[0x40] = { | ||
2266 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | ||
2267 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | ||
2268 | 0x40000001, 0x1388, 0x15e, /* rest 0's */ | ||
2269 | }; | ||
2270 | dcnt = ARRAY_SIZE(madpayload_start); | ||
2271 | hcnt = ARRAY_SIZE(hdr); | ||
2272 | if (!swapped) { | ||
2273 | /* for maintainability, do it at runtime */ | ||
2274 | for (i = 0; i < hcnt; i++) { | ||
2275 | dw = (__force u32) cpu_to_be32(hdr[i]); | ||
2276 | hdr[i] = dw; | ||
2277 | } | ||
2278 | for (i = 0; i < dcnt; i++) { | ||
2279 | dw = (__force u32) cpu_to_be32(madpayload_start[i]); | ||
2280 | madpayload_start[i] = dw; | ||
2281 | dw = (__force u32) cpu_to_be32(madpayload_done[i]); | ||
2282 | madpayload_done[i] = dw; | ||
2283 | } | ||
2284 | swapped = 1; | ||
2285 | } | ||
2286 | |||
2287 | data = which ? madpayload_done : madpayload_start; | ||
2288 | ipath_cdbg(PKT, "Sending %s special MADs\n", which?"done":"start"); | ||
2289 | |||
2290 | autoneg_send(dd, hdr, dcnt, data); | ||
2291 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); | ||
2292 | udelay(2); | ||
2293 | autoneg_send(dd, hdr, dcnt, data); | ||
2294 | ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); | ||
2295 | udelay(2); | ||
2296 | } | ||
2297 | |||
2298 | |||
2299 | |||
2300 | /* | ||
2301 | * Do the absolute minimum to cause an IB speed change, and make it | ||
2302 | * ready, but don't actually trigger the change. The caller will | ||
2303 | * do that when ready (if link is in Polling training state, it will | ||
2304 | * happen immediately, otherwise when link next goes down) | ||
2305 | * | ||
2306 | * This routine should only be used as part of the DDR autonegotiation | ||
2307 | * code for devices that are not compliant with IB 1.2 (or code that | ||
2308 | * fixes things up for same). | ||
2309 | * | ||
2310 | * When the link has gone down with autoneg enabled, or autoneg has | ||
2311 | * failed and we give up until the next link down, we set both speeds, | ||
2312 | * which enables IBTA 1.2 negotiation as well as "use max enabled speed". | ||
2313 | */ | ||
2314 | static void set_speed_fast(struct ipath_devdata *dd, u32 speed) | ||
2315 | { | ||
2316 | dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
2317 | IBA7220_IBC_IBTA_1_2_MASK | | ||
2318 | (IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT)); | ||
2319 | |||
2320 | if (speed == (IPATH_IB_SDR | IPATH_IB_DDR)) | ||
2321 | dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
2322 | IBA7220_IBC_IBTA_1_2_MASK; | ||
2323 | else | ||
2324 | dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ? | ||
2325 | IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; | ||
2326 | |||
2327 | /* | ||
2328 | * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto | ||
2329 | * to chip-centric 0 = 1x, 1 = 4x, 2 = auto | ||
2330 | */ | ||
2331 | dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) << | ||
2332 | IBA7220_IBC_WIDTH_SHIFT; | ||
2333 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl, | ||
2334 | dd->ipath_ibcddrctrl); | ||
2335 | ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed); | ||
2336 | } | ||
2337 | |||
2338 | |||
2339 | /* | ||
2340 | * this routine is only used when we are not talking to another | ||
2341 | * IB 1.2-compliant device that we think can do DDR. | ||
2342 | * (This includes all existing switch chips as of Oct 2007.) | ||
2343 | * 1.2-compliant devices go directly to DDR prior to reaching INIT | ||
2344 | */ | ||
2345 | static void try_auto_neg(struct ipath_devdata *dd) | ||
2346 | { | ||
2347 | /* | ||
2348 | * required for older non-IB1.2 DDR switches. Newer | ||
2349 | * non-IB-compliant switches don't need it, but so far, | ||
2350 | * aren't bothered by it either. "Magic constant" | ||
2351 | */ | ||
2352 | ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), | ||
2353 | 0x3b9dc07); | ||
2354 | dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG; | ||
2355 | ipath_autoneg_send(dd, 0); | ||
2356 | set_speed_fast(dd, IPATH_IB_DDR); | ||
2357 | ipath_toggle_rclkrls(dd); | ||
2358 | /* 2 msec is minimum length of a poll cycle */ | ||
2359 | schedule_delayed_work(&dd->ipath_autoneg_work, | ||
2360 | msecs_to_jiffies(2)); | ||
2361 | } | ||
2362 | |||
2363 | |||
2364 | static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) | ||
2365 | { | ||
2366 | int ret = 0, symadj = 0; | ||
2367 | u32 ltstate = ipath_ib_linkstate(dd, ibcs); | ||
2368 | |||
2369 | dd->ipath_link_width_active = | ||
2370 | ((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ? | ||
2371 | IB_WIDTH_4X : IB_WIDTH_1X; | ||
2372 | dd->ipath_link_speed_active = | ||
2373 | ((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ? | ||
2374 | IPATH_IB_DDR : IPATH_IB_SDR; | ||
2375 | |||
2376 | if (!ibup) { | ||
2377 | /* | ||
2378 | * when link goes down we don't want aeq running, so it | ||
2379 | * won't interfere with IBC training, etc., and we need | ||
2380 | * to go back to the static SerDes preset values | ||
2381 | */ | ||
2382 | if (dd->ipath_x1_fix_tries && | ||
2383 | ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET && | ||
2384 | ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) | ||
2385 | dd->ipath_x1_fix_tries = 0; | ||
2386 | if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED | | ||
2387 | IPATH_IB_AUTONEG_INPROG))) | ||
2388 | set_speed_fast(dd, dd->ipath_link_speed_enabled); | ||
2389 | if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) { | ||
2390 | ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n"); | ||
2391 | ipath_sd7220_presets(dd); | ||
2392 | } | ||
2393 | /* this might be better in ipath_sd7220_presets() */ | ||
2394 | ipath_set_relock_poll(dd, ibup); | ||
2395 | } else { | ||
2396 | if (ipath_compat_ddr_negotiate && | ||
2397 | !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED | | ||
2398 | IPATH_IB_AUTONEG_INPROG)) && | ||
2399 | dd->ipath_link_speed_active == IPATH_IB_SDR && | ||
2400 | (dd->ipath_link_speed_enabled & | ||
2401 | (IPATH_IB_DDR | IPATH_IB_SDR)) == | ||
2402 | (IPATH_IB_DDR | IPATH_IB_SDR) && | ||
2403 | dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) { | ||
2404 | /* we are SDR, and DDR auto-negotiation enabled */ | ||
2405 | ++dd->ipath_autoneg_tries; | ||
2406 | ipath_dbg("DDR negotiation try, %u/%u\n", | ||
2407 | dd->ipath_autoneg_tries, | ||
2408 | IPATH_AUTONEG_TRIES); | ||
2409 | if (!dd->ibdeltainprog) { | ||
2410 | dd->ibdeltainprog = 1; | ||
2411 | dd->ibsymsnap = ipath_read_creg32(dd, | ||
2412 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
2413 | dd->iblnkerrsnap = ipath_read_creg32(dd, | ||
2414 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
2415 | } | ||
2416 | try_auto_neg(dd); | ||
2417 | ret = 1; /* no other IB status change processing */ | ||
2418 | } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) | ||
2419 | && dd->ipath_link_speed_active == IPATH_IB_SDR) { | ||
2420 | ipath_autoneg_send(dd, 1); | ||
2421 | set_speed_fast(dd, IPATH_IB_DDR); | ||
2422 | udelay(2); | ||
2423 | ipath_toggle_rclkrls(dd); | ||
2424 | ret = 1; /* no other IB status change processing */ | ||
2425 | } else { | ||
2426 | if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) && | ||
2427 | (dd->ipath_link_speed_active & IPATH_IB_DDR)) { | ||
2428 | ipath_dbg("Got to INIT with DDR autoneg\n"); | ||
2429 | dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG | ||
2430 | | IPATH_IB_AUTONEG_FAILED); | ||
2431 | dd->ipath_autoneg_tries = 0; | ||
2432 | /* re-enable SDR, for next link down */ | ||
2433 | set_speed_fast(dd, | ||
2434 | dd->ipath_link_speed_enabled); | ||
2435 | wake_up(&dd->ipath_autoneg_wait); | ||
2436 | symadj = 1; | ||
2437 | } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) { | ||
2438 | /* | ||
2439 | * clear autoneg failure flag, and do setup | ||
2440 | * so we'll try next time link goes down and | ||
2441 | * back to INIT (possibly connected to different | ||
2442 | * device). | ||
2443 | */ | ||
2444 | ipath_dbg("INIT %sDR after autoneg failure\n", | ||
2445 | (dd->ipath_link_speed_active & | ||
2446 | IPATH_IB_DDR) ? "D" : "S"); | ||
2447 | dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED; | ||
2448 | dd->ipath_ibcddrctrl |= | ||
2449 | IBA7220_IBC_IBTA_1_2_MASK; | ||
2450 | ipath_write_kreg(dd, | ||
2451 | IPATH_KREG_OFFSET(IBNCModeCtrl), 0); | ||
2452 | symadj = 1; | ||
2453 | } | ||
2454 | } | ||
2455 | /* | ||
2456 | * if we are in 1X on rev1 only, and are in autoneg width, | ||
2457 | * it could be due to an xgxs problem, so if we haven't | ||
2458 | * already tried, try twice to get to 4X; if we | ||
2459 | * tried, and couldn't, report it, since it will | ||
2460 | * probably not be what is desired. | ||
2461 | */ | ||
2462 | if (dd->ipath_minrev == 1 && | ||
2463 | (dd->ipath_link_width_enabled & (IB_WIDTH_1X | | ||
2464 | IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X) | ||
2465 | && dd->ipath_link_width_active == IB_WIDTH_1X | ||
2466 | && dd->ipath_x1_fix_tries < 3) { | ||
2467 | if (++dd->ipath_x1_fix_tries == 3) { | ||
2468 | dev_info(&dd->pcidev->dev, | ||
2469 | "IB link is in 1X mode\n"); | ||
2470 | if (!(dd->ipath_flags & | ||
2471 | IPATH_IB_AUTONEG_INPROG)) | ||
2472 | symadj = 1; | ||
2473 | } | ||
2474 | else { | ||
2475 | ipath_cdbg(VERBOSE, "IB 1X in " | ||
2476 | "auto-width, try %u to be " | ||
2477 | "sure it's really 1X; " | ||
2478 | "ltstate %u\n", | ||
2479 | dd->ipath_x1_fix_tries, | ||
2480 | ltstate); | ||
2481 | dd->ipath_f_xgxs_reset(dd); | ||
2482 | ret = 1; /* skip other processing */ | ||
2483 | } | ||
2484 | } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) | ||
2485 | symadj = 1; | ||
2486 | |||
2487 | if (!ret) { | ||
2488 | dd->delay_mult = rate_to_delay | ||
2489 | [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1] | ||
2490 | [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1]; | ||
2491 | |||
2492 | ipath_set_relock_poll(dd, ibup); | ||
2493 | } | ||
2494 | } | ||
2495 | |||
2496 | if (symadj) { | ||
2497 | if (dd->ibdeltainprog) { | ||
2498 | dd->ibdeltainprog = 0; | ||
2499 | dd->ibsymdelta += ipath_read_creg32(dd, | ||
2500 | dd->ipath_cregs->cr_ibsymbolerrcnt) - | ||
2501 | dd->ibsymsnap; | ||
2502 | dd->iblnkerrdelta += ipath_read_creg32(dd, | ||
2503 | dd->ipath_cregs->cr_iblinkerrrecovcnt) - | ||
2504 | dd->iblnkerrsnap; | ||
2505 | } | ||
2506 | } else if (!ibup && !dd->ibdeltainprog | ||
2507 | && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) { | ||
2508 | dd->ibdeltainprog = 1; | ||
2509 | dd->ibsymsnap = ipath_read_creg32(dd, | ||
2510 | dd->ipath_cregs->cr_ibsymbolerrcnt); | ||
2511 | dd->iblnkerrsnap = ipath_read_creg32(dd, | ||
2512 | dd->ipath_cregs->cr_iblinkerrrecovcnt); | ||
2513 | } | ||
2514 | |||
2515 | if (!ret) | ||
2516 | ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs), | ||
2517 | ltstate); | ||
2518 | return ret; | ||
2519 | } | ||
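/*
 * Editor's note (summary of the bookkeeping above): when the link goes
 * down, or DDR autoneg starts, ibdeltainprog is set and the symbol-error
 * and link-error-recovery counters are snapshotted into ibsymsnap and
 * iblnkerrsnap.  Once the transition settles (symadj), the counts that
 * accumulated in the meantime are folded into ibsymdelta/iblnkerrdelta.
 * ipath_7220_quiet_serdes() later subtracts those deltas (and, for a
 * still-in-progress transition, rewinds the counter to its snapshot via
 * "val -= val - snap"), apparently so that errors incurred during link
 * training are not charged against normal operation.  For example, a
 * symbol error counter that read 100 at the snapshot and 140 when the
 * link settled contributes a delta of 40, which is later backed out.
 */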
2520 | |||
2521 | |||
2522 | /* | ||
2523 | * Handle the empirically determined mechanism for auto-negotiation | ||
2524 | * of DDR speed with switches. | ||
2525 | */ | ||
2526 | static void autoneg_work(struct work_struct *work) | ||
2527 | { | ||
2528 | struct ipath_devdata *dd; | ||
2529 | u64 startms; | ||
2530 | u32 lastlts, i; | ||
2531 | |||
2532 | dd = container_of(work, struct ipath_devdata, | ||
2533 | ipath_autoneg_work.work); | ||
2534 | |||
2535 | startms = jiffies_to_msecs(jiffies); | ||
2536 | |||
2537 | /* | ||
2538 | * busy wait for this first part, it should be at most a | ||
2539 | * few hundred usec, since we scheduled ourselves for 2msec. | ||
2540 | */ | ||
2541 | for (i = 0; i < 25; i++) { | ||
2542 | lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat); | ||
2543 | if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) { | ||
2544 | ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE); | ||
2545 | break; | ||
2546 | } | ||
2547 | udelay(100); | ||
2548 | } | ||
2549 | |||
2550 | if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) | ||
2551 | goto done; /* we got there early or told to stop */ | ||
2552 | |||
2553 | /* we expect this to timeout */ | ||
2554 | if (wait_event_timeout(dd->ipath_autoneg_wait, | ||
2555 | !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG), | ||
2556 | msecs_to_jiffies(90))) | ||
2557 | goto done; | ||
2558 | |||
2559 | ipath_toggle_rclkrls(dd); | ||
2560 | |||
2561 | /* we expect this to timeout */ | ||
2562 | if (wait_event_timeout(dd->ipath_autoneg_wait, | ||
2563 | !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG), | ||
2564 | msecs_to_jiffies(1700))) | ||
2565 | goto done; | ||
2566 | |||
2567 | set_speed_fast(dd, IPATH_IB_SDR); | ||
2568 | ipath_toggle_rclkrls(dd); | ||
2569 | |||
2570 | /* | ||
2571 | * wait up to 250 msec for link to train and get to INIT at DDR; | ||
2572 | * this should terminate early | ||
2573 | */ | ||
2574 | wait_event_timeout(dd->ipath_autoneg_wait, | ||
2575 | !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG), | ||
2576 | msecs_to_jiffies(250)); | ||
2577 | done: | ||
2578 | if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) { | ||
2579 | ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n", | ||
2580 | ipath_ib_state(dd, dd->ipath_lastibcstat), | ||
2581 | (unsigned long long) jiffies_to_msecs(jiffies)-startms); | ||
2582 | dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; | ||
2583 | if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) { | ||
2584 | dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED; | ||
2585 | ipath_dbg("Giving up on DDR until next IB " | ||
2586 | "link Down\n"); | ||
2587 | dd->ipath_autoneg_tries = 0; | ||
2588 | } | ||
2589 | set_speed_fast(dd, dd->ipath_link_speed_enabled); | ||
2590 | } | ||
2591 | } | ||
2592 | |||
2593 | |||
2594 | /** | ||
2595 | * ipath_init_iba7220_funcs - set up the chip-specific function pointers | ||
2596 | * @dd: the infinipath device | ||
2597 | * | ||
2598 | * This is global, and is called directly at init to set up the | ||
2599 | * chip-specific function pointers for later use. | ||
2600 | */ | ||
2601 | void ipath_init_iba7220_funcs(struct ipath_devdata *dd) | ||
2602 | { | ||
2603 | dd->ipath_f_intrsetup = ipath_7220_intconfig; | ||
2604 | dd->ipath_f_bus = ipath_setup_7220_config; | ||
2605 | dd->ipath_f_reset = ipath_setup_7220_reset; | ||
2606 | dd->ipath_f_get_boardname = ipath_7220_boardname; | ||
2607 | dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors; | ||
2608 | dd->ipath_f_early_init = ipath_7220_early_init; | ||
2609 | dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors; | ||
2610 | dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes; | ||
2611 | dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes; | ||
2612 | dd->ipath_f_clear_tids = ipath_7220_clear_tids; | ||
2613 | dd->ipath_f_put_tid = ipath_7220_put_tid; | ||
2614 | dd->ipath_f_cleanup = ipath_setup_7220_cleanup; | ||
2615 | dd->ipath_f_setextled = ipath_setup_7220_setextled; | ||
2616 | dd->ipath_f_get_base_info = ipath_7220_get_base_info; | ||
2617 | dd->ipath_f_free_irq = ipath_7220_free_irq; | ||
2618 | dd->ipath_f_tidtemplate = ipath_7220_tidtemplate; | ||
2619 | dd->ipath_f_intr_fallback = ipath_7220_intr_fallback; | ||
2620 | dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset; | ||
2621 | dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg; | ||
2622 | dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg; | ||
2623 | dd->ipath_f_config_jint = ipath_7220_config_jint; | ||
2624 | dd->ipath_f_config_ports = ipath_7220_config_ports; | ||
2625 | dd->ipath_f_read_counters = ipath_7220_read_counters; | ||
2626 | dd->ipath_f_get_msgheader = ipath_7220_get_msgheader; | ||
2627 | dd->ipath_f_ib_updown = ipath_7220_ib_updown; | ||
2628 | |||
2629 | /* initialize chip-specific variables */ | ||
2630 | ipath_init_7220_variables(dd); | ||
2631 | } | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index b3d7efcdf021..6559af60bffd 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -1030,8 +1030,6 @@ void ipath_free_data(struct ipath_portdata *dd); | |||
1030 | u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *); | 1030 | u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *); |
1031 | void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start, | 1031 | void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start, |
1032 | unsigned len, int avail); | 1032 | unsigned len, int avail); |
1033 | void ipath_init_iba7220_funcs(struct ipath_devdata *); | ||
1034 | void ipath_init_iba6120_funcs(struct ipath_devdata *); | ||
1035 | void ipath_init_iba6110_funcs(struct ipath_devdata *); | 1033 | void ipath_init_iba6110_funcs(struct ipath_devdata *); |
1036 | void ipath_get_eeprom_info(struct ipath_devdata *); | 1034 | void ipath_get_eeprom_info(struct ipath_devdata *); |
1037 | int ipath_update_eeprom_log(struct ipath_devdata *dd); | 1035 | int ipath_update_eeprom_log(struct ipath_devdata *dd); |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 559f39be0dcc..dd7f26d04d46 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -2182,7 +2182,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) | |||
2182 | snprintf(dev->node_desc, sizeof(dev->node_desc), | 2182 | snprintf(dev->node_desc, sizeof(dev->node_desc), |
2183 | IPATH_IDSTR " %s", init_utsname()->nodename); | 2183 | IPATH_IDSTR " %s", init_utsname()->nodename); |
2184 | 2184 | ||
2185 | ret = ib_register_device(dev); | 2185 | ret = ib_register_device(dev, NULL); |
2186 | if (ret) | 2186 | if (ret) |
2187 | goto err_reg; | 2187 | goto err_reg; |
2188 | 2188 | ||
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 39051417054c..4e94e360e43b 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -662,7 +662,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
662 | spin_lock_init(&ibdev->sm_lock); | 662 | spin_lock_init(&ibdev->sm_lock); |
663 | mutex_init(&ibdev->cap_mask_mutex); | 663 | mutex_init(&ibdev->cap_mask_mutex); |
664 | 664 | ||
665 | if (ib_register_device(&ibdev->ib_dev)) | 665 | if (ib_register_device(&ibdev->ib_dev, NULL)) |
666 | goto err_map; | 666 | goto err_map; |
667 | 667 | ||
668 | if (mlx4_ib_mad_init(ibdev)) | 668 | if (mlx4_ib_mad_init(ibdev)) |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index f080a784bc79..1e0b4b6074ad 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -1403,7 +1403,7 @@ int mthca_register_device(struct mthca_dev *dev) | |||
1403 | 1403 | ||
1404 | mutex_init(&dev->cap_mask_mutex); | 1404 | mutex_init(&dev->cap_mask_mutex); |
1405 | 1405 | ||
1406 | ret = ib_register_device(&dev->ib_dev); | 1406 | ret = ib_register_device(&dev->ib_dev, NULL); |
1407 | if (ret) | 1407 | if (ret) |
1408 | return ret; | 1408 | return ret; |
1409 | 1409 | ||
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 86acb7d57064..57874a165083 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -2584,7 +2584,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2584 | break; | 2584 | break; |
2585 | } | 2585 | } |
2586 | } | 2586 | } |
2587 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
2588 | 2587 | ||
2589 | if (phy_data & 0x0004) { | 2588 | if (phy_data & 0x0004) { |
2590 | if (wide_ppm_offset && | 2589 | if (wide_ppm_offset && |
@@ -2639,6 +2638,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2639 | } | 2638 | } |
2640 | } | 2639 | } |
2641 | 2640 | ||
2641 | spin_unlock_irqrestore(&nesadapter->phy_lock, flags); | ||
2642 | |||
2642 | nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE; | 2643 | nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE; |
2643 | } | 2644 | } |
2644 | 2645 | ||
@@ -3422,6 +3423,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3422 | struct nes_adapter *nesadapter = nesdev->nesadapter; | 3423 | struct nes_adapter *nesadapter = nesdev->nesadapter; |
3423 | u32 aeq_info; | 3424 | u32 aeq_info; |
3424 | u32 next_iwarp_state = 0; | 3425 | u32 next_iwarp_state = 0; |
3426 | u32 aeqe_cq_id; | ||
3425 | u16 async_event_id; | 3427 | u16 async_event_id; |
3426 | u8 tcp_state; | 3428 | u8 tcp_state; |
3427 | u8 iwarp_state; | 3429 | u8 iwarp_state; |
@@ -3449,6 +3451,14 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, | |||
3449 | le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, | 3451 | le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe, |
3450 | nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]); | 3452 | nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]); |
3451 | 3453 | ||
3454 | aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]); | ||
3455 | if (aeq_info & NES_AEQE_QP) { | ||
3456 | if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps, | ||
3457 | aeqe_cq_id)) || | ||
3458 | (atomic_read(&nesqp->close_timer_started))) | ||
3459 | return; | ||
3460 | } | ||
3461 | |||
3452 | switch (async_event_id) { | 3462 | switch (async_event_id) { |
3453 | case NES_AEQE_AEID_LLP_FIN_RECEIVED: | 3463 | case NES_AEQE_AEID_LLP_FIN_RECEIVED: |
3454 | if (nesqp->term_flags) | 3464 | if (nesqp->term_flags) |
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index e95e8d09ff38..5cc0a9ae5bb1 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -1001,6 +1001,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu) | |||
1001 | return ret; | 1001 | return ret; |
1002 | } | 1002 | } |
1003 | 1003 | ||
1004 | |||
1004 | static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { | 1005 | static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { |
1005 | "Link Change Interrupts", | 1006 | "Link Change Interrupts", |
1006 | "Linearized SKBs", | 1007 | "Linearized SKBs", |
@@ -1015,11 +1016,15 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { | |||
1015 | "Rx Jabber Errors", | 1016 | "Rx Jabber Errors", |
1016 | "Rx Oversized Frames", | 1017 | "Rx Oversized Frames", |
1017 | "Rx Short Frames", | 1018 | "Rx Short Frames", |
1019 | "Rx Length Errors", | ||
1020 | "Rx CRC Errors", | ||
1021 | "Rx Port Discard", | ||
1018 | "Endnode Rx Discards", | 1022 | "Endnode Rx Discards", |
1019 | "Endnode Rx Octets", | 1023 | "Endnode Rx Octets", |
1020 | "Endnode Rx Frames", | 1024 | "Endnode Rx Frames", |
1021 | "Endnode Tx Octets", | 1025 | "Endnode Tx Octets", |
1022 | "Endnode Tx Frames", | 1026 | "Endnode Tx Frames", |
1027 | "Tx Errors", | ||
1023 | "mh detected", | 1028 | "mh detected", |
1024 | "mh pauses", | 1029 | "mh pauses", |
1025 | "Retransmission Count", | 1030 | "Retransmission Count", |
@@ -1048,19 +1053,13 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { | |||
1048 | "CM Nodes Destroyed", | 1053 | "CM Nodes Destroyed", |
1049 | "CM Accel Drops", | 1054 | "CM Accel Drops", |
1050 | "CM Resets Received", | 1055 | "CM Resets Received", |
1056 | "Free 4Kpbls", | ||
1057 | "Free 256pbls", | ||
1051 | "Timer Inits", | 1058 | "Timer Inits", |
1052 | "CQ Depth 1", | ||
1053 | "CQ Depth 4", | ||
1054 | "CQ Depth 16", | ||
1055 | "CQ Depth 24", | ||
1056 | "CQ Depth 32", | ||
1057 | "CQ Depth 128", | ||
1058 | "CQ Depth 256", | ||
1059 | "LRO aggregated", | 1059 | "LRO aggregated", |
1060 | "LRO flushed", | 1060 | "LRO flushed", |
1061 | "LRO no_desc", | 1061 | "LRO no_desc", |
1062 | }; | 1062 | }; |
1063 | |||
1064 | #define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset) | 1063 | #define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset) |
1065 | 1064 | ||
1066 | /** | 1065 | /** |
@@ -1120,12 +1119,14 @@ static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset, | |||
1120 | /** | 1119 | /** |
1121 | * nes_netdev_get_ethtool_stats | 1120 | * nes_netdev_get_ethtool_stats |
1122 | */ | 1121 | */ |
1122 | |||
1123 | static void nes_netdev_get_ethtool_stats(struct net_device *netdev, | 1123 | static void nes_netdev_get_ethtool_stats(struct net_device *netdev, |
1124 | struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values) | 1124 | struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values) |
1125 | { | 1125 | { |
1126 | u64 u64temp; | 1126 | u64 u64temp; |
1127 | struct nes_vnic *nesvnic = netdev_priv(netdev); | 1127 | struct nes_vnic *nesvnic = netdev_priv(netdev); |
1128 | struct nes_device *nesdev = nesvnic->nesdev; | 1128 | struct nes_device *nesdev = nesvnic->nesdev; |
1129 | struct nes_adapter *nesadapter = nesdev->nesadapter; | ||
1129 | u32 nic_count; | 1130 | u32 nic_count; |
1130 | u32 u32temp; | 1131 | u32 u32temp; |
1131 | u32 index = 0; | 1132 | u32 index = 0; |
@@ -1154,6 +1155,46 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, | |||
1154 | nesvnic->nesdev->port_tx_discards += u32temp; | 1155 | nesvnic->nesdev->port_tx_discards += u32temp; |
1155 | nesvnic->netstats.tx_dropped += u32temp; | 1156 | nesvnic->netstats.tx_dropped += u32temp; |
1156 | 1157 | ||
1158 | u32temp = nes_read_indexed(nesdev, | ||
1159 | NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
1160 | nesvnic->netstats.rx_dropped += u32temp; | ||
1161 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
1162 | nesvnic->nesdev->mac_rx_short_frames += u32temp; | ||
1163 | |||
1164 | u32temp = nes_read_indexed(nesdev, | ||
1165 | NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
1166 | nesvnic->netstats.rx_dropped += u32temp; | ||
1167 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
1168 | nesvnic->nesdev->mac_rx_oversized_frames += u32temp; | ||
1169 | |||
1170 | u32temp = nes_read_indexed(nesdev, | ||
1171 | NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
1172 | nesvnic->netstats.rx_dropped += u32temp; | ||
1173 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
1174 | nesvnic->nesdev->mac_rx_jabber_frames += u32temp; | ||
1175 | |||
1176 | u32temp = nes_read_indexed(nesdev, | ||
1177 | NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
1178 | nesvnic->netstats.rx_dropped += u32temp; | ||
1179 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
1180 | nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp; | ||
1181 | |||
1182 | u32temp = nes_read_indexed(nesdev, | ||
1183 | NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
1184 | nesvnic->netstats.rx_length_errors += u32temp; | ||
1185 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
1186 | |||
1187 | u32temp = nes_read_indexed(nesdev, | ||
1188 | NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200)); | ||
1189 | nesvnic->nesdev->mac_rx_errors += u32temp; | ||
1190 | nesvnic->nesdev->mac_rx_crc_errors += u32temp; | ||
1191 | nesvnic->netstats.rx_crc_errors += u32temp; | ||
1192 | |||
1193 | u32temp = nes_read_indexed(nesdev, | ||
1194 | NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200)); | ||
1195 | nesvnic->nesdev->mac_tx_errors += u32temp; | ||
1196 | nesvnic->netstats.tx_errors += u32temp; | ||
1197 | |||
1157 | for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) { | 1198 | for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) { |
1158 | if (nesvnic->qp_nic_index[nic_count] == 0xf) | 1199 | if (nesvnic->qp_nic_index[nic_count] == 0xf) |
1159 | break; | 1200 | break; |
@@ -1218,11 +1259,15 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, | |||
1218 | target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames; | 1259 | target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames; |
1219 | target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames; | 1260 | target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames; |
1220 | target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames; | 1261 | target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames; |
1262 | target_stat_values[++index] = nesvnic->netstats.rx_length_errors; | ||
1263 | target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors; | ||
1264 | target_stat_values[++index] = nesvnic->nesdev->port_rx_discards; | ||
1221 | target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard; | 1265 | target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard; |
1222 | target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets; | 1266 | target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets; |
1223 | target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames; | 1267 | target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames; |
1224 | target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets; | 1268 | target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets; |
1225 | target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames; | 1269 | target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames; |
1270 | target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors; | ||
1226 | target_stat_values[++index] = mh_detected; | 1271 | target_stat_values[++index] = mh_detected; |
1227 | target_stat_values[++index] = mh_pauses_sent; | 1272 | target_stat_values[++index] = mh_pauses_sent; |
1228 | target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; | 1273 | target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; |
@@ -1251,21 +1296,14 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, | |||
1251 | target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); | 1296 | target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); |
1252 | target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); | 1297 | target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); |
1253 | target_stat_values[++index] = atomic_read(&cm_resets_recvd); | 1298 | target_stat_values[++index] = atomic_read(&cm_resets_recvd); |
1299 | target_stat_values[++index] = nesadapter->free_4kpbl; | ||
1300 | target_stat_values[++index] = nesadapter->free_256pbl; | ||
1254 | target_stat_values[++index] = int_mod_timer_init; | 1301 | target_stat_values[++index] = int_mod_timer_init; |
1255 | target_stat_values[++index] = int_mod_cq_depth_1; | ||
1256 | target_stat_values[++index] = int_mod_cq_depth_4; | ||
1257 | target_stat_values[++index] = int_mod_cq_depth_16; | ||
1258 | target_stat_values[++index] = int_mod_cq_depth_24; | ||
1259 | target_stat_values[++index] = int_mod_cq_depth_32; | ||
1260 | target_stat_values[++index] = int_mod_cq_depth_128; | ||
1261 | target_stat_values[++index] = int_mod_cq_depth_256; | ||
1262 | target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; | 1302 | target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; |
1263 | target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; | 1303 | target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; |
1264 | target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; | 1304 | target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; |
1265 | |||
1266 | } | 1305 | } |
1267 | 1306 | ||
1268 | |||
1269 | /** | 1307 | /** |
1270 | * nes_netdev_get_drvinfo | 1308 | * nes_netdev_get_drvinfo |
1271 | */ | 1309 | */ |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 925e1f2d1d55..9bc2d744b2ea 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -3962,7 +3962,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev) | |||
3962 | struct nes_adapter *nesadapter = nesdev->nesadapter; | 3962 | struct nes_adapter *nesadapter = nesdev->nesadapter; |
3963 | int i, ret; | 3963 | int i, ret; |
3964 | 3964 | ||
3965 | ret = ib_register_device(&nesvnic->nesibdev->ibdev); | 3965 | ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL); |
3966 | if (ret) { | 3966 | if (ret) { |
3967 | return ret; | 3967 | return ret; |
3968 | } | 3968 | } |
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig new file mode 100644 index 000000000000..7c03a70c55a2 --- /dev/null +++ b/drivers/infiniband/hw/qib/Kconfig | |||
@@ -0,0 +1,7 @@ | |||
1 | config INFINIBAND_QIB | ||
2 | tristate "QLogic PCIe HCA support" | ||
3 | depends on 64BIT && NET | ||
4 | ---help--- | ||
5 | This is a low-level driver for QLogic PCIe QLE InfiniBand host | ||
6 | channel adapters. This driver does not support the QLogic | ||
7 | HyperTransport card (model QHT7140). | ||
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile new file mode 100644 index 000000000000..c6515a1b9a6a --- /dev/null +++ b/drivers/infiniband/hw/qib/Makefile | |||
@@ -0,0 +1,15 @@ | |||
1 | obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o | ||
2 | |||
3 | ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \ | ||
4 | qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \ | ||
5 | qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \ | ||
6 | qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \ | ||
7 | qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \ | ||
8 | qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \ | ||
9 | qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o | ||
10 | |||
11 | # 6120 has no fallback if no MSI interrupts, others can do INTx | ||
12 | ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o | ||
13 | |||
14 | ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o | ||
15 | ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o | ||
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h new file mode 100644 index 000000000000..32d9208efcff --- /dev/null +++ b/drivers/infiniband/hw/qib/qib.h | |||
@@ -0,0 +1,1439 @@ | |||
1 | #ifndef _QIB_KERNEL_H | ||
2 | #define _QIB_KERNEL_H | ||
3 | /* | ||
4 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
5 | * All rights reserved. | ||
6 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
7 | * | ||
8 | * This software is available to you under a choice of one of two | ||
9 | * licenses. You may choose to be licensed under the terms of the GNU | ||
10 | * General Public License (GPL) Version 2, available from the file | ||
11 | * COPYING in the main directory of this source tree, or the | ||
12 | * OpenIB.org BSD license below: | ||
13 | * | ||
14 | * Redistribution and use in source and binary forms, with or | ||
15 | * without modification, are permitted provided that the following | ||
16 | * conditions are met: | ||
17 | * | ||
18 | * - Redistributions of source code must retain the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer. | ||
21 | * | ||
22 | * - Redistributions in binary form must reproduce the above | ||
23 | * copyright notice, this list of conditions and the following | ||
24 | * disclaimer in the documentation and/or other materials | ||
25 | * provided with the distribution. | ||
26 | * | ||
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
28 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
29 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
30 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
31 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
32 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
34 | * SOFTWARE. | ||
35 | */ | ||
36 | |||
37 | /* | ||
38 | * This header file is the base header file for qlogic_ib kernel code | ||
39 | * qib_user.h serves a similar purpose for user code. | ||
40 | */ | ||
41 | |||
42 | #include <linux/interrupt.h> | ||
43 | #include <linux/pci.h> | ||
44 | #include <linux/dma-mapping.h> | ||
45 | #include <linux/mutex.h> | ||
46 | #include <linux/list.h> | ||
47 | #include <linux/scatterlist.h> | ||
48 | #include <linux/io.h> | ||
49 | #include <linux/fs.h> | ||
50 | #include <linux/completion.h> | ||
51 | #include <linux/kref.h> | ||
52 | #include <linux/sched.h> | ||
53 | |||
54 | #include "qib_common.h" | ||
55 | #include "qib_verbs.h" | ||
56 | |||
57 | /* only s/w major version of QLogic_IB we can handle */ | ||
58 | #define QIB_CHIP_VERS_MAJ 2U | ||
59 | |||
60 | /* don't care about this except printing */ | ||
61 | #define QIB_CHIP_VERS_MIN 0U | ||
62 | |||
63 | /* The Organization Unique Identifier (Mfg code), and its position in GUID */ | ||
64 | #define QIB_OUI 0x001175 | ||
65 | #define QIB_OUI_LSB 40 | ||
66 | |||
67 | /* | ||
68 | * per driver stats, either not device nor port-specific, or | ||
69 | * summed over all of the devices and ports. | ||
70 | * They are described by name via ipathfs filesystem, so layout | ||
71 | * and number of elements can change without breaking compatibility. | ||
72 | * If members are added or deleted qib_statnames[] in qib_fs.c must | ||
73 | * change to match. | ||
74 | */ | ||
75 | struct qlogic_ib_stats { | ||
76 | __u64 sps_ints; /* number of interrupts handled */ | ||
77 | __u64 sps_errints; /* number of error interrupts */ | ||
78 | __u64 sps_txerrs; /* tx-related packet errors */ | ||
79 | __u64 sps_rcverrs; /* non-crc rcv packet errors */ | ||
80 | __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */ | ||
81 | __u64 sps_nopiobufs; /* no pio bufs avail from kernel */ | ||
82 | __u64 sps_ctxts; /* number of contexts currently open */ | ||
83 | __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */ | ||
84 | __u64 sps_buffull; | ||
85 | __u64 sps_hdrfull; | ||
86 | }; | ||
87 | |||
88 | extern struct qlogic_ib_stats qib_stats; | ||
89 | extern struct pci_error_handlers qib_pci_err_handler; | ||
90 | extern struct pci_driver qib_driver; | ||
91 | |||
92 | #define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ | ||
93 | /* | ||
94 | * First-cut criterion for "device is active" is | ||
95 | * two thousand dwords combined Tx, Rx traffic per | ||
96 | * 5-second interval. SMA packets are 64 dwords, | ||
97 | * and occur "a few per second", presumably each way. | ||
98 | */ | ||
99 | #define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000) | ||
100 | |||
101 | /* | ||
102 | * Struct used to indicate which errors are logged in each of the | ||
103 | * error-counters that are logged to EEPROM. A counter is incremented | ||
104 | * _once_ (saturating at 255) for each event with any bits set in | ||
105 | * the error or hwerror register masks below. | ||
106 | */ | ||
107 | #define QIB_EEP_LOG_CNT (4) | ||
108 | struct qib_eep_log_mask { | ||
109 | u64 errs_to_log; | ||
110 | u64 hwerrs_to_log; | ||
111 | }; | ||
112 | |||
113 | /* | ||
114 | * Below contains all data related to a single context (formerly called port). | ||
115 | */ | ||
116 | struct qib_ctxtdata { | ||
117 | void **rcvegrbuf; | ||
118 | dma_addr_t *rcvegrbuf_phys; | ||
119 | /* rcvhdrq base, needs mmap before useful */ | ||
120 | void *rcvhdrq; | ||
121 | /* kernel virtual address where hdrqtail is updated */ | ||
122 | void *rcvhdrtail_kvaddr; | ||
123 | /* | ||
124 | * temp buffer for expected send setup, allocated at open, instead | ||
125 | * of each setup call | ||
126 | */ | ||
127 | void *tid_pg_list; | ||
128 | /* | ||
129 | * Shared page for kernel to signal user processes that send buffers | ||
130 | * need disarming. The process should call QIB_CMD_DISARM_BUFS | ||
131 | * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set. | ||
132 | */ | ||
133 | unsigned long *user_event_mask; | ||
134 | /* when waiting for rcv or pioavail */ | ||
135 | wait_queue_head_t wait; | ||
136 | /* | ||
137 | * rcvegr bufs base, physical, must fit | ||
138 | * in 44 bits so that 32 bit programs' 44-bit mmap64 works | ||
139 | */ | ||
140 | dma_addr_t rcvegr_phys; | ||
141 | /* mmap of hdrq, must fit in 44 bits */ | ||
142 | dma_addr_t rcvhdrq_phys; | ||
143 | dma_addr_t rcvhdrqtailaddr_phys; | ||
144 | |||
145 | /* | ||
146 | * number of opens (including slave sub-contexts) on this instance | ||
147 | * (ignoring forks, dup, etc. for now) | ||
148 | */ | ||
149 | int cnt; | ||
150 | /* | ||
151 | * how much space to leave at start of eager TID entries for | ||
152 | * protocol use, on each TID | ||
153 | */ | ||
154 | /* instead of calculating it */ | ||
155 | unsigned ctxt; | ||
156 | /* non-zero if ctxt is being shared. */ | ||
157 | u16 subctxt_cnt; | ||
158 | /* non-zero if ctxt is being shared. */ | ||
159 | u16 subctxt_id; | ||
160 | /* number of eager TID entries. */ | ||
161 | u16 rcvegrcnt; | ||
162 | /* index of first eager TID entry. */ | ||
163 | u16 rcvegr_tid_base; | ||
164 | /* number of pio bufs for this ctxt (all procs, if shared) */ | ||
165 | u32 piocnt; | ||
166 | /* first pio buffer for this ctxt */ | ||
167 | u32 pio_base; | ||
168 | /* chip offset of PIO buffers for this ctxt */ | ||
169 | u32 piobufs; | ||
170 | /* how many alloc_pages() chunks in rcvegrbuf_pages */ | ||
171 | u32 rcvegrbuf_chunks; | ||
172 | /* how many egrbufs per chunk */ | ||
173 | u32 rcvegrbufs_perchunk; | ||
174 | /* order for rcvegrbuf_pages */ | ||
175 | size_t rcvegrbuf_size; | ||
176 | /* rcvhdrq size (for freeing) */ | ||
177 | size_t rcvhdrq_size; | ||
178 | /* per-context flags for fileops/intr communication */ | ||
179 | unsigned long flag; | ||
180 | /* next expected TID to check when looking for free */ | ||
181 | u32 tidcursor; | ||
182 | /* WAIT_RCV that timed out, no interrupt */ | ||
183 | u32 rcvwait_to; | ||
184 | /* WAIT_PIO that timed out, no interrupt */ | ||
185 | u32 piowait_to; | ||
186 | /* WAIT_RCV already happened, no wait */ | ||
187 | u32 rcvnowait; | ||
188 | /* WAIT_PIO already happened, no wait */ | ||
189 | u32 pionowait; | ||
190 | /* total number of polled urgent packets */ | ||
191 | u32 urgent; | ||
192 | /* saved total number of polled urgent packets for poll edge trigger */ | ||
193 | u32 urgent_poll; | ||
194 | /* pid of process using this ctxt */ | ||
195 | pid_t pid; | ||
196 | pid_t subpid[QLOGIC_IB_MAX_SUBCTXT]; | ||
197 | /* same size as task_struct .comm[], command that opened context */ | ||
198 | char comm[16]; | ||
199 | /* pkeys set by this use of this ctxt */ | ||
200 | u16 pkeys[4]; | ||
201 | /* so file ops can get at unit */ | ||
202 | struct qib_devdata *dd; | ||
203 | /* so funcs that need physical port can get it easily */ | ||
204 | struct qib_pportdata *ppd; | ||
205 | /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */ | ||
206 | void *subctxt_uregbase; | ||
207 | /* An array of pages for the eager receive buffers * N */ | ||
208 | void *subctxt_rcvegrbuf; | ||
209 | /* An array of pages for the eager header queue entries * N */ | ||
210 | void *subctxt_rcvhdr_base; | ||
211 | /* The version of the library which opened this ctxt */ | ||
212 | u32 userversion; | ||
213 | /* Bitmask of active slaves */ | ||
214 | u32 active_slaves; | ||
215 | /* Type of packets or conditions we want to poll for */ | ||
216 | u16 poll_type; | ||
217 | /* receive packet sequence counter */ | ||
218 | u8 seq_cnt; | ||
219 | u8 redirect_seq_cnt; | ||
220 | /* ctxt rcvhdrq head offset */ | ||
221 | u32 head; | ||
222 | u32 pkt_count; | ||
223 | /* QPs waiting for context processing */ | ||
224 | struct list_head qp_wait_list; | ||
225 | }; | ||
226 | |||
227 | struct qib_sge_state; | ||
228 | |||
229 | struct qib_sdma_txreq { | ||
230 | int flags; | ||
231 | int sg_count; | ||
232 | dma_addr_t addr; | ||
233 | void (*callback)(struct qib_sdma_txreq *, int); | ||
234 | u16 start_idx; /* sdma private */ | ||
235 | u16 next_descq_idx; /* sdma private */ | ||
236 | struct list_head list; /* sdma private */ | ||
237 | }; | ||
238 | |||
239 | struct qib_sdma_desc { | ||
240 | __le64 qw[2]; | ||
241 | }; | ||
242 | |||
243 | struct qib_verbs_txreq { | ||
244 | struct qib_sdma_txreq txreq; | ||
245 | struct qib_qp *qp; | ||
246 | struct qib_swqe *wqe; | ||
247 | u32 dwords; | ||
248 | u16 hdr_dwords; | ||
249 | u16 hdr_inx; | ||
250 | struct qib_pio_header *align_buf; | ||
251 | struct qib_mregion *mr; | ||
252 | struct qib_sge_state *ss; | ||
253 | }; | ||
254 | |||
255 | #define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1 | ||
256 | #define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2 | ||
257 | #define QIB_SDMA_TXREQ_F_INTREQ 0x4 | ||
258 | #define QIB_SDMA_TXREQ_F_FREEBUF 0x8 | ||
259 | #define QIB_SDMA_TXREQ_F_FREEDESC 0x10 | ||
260 | |||
261 | #define QIB_SDMA_TXREQ_S_OK 0 | ||
262 | #define QIB_SDMA_TXREQ_S_SENDERROR 1 | ||
263 | #define QIB_SDMA_TXREQ_S_ABORTED 2 | ||
264 | #define QIB_SDMA_TXREQ_S_SHUTDOWN 3 | ||
265 | |||
266 | /* | ||
267 | * Get/Set IB link-level config parameters for f_get/set_ib_cfg() | ||
268 | * Mostly for MADs that set or query link parameters, also ipath | ||
269 | * config interfaces | ||
270 | */ | ||
271 | #define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */ | ||
272 | #define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */ | ||
273 | #define QIB_IB_CFG_LWID 3 /* currently active Link-width */ | ||
274 | #define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */ | ||
275 | #define QIB_IB_CFG_SPD 5 /* current Link spd */ | ||
276 | #define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */ | ||
277 | #define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */ | ||
278 | #define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */ | ||
279 | #define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */ | ||
280 | #define QIB_IB_CFG_OP_VLS 10 /* operational VLs */ | ||
281 | #define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */ | ||
282 | #define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */ | ||
283 | #define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */ | ||
284 | #define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */ | ||
285 | #define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */ | ||
286 | #define QIB_IB_CFG_PKEYS 16 /* update partition keys */ | ||
287 | #define QIB_IB_CFG_MTU 17 /* update MTU in IBC */ | ||
288 | #define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */ | ||
289 | #define QIB_IB_CFG_VL_HIGH_LIMIT 19 | ||
290 | #define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */ | ||
291 | #define QIB_IB_CFG_PORT 21 /* switch port we are connected to */ | ||
292 | |||
293 | /* | ||
294 | * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16 | ||
295 | * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for | ||
296 | * QIB_IB_CFG_LINKDEFAULT cmd | ||
297 | */ | ||
298 | #define IB_LINKCMD_DOWN (0 << 16) | ||
299 | #define IB_LINKCMD_ARMED (1 << 16) | ||
300 | #define IB_LINKCMD_ACTIVE (2 << 16) | ||
301 | #define IB_LINKINITCMD_NOP 0 | ||
302 | #define IB_LINKINITCMD_POLL 1 | ||
303 | #define IB_LINKINITCMD_SLEEP 2 | ||
304 | #define IB_LINKINITCMD_DISABLE 3 | ||
305 | |||
306 | /* | ||
307 | * valid states passed to qib_set_linkstate() user call | ||
308 | */ | ||
309 | #define QIB_IB_LINKDOWN 0 | ||
310 | #define QIB_IB_LINKARM 1 | ||
311 | #define QIB_IB_LINKACTIVE 2 | ||
312 | #define QIB_IB_LINKDOWN_ONLY 3 | ||
313 | #define QIB_IB_LINKDOWN_SLEEP 4 | ||
314 | #define QIB_IB_LINKDOWN_DISABLE 5 | ||
315 | |||
316 | /* | ||
317 | * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed | ||
318 | * negotiation) are used for the 3rd argument to path_f_set_ib_cfg | ||
319 | * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They | ||
320 | * are also the possible values for qib_link_speed_enabled and active. | ||
321 | * The values were chosen to match values used within the IB spec. | ||
322 | */ | ||
323 | #define QIB_IB_SDR 1 | ||
324 | #define QIB_IB_DDR 2 | ||
325 | #define QIB_IB_QDR 4 | ||
326 | |||
327 | #define QIB_DEFAULT_MTU 4096 | ||
328 | |||
329 | /* | ||
330 | * Possible IB config parameters for f_get/set_ib_table() | ||
331 | */ | ||
332 | #define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */ | ||
333 | #define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */ | ||
334 | |||
335 | /* | ||
336 | * Possible "operations" for f_rcvctrl(ppd, op, ctxt) | ||
337 | * these are bits so they can be combined, e.g. | ||
338 | * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB | ||
339 | */ | ||
340 | #define QIB_RCVCTRL_TAILUPD_ENB 0x01 | ||
341 | #define QIB_RCVCTRL_TAILUPD_DIS 0x02 | ||
342 | #define QIB_RCVCTRL_CTXT_ENB 0x04 | ||
343 | #define QIB_RCVCTRL_CTXT_DIS 0x08 | ||
344 | #define QIB_RCVCTRL_INTRAVAIL_ENB 0x10 | ||
345 | #define QIB_RCVCTRL_INTRAVAIL_DIS 0x20 | ||
346 | #define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */ | ||
347 | #define QIB_RCVCTRL_PKEY_DIS 0x80 | ||
348 | #define QIB_RCVCTRL_BP_ENB 0x0100 | ||
349 | #define QIB_RCVCTRL_BP_DIS 0x0200 | ||
350 | #define QIB_RCVCTRL_TIDFLOW_ENB 0x0400 | ||
351 | #define QIB_RCVCTRL_TIDFLOW_DIS 0x0800 | ||
352 | |||
353 | /* | ||
354 | * Possible "operations" for f_sendctrl(ppd, op, var) | ||
355 | * these are bits so they can be combined, e.g. | ||
356 | * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB | ||
357 | * Some operations (e.g. DISARM, ABORT) are known to | ||
358 | * be "one-shot", so do not modify shadow. | ||
359 | */ | ||
360 | #define QIB_SENDCTRL_DISARM (0x1000) | ||
361 | #define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM) | ||
362 | /* available (0x2000) */ | ||
363 | #define QIB_SENDCTRL_AVAIL_DIS (0x4000) | ||
364 | #define QIB_SENDCTRL_AVAIL_ENB (0x8000) | ||
365 | #define QIB_SENDCTRL_AVAIL_BLIP (0x10000) | ||
366 | #define QIB_SENDCTRL_SEND_DIS (0x20000) | ||
367 | #define QIB_SENDCTRL_SEND_ENB (0x40000) | ||
368 | #define QIB_SENDCTRL_FLUSH (0x80000) | ||
369 | #define QIB_SENDCTRL_CLEAR (0x100000) | ||
370 | #define QIB_SENDCTRL_DISARM_ALL (0x200000) | ||
371 | |||
372 | /* | ||
373 | * These are the generic indices for requesting per-port | ||
374 | * counter values via the f_portcntr function. They | ||
375 | * are always returned as 64 bit values, although most | ||
376 | * are 32 bit counters. | ||
377 | */ | ||
378 | /* send-related counters */ | ||
379 | #define QIBPORTCNTR_PKTSEND 0U | ||
380 | #define QIBPORTCNTR_WORDSEND 1U | ||
381 | #define QIBPORTCNTR_PSXMITDATA 2U | ||
382 | #define QIBPORTCNTR_PSXMITPKTS 3U | ||
383 | #define QIBPORTCNTR_PSXMITWAIT 4U | ||
384 | #define QIBPORTCNTR_SENDSTALL 5U | ||
385 | /* receive-related counters */ | ||
386 | #define QIBPORTCNTR_PKTRCV 6U | ||
387 | #define QIBPORTCNTR_PSRCVDATA 7U | ||
388 | #define QIBPORTCNTR_PSRCVPKTS 8U | ||
389 | #define QIBPORTCNTR_RCVEBP 9U | ||
390 | #define QIBPORTCNTR_RCVOVFL 10U | ||
391 | #define QIBPORTCNTR_WORDRCV 11U | ||
392 | /* IB link related error counters */ | ||
393 | #define QIBPORTCNTR_RXLOCALPHYERR 12U | ||
394 | #define QIBPORTCNTR_RXVLERR 13U | ||
395 | #define QIBPORTCNTR_ERRICRC 14U | ||
396 | #define QIBPORTCNTR_ERRVCRC 15U | ||
397 | #define QIBPORTCNTR_ERRLPCRC 16U | ||
398 | #define QIBPORTCNTR_BADFORMAT 17U | ||
399 | #define QIBPORTCNTR_ERR_RLEN 18U | ||
400 | #define QIBPORTCNTR_IBSYMBOLERR 19U | ||
401 | #define QIBPORTCNTR_INVALIDRLEN 20U | ||
402 | #define QIBPORTCNTR_UNSUPVL 21U | ||
403 | #define QIBPORTCNTR_EXCESSBUFOVFL 22U | ||
404 | #define QIBPORTCNTR_ERRLINK 23U | ||
405 | #define QIBPORTCNTR_IBLINKDOWN 24U | ||
406 | #define QIBPORTCNTR_IBLINKERRRECOV 25U | ||
407 | #define QIBPORTCNTR_LLI 26U | ||
408 | /* other error counters */ | ||
409 | #define QIBPORTCNTR_RXDROPPKT 27U | ||
410 | #define QIBPORTCNTR_VL15PKTDROP 28U | ||
411 | #define QIBPORTCNTR_ERRPKEY 29U | ||
412 | #define QIBPORTCNTR_KHDROVFL 30U | ||
413 | /* sampling counters (these are actually control registers) */ | ||
414 | #define QIBPORTCNTR_PSINTERVAL 31U | ||
415 | #define QIBPORTCNTR_PSSTART 32U | ||
416 | #define QIBPORTCNTR_PSSTAT 33U | ||
417 | |||
418 | /* how often we check for packet activity for "power on hours" (in seconds) */ | ||
419 | #define ACTIVITY_TIMER 5 | ||
420 | |||
421 | /* Below is an opaque struct. Each chip (device) can maintain | ||
422 | * private data needed for its operation, but not germane to the | ||
423 | * rest of the driver. For convenience, we define another that | ||
424 | * is chip-specific, per-port | ||
425 | */ | ||
426 | struct qib_chip_specific; | ||
427 | struct qib_chipport_specific; | ||
428 | |||
429 | enum qib_sdma_states { | ||
430 | qib_sdma_state_s00_hw_down, | ||
431 | qib_sdma_state_s10_hw_start_up_wait, | ||
432 | qib_sdma_state_s20_idle, | ||
433 | qib_sdma_state_s30_sw_clean_up_wait, | ||
434 | qib_sdma_state_s40_hw_clean_up_wait, | ||
435 | qib_sdma_state_s50_hw_halt_wait, | ||
436 | qib_sdma_state_s99_running, | ||
437 | }; | ||
438 | |||
439 | enum qib_sdma_events { | ||
440 | qib_sdma_event_e00_go_hw_down, | ||
441 | qib_sdma_event_e10_go_hw_start, | ||
442 | qib_sdma_event_e20_hw_started, | ||
443 | qib_sdma_event_e30_go_running, | ||
444 | qib_sdma_event_e40_sw_cleaned, | ||
445 | qib_sdma_event_e50_hw_cleaned, | ||
446 | qib_sdma_event_e60_hw_halted, | ||
447 | qib_sdma_event_e70_go_idle, | ||
448 | qib_sdma_event_e7220_err_halted, | ||
449 | qib_sdma_event_e7322_err_halted, | ||
450 | qib_sdma_event_e90_timer_tick, | ||
451 | }; | ||
452 | |||
453 | extern char *qib_sdma_state_names[]; | ||
454 | extern char *qib_sdma_event_names[]; | ||
455 | |||
456 | struct sdma_set_state_action { | ||
457 | unsigned op_enable:1; | ||
458 | unsigned op_intenable:1; | ||
459 | unsigned op_halt:1; | ||
460 | unsigned op_drain:1; | ||
461 | unsigned go_s99_running_tofalse:1; | ||
462 | unsigned go_s99_running_totrue:1; | ||
463 | }; | ||
464 | |||
465 | struct qib_sdma_state { | ||
466 | struct kref kref; | ||
467 | struct completion comp; | ||
468 | enum qib_sdma_states current_state; | ||
469 | struct sdma_set_state_action *set_state_action; | ||
470 | unsigned current_op; | ||
471 | unsigned go_s99_running; | ||
472 | unsigned first_sendbuf; | ||
473 | unsigned last_sendbuf; /* really last +1 */ | ||
474 | /* debugging/devel */ | ||
475 | enum qib_sdma_states previous_state; | ||
476 | unsigned previous_op; | ||
477 | enum qib_sdma_events last_event; | ||
478 | }; | ||
479 | |||
480 | struct xmit_wait { | ||
481 | struct timer_list timer; | ||
482 | u64 counter; | ||
483 | u8 flags; | ||
484 | struct cache { | ||
485 | u64 psxmitdata; | ||
486 | u64 psrcvdata; | ||
487 | u64 psxmitpkts; | ||
488 | u64 psrcvpkts; | ||
489 | u64 psxmitwait; | ||
490 | } counter_cache; | ||
491 | }; | ||
492 | |||
493 | /* | ||
494 | * The structure below encapsulates data relevant to a physical IB Port. | ||
495 | * Current chips support only one such port, but the separation | ||
496 | * clarifies things a bit. Note that to conform to IB conventions, | ||
497 | * port-numbers are one-based. The first or only port is port1. | ||
498 | */ | ||
499 | struct qib_pportdata { | ||
500 | struct qib_ibport ibport_data; | ||
501 | |||
502 | struct qib_devdata *dd; | ||
503 | struct qib_chippport_specific *cpspec; /* chip-specific per-port */ | ||
504 | struct kobject pport_kobj; | ||
505 | struct kobject sl2vl_kobj; | ||
506 | struct kobject diagc_kobj; | ||
507 | |||
508 | /* GUID for this interface, in network order */ | ||
509 | __be64 guid; | ||
510 | |||
511 | /* QIB_POLL, etc. link-state specific flags, per port */ | ||
512 | u32 lflags; | ||
513 | /* qib_lflags driver is waiting for */ | ||
514 | u32 state_wanted; | ||
515 | spinlock_t lflags_lock; | ||
516 | /* number of (port-specific) interrupts for this port -- saturates... */ | ||
517 | u32 int_counter; | ||
518 | |||
519 | /* ref count for each pkey */ | ||
520 | atomic_t pkeyrefs[4]; | ||
521 | |||
522 | /* | ||
523 | * this address is mapped readonly into user processes so they can | ||
524 | * get status cheaply, whenever they want. One qword of status per port | ||
525 | */ | ||
526 | u64 *statusp; | ||
527 | |||
528 | /* SendDMA related entries */ | ||
529 | spinlock_t sdma_lock; | ||
530 | struct qib_sdma_state sdma_state; | ||
531 | unsigned long sdma_buf_jiffies; | ||
532 | struct qib_sdma_desc *sdma_descq; | ||
533 | u64 sdma_descq_added; | ||
534 | u64 sdma_descq_removed; | ||
535 | u16 sdma_descq_cnt; | ||
536 | u16 sdma_descq_tail; | ||
537 | u16 sdma_descq_head; | ||
538 | u16 sdma_next_intr; | ||
539 | u16 sdma_reset_wait; | ||
540 | u8 sdma_generation; | ||
541 | struct tasklet_struct sdma_sw_clean_up_task; | ||
542 | struct list_head sdma_activelist; | ||
543 | |||
544 | dma_addr_t sdma_descq_phys; | ||
545 | volatile __le64 *sdma_head_dma; /* DMA'ed by chip */ | ||
546 | dma_addr_t sdma_head_phys; | ||
547 | |||
548 | wait_queue_head_t state_wait; /* for state_wanted */ | ||
549 | |||
550 | /* HoL blocking for SMP replies */ | ||
551 | unsigned hol_state; | ||
552 | struct timer_list hol_timer; | ||
553 | |||
554 | /* | ||
555 | * Shadow copies of registers; size indicates read access size. | ||
556 | * Most of them are readonly, but some are write-only registers, | ||
557 | * where we manipulate the bits in the shadow copy, and then write | ||
558 | * the shadow copy to qlogic_ib. | ||
559 | * | ||
560 | * We deliberately make most of these 32 bits, since they have | ||
561 | * restricted range. For any that we read, we want to generate 32 | ||
562 | * bit accesses, since Opteron will generate 2 separate 32 bit HT | ||
563 | * transactions for a 64 bit read, and we want to avoid unnecessary | ||
564 | * bus transactions. | ||
565 | */ | ||
566 | |||
567 | /* This is the 64 bit group */ | ||
568 | /* last ibcstatus. opaque outside chip-specific code */ | ||
569 | u64 lastibcstat; | ||
570 | |||
571 | /* these are the "32 bit" regs */ | ||
572 | |||
573 | /* | ||
574 | * the following two are 32-bit bitmasks, but {test,clear,set}_bit | ||
575 | * all expect bit fields to be "unsigned long" | ||
576 | */ | ||
577 | unsigned long p_rcvctrl; /* shadow per-port rcvctrl */ | ||
578 | unsigned long p_sendctrl; /* shadow per-port sendctrl */ | ||
579 | |||
580 | u32 ibmtu; /* The MTU programmed for this unit */ | ||
581 | /* | ||
582 | * Current max size IB packet (in bytes) including IB headers, that | ||
583 | * we can send. Changes when ibmtu changes. | ||
584 | */ | ||
585 | u32 ibmaxlen; | ||
586 | /* | ||
587 | * ibmaxlen at init time, limited by chip and by receive buffer | ||
588 | * size. Not changed after init. | ||
589 | */ | ||
590 | u32 init_ibmaxlen; | ||
591 | /* LID programmed for this instance */ | ||
592 | u16 lid; | ||
593 | /* list of pkeys programmed; 0 if not set */ | ||
594 | u16 pkeys[4]; | ||
595 | /* LID mask control */ | ||
596 | u8 lmc; | ||
597 | u8 link_width_supported; | ||
598 | u8 link_speed_supported; | ||
599 | u8 link_width_enabled; | ||
600 | u8 link_speed_enabled; | ||
601 | u8 link_width_active; | ||
602 | u8 link_speed_active; | ||
603 | u8 vls_supported; | ||
604 | u8 vls_operational; | ||
605 | /* Rx Polarity inversion (compensate for ~tx on partner) */ | ||
606 | u8 rx_pol_inv; | ||
607 | |||
608 | u8 hw_pidx; /* physical port index */ | ||
609 | u8 port; /* IB port number and index into dd->pports - 1 */ | ||
610 | |||
611 | u8 delay_mult; | ||
612 | |||
613 | /* used to override LED behavior */ | ||
614 | u8 led_override; /* Substituted for normal value, if non-zero */ | ||
615 | u16 led_override_timeoff; /* delta to next timer event */ | ||
616 | u8 led_override_vals[2]; /* Alternates per blink-frame */ | ||
617 | u8 led_override_phase; /* Just counts, LSB picks from vals[] */ | ||
618 | atomic_t led_override_timer_active; | ||
619 | /* Used to flash LEDs in override mode */ | ||
620 | struct timer_list led_override_timer; | ||
621 | struct xmit_wait cong_stats; | ||
622 | struct timer_list symerr_clear_timer; | ||
623 | }; | ||
624 | |||
625 | /* Observers. Not to be taken lightly, possibly not to ship. */ | ||
626 | /* | ||
627 | * If a diag read or write is to (bottom <= offset <= top), | ||
628 | * the "hook" is called, allowing, e.g., shadows to be | ||
629 | * updated in sync with the driver. struct diag_observer | ||
630 | * is the "visible" part. | ||
631 | */ | ||
632 | struct diag_observer; | ||
633 | |||
634 | typedef int (*diag_hook) (struct qib_devdata *dd, | ||
635 | const struct diag_observer *op, | ||
636 | u32 offs, u64 *data, u64 mask, int only_32); | ||
637 | |||
638 | struct diag_observer { | ||
639 | diag_hook hook; | ||
640 | u32 bottom; | ||
641 | u32 top; | ||
642 | }; | ||
643 | |||
644 | extern int qib_register_observer(struct qib_devdata *dd, | ||
645 | const struct diag_observer *op); | ||
646 | |||
647 | /* Only declared here, not defined. Private to diags */ | ||
648 | struct diag_observer_list_elt; | ||
649 | |||
650 | /* device data struct now contains only "general per-device" info. | ||
651 | * Fields related to a physical IB port are in a qib_pportdata struct | ||
652 | * (described above), while fields only used by a particular chip-type are in | ||
653 | * a qib_chipdata struct, whose contents are opaque to this file. | ||
654 | */ | ||
655 | struct qib_devdata { | ||
656 | struct qib_ibdev verbs_dev; /* must be first */ | ||
657 | struct list_head list; | ||
658 | /* pointers to related structs for this device */ | ||
659 | /* pci access data structure */ | ||
660 | struct pci_dev *pcidev; | ||
661 | struct cdev *user_cdev; | ||
662 | struct cdev *diag_cdev; | ||
663 | struct device *user_device; | ||
664 | struct device *diag_device; | ||
665 | |||
666 | /* mem-mapped pointer to base of chip regs */ | ||
667 | u64 __iomem *kregbase; | ||
668 | /* end of mem-mapped chip space excluding sendbuf and user regs */ | ||
669 | u64 __iomem *kregend; | ||
670 | /* physical address of chip for io_remap, etc. */ | ||
671 | resource_size_t physaddr; | ||
672 | /* qib_cfgctxts pointers */ | ||
673 | struct qib_ctxtdata **rcd; /* Receive Context Data */ | ||
674 | |||
675 | /* qib_pportdata, points to array of (physical) port-specific | ||
676 | * data structs, indexed by pidx (0..n-1) | ||
677 | */ | ||
678 | struct qib_pportdata *pport; | ||
679 | struct qib_chip_specific *cspec; /* chip-specific */ | ||
680 | |||
681 | /* kvirt address of 1st 2k pio buffer */ | ||
682 | void __iomem *pio2kbase; | ||
683 | /* kvirt address of 1st 4k pio buffer */ | ||
684 | void __iomem *pio4kbase; | ||
685 | /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */ | ||
686 | void __iomem *piobase; | ||
687 | /* mem-mapped pointer to base of user chip regs (if using WC PAT) */ | ||
688 | u64 __iomem *userbase; | ||
689 | /* | ||
690 | * points to area where PIOavail registers will be DMA'ed. | ||
691 | * Has to be on a page of its own, because the page will be | ||
692 | * mapped into user program space. This copy is *ONLY* ever | ||
693 | * written by DMA, not by the driver! Need a copy per device | ||
694 | * when we get to multiple devices | ||
695 | */ | ||
696 | volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */ | ||
697 | /* physical address where updates occur */ | ||
698 | dma_addr_t pioavailregs_phys; | ||
699 | |||
700 | /* device-specific implementations of functions needed by | ||
701 | * common code. Contrary to previous consensus, we can't | ||
702 | * really just point to a device-specific table, because we | ||
703 | * may need to "bend", e.g. *_f_put_tid | ||
704 | */ | ||
705 | /* fallback to alternate interrupt type if possible */ | ||
706 | int (*f_intr_fallback)(struct qib_devdata *); | ||
707 | /* hard reset chip */ | ||
708 | int (*f_reset)(struct qib_devdata *); | ||
709 | void (*f_quiet_serdes)(struct qib_pportdata *); | ||
710 | int (*f_bringup_serdes)(struct qib_pportdata *); | ||
711 | int (*f_early_init)(struct qib_devdata *); | ||
712 | void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *); | ||
713 | void (*f_put_tid)(struct qib_devdata *, u64 __iomem*, | ||
714 | u32, unsigned long); | ||
715 | void (*f_cleanup)(struct qib_devdata *); | ||
716 | void (*f_setextled)(struct qib_pportdata *, u32); | ||
717 | /* fill out chip-specific fields */ | ||
718 | int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *); | ||
719 | /* free irq */ | ||
720 | void (*f_free_irq)(struct qib_devdata *); | ||
721 | struct qib_message_header *(*f_get_msgheader) | ||
722 | (struct qib_devdata *, __le32 *); | ||
723 | void (*f_config_ctxts)(struct qib_devdata *); | ||
724 | int (*f_get_ib_cfg)(struct qib_pportdata *, int); | ||
725 | int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32); | ||
726 | int (*f_set_ib_loopback)(struct qib_pportdata *, const char *); | ||
727 | int (*f_get_ib_table)(struct qib_pportdata *, int, void *); | ||
728 | int (*f_set_ib_table)(struct qib_pportdata *, int, void *); | ||
729 | u32 (*f_iblink_state)(u64); | ||
730 | u8 (*f_ibphys_portstate)(u64); | ||
731 | void (*f_xgxs_reset)(struct qib_pportdata *); | ||
732 | /* per chip actions needed for IB Link up/down changes */ | ||
733 | int (*f_ib_updown)(struct qib_pportdata *, int, u64); | ||
734 | u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *); | ||
735 | /* Read/modify/write of GPIO pins (potentially chip-specific) */ | ||
736 | int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir, | ||
737 | u32 mask); | ||
738 | /* Enable writes to config EEPROM (if supported) */ | ||
739 | int (*f_eeprom_wen)(struct qib_devdata *dd, int wen); | ||
740 | /* | ||
741 | * modify rcvctrl shadow[s] and write to appropriate chip-regs. | ||
742 | * see above QIB_RCVCTRL_xxx_ENB/DIS for operations. | ||
743 | * (ctxt == -1) means "all contexts", only meaningful for | ||
744 | * clearing. Could remove if chip_spec shutdown properly done. | ||
745 | */ | ||
746 | void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op, | ||
747 | int ctxt); | ||
748 | /* Read/modify/write sendctrl appropriately for op and port. */ | ||
749 | void (*f_sendctrl)(struct qib_pportdata *, u32 op); | ||
750 | void (*f_set_intr_state)(struct qib_devdata *, u32); | ||
751 | void (*f_set_armlaunch)(struct qib_devdata *, u32); | ||
752 | void (*f_wantpiobuf_intr)(struct qib_devdata *, u32); | ||
753 | int (*f_late_initreg)(struct qib_devdata *); | ||
754 | int (*f_init_sdma_regs)(struct qib_pportdata *); | ||
755 | u16 (*f_sdma_gethead)(struct qib_pportdata *); | ||
756 | int (*f_sdma_busy)(struct qib_pportdata *); | ||
757 | void (*f_sdma_update_tail)(struct qib_pportdata *, u16); | ||
758 | void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned); | ||
759 | void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned); | ||
760 | void (*f_sdma_hw_clean_up)(struct qib_pportdata *); | ||
761 | void (*f_sdma_hw_start_up)(struct qib_pportdata *); | ||
762 | void (*f_sdma_init_early)(struct qib_pportdata *); | ||
763 | void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32); | ||
764 | void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32); | ||
765 | u32 (*f_hdrqempty)(struct qib_ctxtdata *); | ||
766 | u64 (*f_portcntr)(struct qib_pportdata *, u32); | ||
767 | u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **, | ||
768 | u64 **); | ||
769 | u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32, | ||
770 | char **, u64 **); | ||
771 | u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8); | ||
772 | void (*f_initvl15_bufs)(struct qib_devdata *); | ||
773 | void (*f_init_ctxt)(struct qib_ctxtdata *); | ||
774 | void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32, | ||
775 | struct qib_ctxtdata *); | ||
776 | void (*f_writescratch)(struct qib_devdata *, u32); | ||
777 | int (*f_tempsense_rd)(struct qib_devdata *, int regnum); | ||
778 | |||
779 | char *boardname; /* human readable board info */ | ||
780 | |||
781 | /* template for writing TIDs */ | ||
782 | u64 tidtemplate; | ||
783 | /* value to write to free TIDs */ | ||
784 | u64 tidinvalid; | ||
785 | |||
786 | /* number of registers used for pioavail */ | ||
787 | u32 pioavregs; | ||
788 | /* device (not port) flags, basically device capabilities */ | ||
789 | u32 flags; | ||
790 | /* last buffer for user use */ | ||
791 | u32 lastctxt_piobuf; | ||
792 | |||
793 | /* saturating counter of (non-port-specific) device interrupts */ | ||
794 | u32 int_counter; | ||
795 | |||
796 | /* pio bufs allocated per ctxt */ | ||
797 | u32 pbufsctxt; | ||
798 | /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */ | ||
799 | u32 ctxts_extrabuf; | ||
800 | /* | ||
801 | * number of ctxts configured as max; zero means use the number the | ||
802 | * chip supports; configuring fewer gives more pio bufs/ctxt, etc. | ||
803 | */ | ||
804 | u32 cfgctxts; | ||
805 | |||
806 | /* | ||
807 | * hint that we should update pioavailshadow before | ||
808 | * looking for a PIO buffer | ||
809 | */ | ||
810 | u32 upd_pio_shadow; | ||
811 | |||
812 | /* internal debugging stats */ | ||
813 | u32 maxpkts_call; | ||
814 | u32 avgpkts_call; | ||
815 | u64 nopiobufs; | ||
816 | |||
817 | /* PCI Vendor ID (here for NodeInfo) */ | ||
818 | u16 vendorid; | ||
819 | /* PCI Device ID (here for NodeInfo) */ | ||
820 | u16 deviceid; | ||
821 | /* for write combining settings */ | ||
822 | unsigned long wc_cookie; | ||
823 | unsigned long wc_base; | ||
824 | unsigned long wc_len; | ||
825 | |||
826 | /* shadow copy of struct page *'s for exp tid pages */ | ||
827 | struct page **pageshadow; | ||
828 | /* shadow copy of dma handles for exp tid pages */ | ||
829 | dma_addr_t *physshadow; | ||
830 | u64 __iomem *egrtidbase; | ||
831 | spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */ | ||
832 | /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */ | ||
833 | spinlock_t uctxt_lock; /* rcd and user context changes */ | ||
834 | /* | ||
835 | * per unit status, see also portdata statusp | ||
836 | * mapped readonly into user processes so they can get unit and | ||
837 | * IB link status cheaply | ||
838 | */ | ||
839 | u64 *devstatusp; | ||
840 | char *freezemsg; /* freeze msg if hw error put chip in freeze */ | ||
841 | u32 freezelen; /* max length of freezemsg */ | ||
842 | /* timer used to prevent stats overflow, error throttling, etc. */ | ||
843 | struct timer_list stats_timer; | ||
844 | |||
845 | /* timer to verify interrupts work, and fallback if possible */ | ||
846 | struct timer_list intrchk_timer; | ||
847 | unsigned long ureg_align; /* user register alignment */ | ||
848 | |||
849 | /* | ||
850 | * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and | ||
851 | * pio_writing. | ||
852 | */ | ||
853 | spinlock_t pioavail_lock; | ||
854 | |||
855 | /* | ||
856 | * Shadow copies of registers; size indicates read access size. | ||
857 | * Most of them are readonly, but some are write-only registers, | ||
858 | * where we manipulate the bits in the shadow copy, and then write | ||
859 | * the shadow copy to qlogic_ib. | ||
860 | * | ||
861 | * We deliberately make most of these 32 bits, since they have | ||
862 | * restricted range. For any that we read, we want to generate 32 | ||
863 | * bit accesses, since Opteron will generate 2 separate 32 bit HT | ||
864 | * transactions for a 64 bit read, and we want to avoid unnecessary | ||
865 | * bus transactions. | ||
866 | */ | ||
867 | |||
868 | /* This is the 64 bit group */ | ||
869 | |||
870 | unsigned long pioavailshadow[6]; | ||
871 | /* bitmap of send buffers available for the kernel to use with PIO. */ | ||
872 | unsigned long pioavailkernel[6]; | ||
873 | /* bitmap of send buffers which need to be disarmed. */ | ||
874 | unsigned long pio_need_disarm[3]; | ||
875 | /* bitmap of send buffers which are being written to. */ | ||
876 | unsigned long pio_writing[3]; | ||
877 | /* kr_revision shadow */ | ||
878 | u64 revision; | ||
879 | /* Base GUID for device (from eeprom, network order) */ | ||
880 | __be64 base_guid; | ||
881 | |||
882 | /* | ||
883 | * kr_sendpiobufbase value (chip offset of pio buffers), and the | ||
884 | * base of the 2KB buffers (user processes only use 2K) | ||
885 | */ | ||
886 | u64 piobufbase; | ||
887 | u32 pio2k_bufbase; | ||
888 | |||
889 | /* these are the "32 bit" regs */ | ||
890 | |||
891 | /* number of GUIDs in the flash for this interface */ | ||
892 | u32 nguid; | ||
893 | /* | ||
894 | * the following two are 32-bit bitmasks, but {test,clear,set}_bit | ||
895 | * all expect bit fields to be "unsigned long" | ||
896 | */ | ||
897 | unsigned long rcvctrl; /* shadow per device rcvctrl */ | ||
898 | unsigned long sendctrl; /* shadow per device sendctrl */ | ||
899 | |||
900 | /* value we put in kr_rcvhdrcnt */ | ||
901 | u32 rcvhdrcnt; | ||
902 | /* value we put in kr_rcvhdrsize */ | ||
903 | u32 rcvhdrsize; | ||
904 | /* value we put in kr_rcvhdrentsize */ | ||
905 | u32 rcvhdrentsize; | ||
906 | /* kr_ctxtcnt value */ | ||
907 | u32 ctxtcnt; | ||
908 | /* kr_pagealign value */ | ||
909 | u32 palign; | ||
910 | /* number of "2KB" PIO buffers */ | ||
911 | u32 piobcnt2k; | ||
912 | /* size in bytes of "2KB" PIO buffers */ | ||
913 | u32 piosize2k; | ||
914 | /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */ | ||
915 | u32 piosize2kmax_dwords; | ||
916 | /* number of "4KB" PIO buffers */ | ||
917 | u32 piobcnt4k; | ||
918 | /* size in bytes of "4KB" PIO buffers */ | ||
919 | u32 piosize4k; | ||
920 | /* kr_rcvegrbase value */ | ||
921 | u32 rcvegrbase; | ||
922 | /* kr_rcvtidbase value */ | ||
923 | u32 rcvtidbase; | ||
924 | /* kr_rcvtidcnt value */ | ||
925 | u32 rcvtidcnt; | ||
926 | /* kr_userregbase */ | ||
927 | u32 uregbase; | ||
928 | /* shadow the control register contents */ | ||
929 | u32 control; | ||
930 | |||
931 | /* chip address space used by 4k pio buffers */ | ||
932 | u32 align4k; | ||
933 | /* size of each rcvegrbuffer */ | ||
934 | u32 rcvegrbufsize; | ||
936 | /* localbus width (1, 2, 4, 8, 16, 32) from config space */ | ||
936 | u32 lbus_width; | ||
937 | /* localbus speed in MHz */ | ||
938 | u32 lbus_speed; | ||
939 | int unit; /* unit # of this chip */ | ||
940 | |||
941 | /* start of CHIP_SPEC move to chipspec, but need code changes */ | ||
942 | /* low and high portions of MSI capability/vector */ | ||
943 | u32 msi_lo; | ||
944 | /* saved after PCIe init for restore after reset */ | ||
945 | u32 msi_hi; | ||
946 | /* MSI data (vector) saved for restore */ | ||
947 | u16 msi_data; | ||
948 | /* so we can rewrite it after a chip reset */ | ||
949 | u32 pcibar0; | ||
950 | /* so we can rewrite it after a chip reset */ | ||
951 | u32 pcibar1; | ||
952 | u64 rhdrhead_intr_off; | ||
953 | |||
954 | /* | ||
955 | * ASCII serial number, from flash, large enough for original | ||
956 | * all-digit strings, and the longer QLogic serial number format | ||
957 | */ | ||
958 | u8 serial[16]; | ||
959 | /* human readable board version */ | ||
960 | u8 boardversion[96]; | ||
961 | u8 lbus_info[32]; /* human readable localbus info */ | ||
962 | /* chip major rev, from qib_revision */ | ||
963 | u8 majrev; | ||
964 | /* chip minor rev, from qib_revision */ | ||
965 | u8 minrev; | ||
966 | |||
967 | /* Misc small ints */ | ||
968 | /* Number of physical ports available */ | ||
969 | u8 num_pports; | ||
970 | /* Lowest context number which can be used by user processes */ | ||
971 | u8 first_user_ctxt; | ||
972 | u8 n_krcv_queues; | ||
973 | u8 qpn_mask; | ||
974 | u8 skip_kctxt_mask; | ||
975 | |||
976 | u16 rhf_offset; /* offset of RHF within receive header entry */ | ||
977 | |||
978 | /* | ||
979 | * GPIO pins for twsi-connected devices, and device code for eeprom | ||
980 | */ | ||
981 | u8 gpio_sda_num; | ||
982 | u8 gpio_scl_num; | ||
983 | u8 twsi_eeprom_dev; | ||
984 | u8 board_atten; | ||
985 | |||
986 | /* Support (including locks) for EEPROM logging of errors and time */ | ||
987 | /* control access to actual counters, timer */ | ||
988 | spinlock_t eep_st_lock; | ||
989 | /* control high-level access to EEPROM */ | ||
990 | struct mutex eep_lock; | ||
991 | uint64_t traffic_wds; | ||
992 | /* active time is kept in seconds, but logged in hours */ | ||
993 | atomic_t active_time; | ||
994 | /* Below are nominal shadow of EEPROM, new since last EEPROM update */ | ||
995 | uint8_t eep_st_errs[QIB_EEP_LOG_CNT]; | ||
996 | uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT]; | ||
997 | uint16_t eep_hrs; | ||
998 | /* | ||
999 | * masks for which bits of errs, hwerrs that cause | ||
1000 | * each of the counters to increment. | ||
1001 | */ | ||
1002 | struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT]; | ||
1003 | struct qib_diag_client *diag_client; | ||
1004 | spinlock_t qib_diag_trans_lock; /* protect diag observer ops */ | ||
1005 | struct diag_observer_list_elt *diag_observer_list; | ||
1006 | |||
1007 | u8 psxmitwait_supported; | ||
1008 | /* cycle length of PS* counters in HW (in picoseconds) */ | ||
1009 | u16 psxmitwait_check_rate; | ||
1010 | }; | ||
1011 | |||
1012 | /* hol_state values */ | ||
1013 | #define QIB_HOL_UP 0 | ||
1014 | #define QIB_HOL_INIT 1 | ||
1015 | |||
1016 | #define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0) | ||
1017 | #define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1) | ||
1018 | #define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2) | ||
1019 | #define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3) | ||
1020 | #define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4) | ||
1021 | |||
1022 | /* operation types for f_txchk_change() */ | ||
1023 | #define TXCHK_CHG_TYPE_DIS1 3 | ||
1024 | #define TXCHK_CHG_TYPE_ENAB1 2 | ||
1025 | #define TXCHK_CHG_TYPE_KERN 1 | ||
1026 | #define TXCHK_CHG_TYPE_USER 0 | ||
1027 | |||
1028 | #define QIB_CHASE_TIME msecs_to_jiffies(145) | ||
1029 | #define QIB_CHASE_DIS_TIME msecs_to_jiffies(160) | ||
1030 | |||
1031 | /* Private data for file operations */ | ||
1032 | struct qib_filedata { | ||
1033 | struct qib_ctxtdata *rcd; | ||
1034 | unsigned subctxt; | ||
1035 | unsigned tidcursor; | ||
1036 | struct qib_user_sdma_queue *pq; | ||
1037 | int rec_cpu_num; /* for cpu affinity; -1 if none */ | ||
1038 | }; | ||
1039 | |||
1040 | extern struct list_head qib_dev_list; | ||
1041 | extern spinlock_t qib_devs_lock; | ||
1042 | extern struct qib_devdata *qib_lookup(int unit); | ||
1043 | extern u32 qib_cpulist_count; | ||
1044 | extern unsigned long *qib_cpulist; | ||
1045 | |||
1046 | extern unsigned qib_wc_pat; | ||
1047 | int qib_init(struct qib_devdata *, int); | ||
1048 | int init_chip_wc_pat(struct qib_devdata *dd, u32); | ||
1049 | int qib_enable_wc(struct qib_devdata *dd); | ||
1050 | void qib_disable_wc(struct qib_devdata *dd); | ||
1051 | int qib_count_units(int *npresentp, int *nupp); | ||
1052 | int qib_count_active_units(void); | ||
1053 | |||
1054 | int qib_cdev_init(int minor, const char *name, | ||
1055 | const struct file_operations *fops, | ||
1056 | struct cdev **cdevp, struct device **devp); | ||
1057 | void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp); | ||
1058 | int qib_dev_init(void); | ||
1059 | void qib_dev_cleanup(void); | ||
1060 | |||
1061 | int qib_diag_add(struct qib_devdata *); | ||
1062 | void qib_diag_remove(struct qib_devdata *); | ||
1063 | void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64); | ||
1064 | void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */ | ||
1065 | |||
1066 | int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err); | ||
1067 | void qib_bad_intrstatus(struct qib_devdata *); | ||
1068 | void qib_handle_urcv(struct qib_devdata *, u64); | ||
1069 | |||
1070 | /* clean up any per-chip chip-specific stuff */ | ||
1071 | void qib_chip_cleanup(struct qib_devdata *); | ||
1072 | /* clean up any chip type-specific stuff */ | ||
1073 | void qib_chip_done(void); | ||
1074 | |||
1075 | /* check to see if we have to force ordering for write combining */ | ||
1076 | int qib_unordered_wc(void); | ||
1077 | void qib_pio_copy(void __iomem *to, const void *from, size_t count); | ||
1078 | |||
1079 | void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned); | ||
1080 | int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *); | ||
1081 | void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned); | ||
1082 | void qib_cancel_sends(struct qib_pportdata *); | ||
1083 | |||
1084 | int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *); | ||
1085 | int qib_setup_eagerbufs(struct qib_ctxtdata *); | ||
1086 | void qib_set_ctxtcnt(struct qib_devdata *); | ||
1087 | int qib_create_ctxts(struct qib_devdata *dd); | ||
1088 | struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32); | ||
1089 | void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); | ||
1090 | void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *); | ||
1091 | |||
1092 | u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *); | ||
1093 | int qib_reset_device(int); | ||
1094 | int qib_wait_linkstate(struct qib_pportdata *, u32, int); | ||
1095 | int qib_set_linkstate(struct qib_pportdata *, u8); | ||
1096 | int qib_set_mtu(struct qib_pportdata *, u16); | ||
1097 | int qib_set_lid(struct qib_pportdata *, u32, u8); | ||
1098 | void qib_hol_down(struct qib_pportdata *); | ||
1099 | void qib_hol_init(struct qib_pportdata *); | ||
1100 | void qib_hol_up(struct qib_pportdata *); | ||
1101 | void qib_hol_event(unsigned long); | ||
1102 | void qib_disable_after_error(struct qib_devdata *); | ||
1103 | int qib_set_uevent_bits(struct qib_pportdata *, const int); | ||
1104 | |||
1105 | /* for use in system calls, where we want to know device type, etc. */ | ||
1106 | #define ctxt_fp(fp) \ | ||
1107 | (((struct qib_filedata *)(fp)->private_data)->rcd) | ||
1108 | #define subctxt_fp(fp) \ | ||
1109 | (((struct qib_filedata *)(fp)->private_data)->subctxt) | ||
1110 | #define tidcursor_fp(fp) \ | ||
1111 | (((struct qib_filedata *)(fp)->private_data)->tidcursor) | ||
1112 | #define user_sdma_queue_fp(fp) \ | ||
1113 | (((struct qib_filedata *)(fp)->private_data)->pq) | ||
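These accessors assume fp->private_data points at a struct qib_filedata set up at open time. A minimal illustrative sketch of their use follows; the handler name and body are hypothetical and not part of this patch:

    /* Hypothetical illustration only: pull per-open state out of
     * fp->private_data via the accessor macros above. */
    static int qib_example_fileop(struct file *fp)
    {
            struct qib_ctxtdata *rcd = ctxt_fp(fp);
            struct qib_user_sdma_queue *pq = user_sdma_queue_fp(fp);

            if (!rcd || !pq)
                    return -EINVAL;
            /* subctxt_fp(fp) and tidcursor_fp(fp) select per-subcontext state */
            return 0;
    }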
1114 | |||
1115 | static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd) | ||
1116 | { | ||
1117 | return ppd->dd; | ||
1118 | } | ||
1119 | |||
1120 | static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev) | ||
1121 | { | ||
1122 | return container_of(dev, struct qib_devdata, verbs_dev); | ||
1123 | } | ||
1124 | |||
1125 | static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev) | ||
1126 | { | ||
1127 | return dd_from_dev(to_idev(ibdev)); | ||
1128 | } | ||
1129 | |||
1130 | static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp) | ||
1131 | { | ||
1132 | return container_of(ibp, struct qib_pportdata, ibport_data); | ||
1133 | } | ||
1134 | |||
1135 | static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port) | ||
1136 | { | ||
1137 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
1138 | unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ | ||
1139 | |||
1140 | WARN_ON(pidx >= dd->num_pports); | ||
1141 | return &dd->pport[pidx].ibport_data; | ||
1142 | } | ||
1143 | |||
1144 | /* | ||
1145 | * values for dd->flags (_device_ related flags) | ||
1146 | */ | ||
1147 | #define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */ | ||
1148 | #define QIB_INITTED 0x2 /* chip and driver up and initted */ | ||
1149 | #define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */ | ||
1150 | #define QIB_PRESENT 0x8 /* chip accesses can be done */ | ||
1151 | #define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */ | ||
1152 | #define QIB_HAS_THRESH_UPDATE 0x40 | ||
1153 | #define QIB_HAS_SDMA_TIMEOUT 0x80 | ||
1154 | #define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */ | ||
1155 | #define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA not used */ | ||
1156 | #define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */ | ||
1157 | #define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */ | ||
1158 | #define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */ | ||
1159 | #define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */ | ||
1160 | #define QIB_BADINTR 0x8000 /* severe interrupt problems */ | ||
1161 | #define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */ | ||
1162 | #define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */ | ||
1163 | |||
1164 | /* | ||
1165 | * values for ppd->lflags (_ib_port_ related flags) | ||
1166 | */ | ||
1167 | #define QIBL_LINKV 0x1 /* IB link state valid */ | ||
1168 | #define QIBL_LINKDOWN 0x8 /* IB link is down */ | ||
1169 | #define QIBL_LINKINIT 0x10 /* IB link level is up */ | ||
1170 | #define QIBL_LINKARMED 0x20 /* IB link is ARMED */ | ||
1171 | #define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */ | ||
1172 | /* leave a gap for more IB-link state */ | ||
1173 | #define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */ | ||
1174 | #define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */ | ||
1175 | #define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced, | ||
1176 | * Do not try to bring up */ | ||
1177 | #define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */ | ||
1178 | |||
1179 | /* IB dword length mask in PBC (lower 11 bits); same for all chips */ | ||
1180 | #define QIB_PBC_LENGTH_MASK ((1 << 11) - 1) | ||
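Worked out, ((1 << 11) - 1) is 0x7FF, so the low 11 bits of the PBC carry the packet length in dwords, as the comment above states.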
1181 | |||
1182 | |||
1183 | /* ctxt_flag bit offsets */ | ||
1184 | /* waiting for a packet to arrive */ | ||
1185 | #define QIB_CTXT_WAITING_RCV 2 | ||
1186 | /* master has not finished initializing */ | ||
1187 | #define QIB_CTXT_MASTER_UNINIT 4 | ||
1188 | /* waiting for an urgent packet to arrive */ | ||
1189 | #define QIB_CTXT_WAITING_URG 5 | ||
1190 | |||
1191 | /* free up any allocated data at closes */ | ||
1192 | void qib_free_data(struct qib_ctxtdata *dd); | ||
1193 | void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned, | ||
1194 | u32, struct qib_ctxtdata *); | ||
1195 | struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *, | ||
1196 | const struct pci_device_id *); | ||
1197 | struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *, | ||
1198 | const struct pci_device_id *); | ||
1199 | struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *, | ||
1200 | const struct pci_device_id *); | ||
1201 | void qib_free_devdata(struct qib_devdata *); | ||
1202 | struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra); | ||
1203 | |||
1204 | #define QIB_TWSI_NO_DEV 0xFF | ||
1205 | /* Below qib_twsi_ functions must be called with eep_lock held */ | ||
1206 | int qib_twsi_reset(struct qib_devdata *dd); | ||
1207 | int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer, | ||
1208 | int len); | ||
1209 | int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, | ||
1210 | const void *buffer, int len); | ||
1211 | void qib_get_eeprom_info(struct qib_devdata *); | ||
1212 | int qib_update_eeprom_log(struct qib_devdata *dd); | ||
1213 | void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr); | ||
1214 | void qib_dump_lookup_output_queue(struct qib_devdata *); | ||
1215 | void qib_force_pio_avail_update(struct qib_devdata *); | ||
1216 | void qib_clear_symerror_on_linkup(unsigned long opaque); | ||
1217 | |||
1218 | /* | ||
1219 | * Set LED override, only the two LSBs have "public" meaning, but | ||
1220 | * any non-zero value substitutes them for the Link and LinkTrain | ||
1221 | * LED states. | ||
1222 | */ | ||
1223 | #define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */ | ||
1224 | #define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */ | ||
1225 | void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val); | ||
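A quick usage sketch (illustrative only; assumes a valid ppd): any non-zero value overrides the hardware LED states, and writing zero presumably returns the LEDs to normal operation.

    /* Sketch: force both LEDs on for identification, then restore. */
    qib_set_led_override(ppd, QIB_LED_PHYS | QIB_LED_LOG);
    /* ... later ... */
    qib_set_led_override(ppd, 0);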
1226 | |||
1227 | /* send dma routines */ | ||
1228 | int qib_setup_sdma(struct qib_pportdata *); | ||
1229 | void qib_teardown_sdma(struct qib_pportdata *); | ||
1230 | void __qib_sdma_intr(struct qib_pportdata *); | ||
1231 | void qib_sdma_intr(struct qib_pportdata *); | ||
1232 | int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *, | ||
1233 | u32, struct qib_verbs_txreq *); | ||
1234 | /* ppd->sdma_lock should be locked before calling this. */ | ||
1235 | int qib_sdma_make_progress(struct qib_pportdata *dd); | ||
1236 | |||
1237 | /* must be called under qib_sdma_lock */ | ||
1238 | static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd) | ||
1239 | { | ||
1240 | return ppd->sdma_descq_cnt - | ||
1241 | (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1; | ||
1242 | } | ||
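As a worked example of the formula above: with sdma_descq_cnt = 256, sdma_descq_added = 300 and sdma_descq_removed = 100, there are 200 descriptors outstanding and the routine returns 256 - 200 - 1 = 55; the trailing -1 presumably keeps one slot unused, the usual ring-buffer way to distinguish a full queue from an empty one.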
1243 | |||
1244 | static inline int __qib_sdma_running(struct qib_pportdata *ppd) | ||
1245 | { | ||
1246 | return ppd->sdma_state.current_state == qib_sdma_state_s99_running; | ||
1247 | } | ||
1248 | int qib_sdma_running(struct qib_pportdata *); | ||
1249 | |||
1250 | void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events); | ||
1251 | void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events); | ||
1252 | |||
1253 | /* | ||
1254 | * number of words used for protocol header if not set by qib_userinit(); | ||
1255 | */ | ||
1256 | #define QIB_DFLT_RCVHDRSIZE 9 | ||
1257 | |||
1258 | /* | ||
1259 | * We need to be able to handle an IB header of at least 24 dwords. | ||
1260 | * We need the rcvhdrq large enough to handle largest IB header, but | ||
1261 | * still have room for a 2KB MTU standard IB packet. | ||
1262 | * Additionally, some processor/memory controller combinations | ||
1263 | * benefit quite strongly from having the DMA'ed data be cacheline | ||
1264 | * aligned and a cacheline multiple, so we set the size to 32 dwords | ||
1265 | * (2 64-byte primary cachelines for pretty much all processors of | ||
1266 | * interest). The alignment hurts nothing, other than using somewhat | ||
1267 | * more memory. | ||
1268 | */ | ||
1269 | #define QIB_RCVHDR_ENTSIZE 32 | ||
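Concretely: 32 dwords x 4 bytes/dword = 128 bytes per rcvhdrq entry, i.e. exactly two 64-byte cachelines, with room to spare over the 24-dword maximum IB header mentioned above.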
1270 | |||
1271 | int qib_get_user_pages(unsigned long, size_t, struct page **); | ||
1272 | void qib_release_user_pages(struct page **, size_t); | ||
1273 | int qib_eeprom_read(struct qib_devdata *, u8, void *, int); | ||
1274 | int qib_eeprom_write(struct qib_devdata *, u8, const void *, int); | ||
1275 | u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32); | ||
1276 | void qib_sendbuf_done(struct qib_devdata *, unsigned); | ||
1277 | |||
1278 | static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd) | ||
1279 | { | ||
1280 | *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL; | ||
1281 | } | ||
1282 | |||
1283 | static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd) | ||
1284 | { | ||
1285 | /* | ||
1286 | * volatile because it's a DMA target from the chip, the routine is | ||
1287 | * inlined, and we don't want register caching or reordering. | ||
1288 | */ | ||
1289 | return (u32) le64_to_cpu( | ||
1290 | *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */ | ||
1291 | } | ||
1292 | |||
1293 | static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd) | ||
1294 | { | ||
1295 | const struct qib_devdata *dd = rcd->dd; | ||
1296 | u32 hdrqtail; | ||
1297 | |||
1298 | if (dd->flags & QIB_NODMA_RTAIL) { | ||
1299 | __le32 *rhf_addr; | ||
1300 | u32 seq; | ||
1301 | |||
1302 | rhf_addr = (__le32 *) rcd->rcvhdrq + | ||
1303 | rcd->head + dd->rhf_offset; | ||
1304 | seq = qib_hdrget_seq(rhf_addr); | ||
1305 | hdrqtail = rcd->head; | ||
1306 | if (seq == rcd->seq_cnt) | ||
1307 | hdrqtail++; | ||
1308 | } else | ||
1309 | hdrqtail = qib_get_rcvhdrtail(rcd); | ||
1310 | |||
1311 | return hdrqtail; | ||
1312 | } | ||
1313 | |||
1314 | /* | ||
1315 | * sysfs interface. | ||
1316 | */ | ||
1317 | |||
1318 | extern const char ib_qib_version[]; | ||
1319 | |||
1320 | int qib_device_create(struct qib_devdata *); | ||
1321 | void qib_device_remove(struct qib_devdata *); | ||
1322 | |||
1323 | int qib_create_port_files(struct ib_device *ibdev, u8 port_num, | ||
1324 | struct kobject *kobj); | ||
1325 | int qib_verbs_register_sysfs(struct qib_devdata *); | ||
1326 | void qib_verbs_unregister_sysfs(struct qib_devdata *); | ||
1327 | /* Hook for sysfs read of QSFP */ | ||
1328 | extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len); | ||
1329 | |||
1330 | int __init qib_init_qibfs(void); | ||
1331 | int __exit qib_exit_qibfs(void); | ||
1332 | |||
1333 | int qibfs_add(struct qib_devdata *); | ||
1334 | int qibfs_remove(struct qib_devdata *); | ||
1335 | |||
1336 | int qib_pcie_init(struct pci_dev *, const struct pci_device_id *); | ||
1337 | int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *, | ||
1338 | const struct pci_device_id *); | ||
1339 | void qib_pcie_ddcleanup(struct qib_devdata *); | ||
1340 | int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *); | ||
1341 | int qib_reinit_intr(struct qib_devdata *); | ||
1342 | void qib_enable_intx(struct pci_dev *); | ||
1343 | void qib_nomsi(struct qib_devdata *); | ||
1344 | void qib_nomsix(struct qib_devdata *); | ||
1345 | void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *); | ||
1346 | void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8); | ||
1347 | |||
1348 | /* | ||
1349 | * dma_addr wrappers - all 0's invalid for hw | ||
1350 | */ | ||
1351 | dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long, | ||
1352 | size_t, int); | ||
1353 | const char *qib_get_unit_name(int unit); | ||
1354 | |||
1355 | /* | ||
1356 | * Flush write combining store buffers (if present) and perform a write | ||
1357 | * barrier. | ||
1358 | */ | ||
1359 | #if defined(CONFIG_X86_64) | ||
1360 | #define qib_flush_wc() asm volatile("sfence" : : : "memory") | ||
1361 | #else | ||
1362 | #define qib_flush_wc() wmb() /* no reorder around wc flush */ | ||
1363 | #endif | ||
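A sketch of the intended use (piobuf, hdr and hdrwords below are illustrative variables, not from this patch): copy into the write-combining-mapped PIO buffer, then flush so the stores reach the chip before any dependent operation.

    /* Sketch only: piobuf/hdr/hdrwords are illustrative variables. */
    qib_pio_copy(piobuf, hdr, hdrwords);
    qib_flush_wc();         /* make the WC stores visible before proceeding */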
1364 | |||
1365 | /* global module parameter variables */ | ||
1366 | extern unsigned qib_ibmtu; | ||
1367 | extern ushort qib_cfgctxts; | ||
1368 | extern ushort qib_num_cfg_vls; | ||
1369 | extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */ | ||
1370 | extern unsigned qib_n_krcv_queues; | ||
1371 | extern unsigned qib_sdma_fetch_arb; | ||
1372 | extern unsigned qib_compat_ddr_negotiate; | ||
1373 | extern int qib_special_trigger; | ||
1374 | |||
1375 | extern struct mutex qib_mutex; | ||
1376 | |||
1377 | /* Number of seconds before our card status check... */ | ||
1378 | #define STATUS_TIMEOUT 60 | ||
1379 | |||
1380 | #define QIB_DRV_NAME "ib_qib" | ||
1381 | #define QIB_USER_MINOR_BASE 0 | ||
1382 | #define QIB_TRACE_MINOR 127 | ||
1383 | #define QIB_DIAGPKT_MINOR 128 | ||
1384 | #define QIB_DIAG_MINOR_BASE 129 | ||
1385 | #define QIB_NMINORS 255 | ||
1386 | |||
1387 | #define PCI_VENDOR_ID_PATHSCALE 0x1fc1 | ||
1388 | #define PCI_VENDOR_ID_QLOGIC 0x1077 | ||
1389 | #define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10 | ||
1390 | #define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220 | ||
1391 | #define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322 | ||
1392 | |||
1393 | /* | ||
1394 | * qib_early_err is used (only!) to print early errors before devdata is | ||
1395 | * allocated, or when dd->pcidev may not be valid, and at the tail end of | ||
1396 | * cleanup when devdata may have been freed, etc. qib_dev_porterr is | ||
1397 | * the same as qib_dev_err, but is used when the message really needs | ||
1398 | * the IB port# to be definitive as to what's happening. | ||
1399 | * All of these go to the trace log, and the trace log entry is done | ||
1400 | * first to avoid possible serial port delays from printk. | ||
1401 | */ | ||
1402 | #define qib_early_err(dev, fmt, ...) \ | ||
1403 | do { \ | ||
1404 | dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \ | ||
1405 | } while (0) | ||
1406 | |||
1407 | #define qib_dev_err(dd, fmt, ...) \ | ||
1408 | do { \ | ||
1409 | dev_err(&(dd)->pcidev->dev, "%s: " fmt, \ | ||
1410 | qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \ | ||
1411 | } while (0) | ||
1412 | |||
1413 | #define qib_dev_porterr(dd, port, fmt, ...) \ | ||
1414 | do { \ | ||
1415 | dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ | ||
1416 | qib_get_unit_name((dd)->unit), (dd)->unit, (port), \ | ||
1417 | ##__VA_ARGS__); \ | ||
1418 | } while (0) | ||
1419 | |||
1420 | #define qib_devinfo(pcidev, fmt, ...) \ | ||
1421 | do { \ | ||
1422 | dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \ | ||
1423 | } while (0) | ||
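Illustrative calls (messages and variables here are made up for the example): qib_early_err() takes a struct device pointer because devdata may not exist yet, while the other macros take the devdata.

    /* Illustrative only */
    qib_early_err(&pdev->dev, "Unable to allocate devdata\n");
    qib_dev_err(dd, "Hardware error recovery failed\n");
    qib_dev_porterr(dd, 1, "Link went down unexpectedly\n");
    qib_devinfo(dd->pcidev, "%u contexts available\n", nctxts);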
1424 | |||
1425 | /* | ||
1426 | * this is used for formatting hw error messages... | ||
1427 | */ | ||
1428 | struct qib_hwerror_msgs { | ||
1429 | u64 mask; | ||
1430 | const char *msg; | ||
1431 | }; | ||
1432 | |||
1433 | #define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b } | ||
1434 | |||
1435 | /* in qib_intr.c... */ | ||
1436 | void qib_format_hwerrors(u64 hwerrs, | ||
1437 | const struct qib_hwerror_msgs *hwerrmsgs, | ||
1438 | size_t nhwerrmsgs, char *msg, size_t lmsg); | ||
1439 | #endif /* _QIB_KERNEL_H */ | ||
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h new file mode 100644 index 000000000000..e16cb6f7de2c --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_6120_regs.h | |||
@@ -0,0 +1,977 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | /* This file is mechanically generated from RTL. Any hand-edits will be lost! */ | ||
34 | |||
35 | #define QIB_6120_Revision_OFFS 0x0 | ||
36 | #define QIB_6120_Revision_R_Simulator_LSB 0x3F | ||
37 | #define QIB_6120_Revision_R_Simulator_RMASK 0x1 | ||
38 | #define QIB_6120_Revision_Reserved_LSB 0x28 | ||
39 | #define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF | ||
40 | #define QIB_6120_Revision_BoardID_LSB 0x20 | ||
41 | #define QIB_6120_Revision_BoardID_RMASK 0xFF | ||
42 | #define QIB_6120_Revision_R_SW_LSB 0x18 | ||
43 | #define QIB_6120_Revision_R_SW_RMASK 0xFF | ||
44 | #define QIB_6120_Revision_R_Arch_LSB 0x10 | ||
45 | #define QIB_6120_Revision_R_Arch_RMASK 0xFF | ||
46 | #define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8 | ||
47 | #define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF | ||
48 | #define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0 | ||
49 | #define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF | ||
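Each field in this generated file is described by a bit offset (_LSB) and a right-justified mask (_RMASK). A hedged sketch of extracting fields from a raw 64-bit Revision value (how that value is read is chip-specific and not shown here):

    /* Sketch only: 'rev' is a raw 64-bit value read from the Revision register. */
    u8 majrev = (rev >> QIB_6120_Revision_R_ChipRevMajor_LSB) &
                QIB_6120_Revision_R_ChipRevMajor_RMASK;
    u8 minrev = (rev >> QIB_6120_Revision_R_ChipRevMinor_LSB) &
                QIB_6120_Revision_R_ChipRevMinor_RMASK;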
50 | |||
51 | #define QIB_6120_Control_OFFS 0x8 | ||
52 | #define QIB_6120_Control_TxLatency_LSB 0x4 | ||
53 | #define QIB_6120_Control_TxLatency_RMASK 0x1 | ||
54 | #define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3 | ||
55 | #define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1 | ||
56 | #define QIB_6120_Control_LinkEn_LSB 0x2 | ||
57 | #define QIB_6120_Control_LinkEn_RMASK 0x1 | ||
58 | #define QIB_6120_Control_FreezeMode_LSB 0x1 | ||
59 | #define QIB_6120_Control_FreezeMode_RMASK 0x1 | ||
60 | #define QIB_6120_Control_SyncReset_LSB 0x0 | ||
61 | #define QIB_6120_Control_SyncReset_RMASK 0x1 | ||
62 | |||
63 | #define QIB_6120_PageAlign_OFFS 0x10 | ||
64 | |||
65 | #define QIB_6120_PortCnt_OFFS 0x18 | ||
66 | |||
67 | #define QIB_6120_SendRegBase_OFFS 0x30 | ||
68 | |||
69 | #define QIB_6120_UserRegBase_OFFS 0x38 | ||
70 | |||
71 | #define QIB_6120_CntrRegBase_OFFS 0x40 | ||
72 | |||
73 | #define QIB_6120_Scratch_OFFS 0x48 | ||
74 | #define QIB_6120_Scratch_TopHalf_LSB 0x20 | ||
75 | #define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF | ||
76 | #define QIB_6120_Scratch_BottomHalf_LSB 0x0 | ||
77 | #define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF | ||
78 | |||
79 | #define QIB_6120_IntBlocked_OFFS 0x60 | ||
80 | #define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F | ||
81 | #define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1 | ||
82 | #define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E | ||
83 | #define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1 | ||
84 | #define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D | ||
85 | #define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1 | ||
86 | #define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C | ||
87 | #define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1 | ||
88 | #define QIB_6120_IntBlocked_Reserved_LSB 0xF | ||
89 | #define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF | ||
90 | #define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10 | ||
91 | #define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1 | ||
92 | #define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF | ||
93 | #define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1 | ||
94 | #define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE | ||
95 | #define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1 | ||
96 | #define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD | ||
97 | #define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1 | ||
98 | #define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC | ||
99 | #define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1 | ||
100 | #define QIB_6120_IntBlocked_Reserved1_LSB 0x5 | ||
101 | #define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F | ||
102 | #define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4 | ||
103 | #define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1 | ||
104 | #define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3 | ||
105 | #define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1 | ||
106 | #define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2 | ||
107 | #define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1 | ||
108 | #define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1 | ||
109 | #define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1 | ||
110 | #define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0 | ||
111 | #define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1 | ||
112 | |||
113 | #define QIB_6120_IntMask_OFFS 0x68 | ||
114 | #define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F | ||
115 | #define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1 | ||
116 | #define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E | ||
117 | #define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1 | ||
118 | #define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D | ||
119 | #define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1 | ||
120 | #define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C | ||
121 | #define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1 | ||
122 | #define QIB_6120_IntMask_Reserved_LSB 0x11 | ||
123 | #define QIB_6120_IntMask_Reserved_RMASK 0x7FF | ||
124 | #define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10 | ||
125 | #define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1 | ||
126 | #define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF | ||
127 | #define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1 | ||
128 | #define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE | ||
129 | #define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1 | ||
130 | #define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD | ||
131 | #define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1 | ||
132 | #define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC | ||
133 | #define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1 | ||
134 | #define QIB_6120_IntMask_Reserved1_LSB 0x5 | ||
135 | #define QIB_6120_IntMask_Reserved1_RMASK 0x7F | ||
136 | #define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4 | ||
137 | #define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1 | ||
138 | #define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3 | ||
139 | #define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1 | ||
140 | #define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2 | ||
141 | #define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1 | ||
142 | #define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1 | ||
143 | #define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1 | ||
144 | #define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0 | ||
145 | #define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1 | ||
146 | |||
147 | #define QIB_6120_IntStatus_OFFS 0x70 | ||
148 | #define QIB_6120_IntStatus_Error_LSB 0x1F | ||
149 | #define QIB_6120_IntStatus_Error_RMASK 0x1 | ||
150 | #define QIB_6120_IntStatus_PioSent_LSB 0x1E | ||
151 | #define QIB_6120_IntStatus_PioSent_RMASK 0x1 | ||
152 | #define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D | ||
153 | #define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1 | ||
154 | #define QIB_6120_IntStatus_assertGPIO_LSB 0x1C | ||
155 | #define QIB_6120_IntStatus_assertGPIO_RMASK 0x1 | ||
156 | #define QIB_6120_IntStatus_Reserved_LSB 0xF | ||
157 | #define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF | ||
158 | #define QIB_6120_IntStatus_RcvAvail4_LSB 0x10 | ||
159 | #define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1 | ||
160 | #define QIB_6120_IntStatus_RcvAvail3_LSB 0xF | ||
161 | #define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1 | ||
162 | #define QIB_6120_IntStatus_RcvAvail2_LSB 0xE | ||
163 | #define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1 | ||
164 | #define QIB_6120_IntStatus_RcvAvail1_LSB 0xD | ||
165 | #define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1 | ||
166 | #define QIB_6120_IntStatus_RcvAvail0_LSB 0xC | ||
167 | #define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1 | ||
168 | #define QIB_6120_IntStatus_Reserved1_LSB 0x5 | ||
169 | #define QIB_6120_IntStatus_Reserved1_RMASK 0x7F | ||
170 | #define QIB_6120_IntStatus_RcvUrg4_LSB 0x4 | ||
171 | #define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1 | ||
172 | #define QIB_6120_IntStatus_RcvUrg3_LSB 0x3 | ||
173 | #define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1 | ||
174 | #define QIB_6120_IntStatus_RcvUrg2_LSB 0x2 | ||
175 | #define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1 | ||
176 | #define QIB_6120_IntStatus_RcvUrg1_LSB 0x1 | ||
177 | #define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1 | ||
178 | #define QIB_6120_IntStatus_RcvUrg0_LSB 0x0 | ||
179 | #define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1 | ||
180 | |||
181 | #define QIB_6120_IntClear_OFFS 0x78 | ||
182 | #define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F | ||
183 | #define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1 | ||
184 | #define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E | ||
185 | #define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1 | ||
186 | #define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D | ||
187 | #define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1 | ||
188 | #define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C | ||
189 | #define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1 | ||
190 | #define QIB_6120_IntClear_Reserved_LSB 0xF | ||
191 | #define QIB_6120_IntClear_Reserved_RMASK 0x1FFF | ||
192 | #define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10 | ||
193 | #define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1 | ||
194 | #define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF | ||
195 | #define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1 | ||
196 | #define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE | ||
197 | #define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1 | ||
198 | #define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD | ||
199 | #define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1 | ||
200 | #define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC | ||
201 | #define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1 | ||
202 | #define QIB_6120_IntClear_Reserved1_LSB 0x5 | ||
203 | #define QIB_6120_IntClear_Reserved1_RMASK 0x7F | ||
204 | #define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4 | ||
205 | #define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1 | ||
206 | #define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3 | ||
207 | #define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1 | ||
208 | #define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2 | ||
209 | #define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1 | ||
210 | #define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1 | ||
211 | #define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1 | ||
212 | #define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0 | ||
213 | #define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1 | ||
214 | |||
215 | #define QIB_6120_ErrMask_OFFS 0x80 | ||
216 | #define QIB_6120_ErrMask_Reserved_LSB 0x34 | ||
217 | #define QIB_6120_ErrMask_Reserved_RMASK 0xFFF | ||
218 | #define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33 | ||
219 | #define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1 | ||
220 | #define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32 | ||
221 | #define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1 | ||
222 | #define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31 | ||
223 | #define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1 | ||
224 | #define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30 | ||
225 | #define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1 | ||
226 | #define QIB_6120_ErrMask_Reserved1_LSB 0x26 | ||
227 | #define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF | ||
228 | #define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25 | ||
229 | #define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1 | ||
230 | #define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24 | ||
231 | #define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1 | ||
232 | #define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23 | ||
233 | #define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1 | ||
234 | #define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22 | ||
235 | #define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1 | ||
236 | #define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21 | ||
237 | #define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1 | ||
238 | #define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20 | ||
239 | #define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1 | ||
240 | #define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F | ||
241 | #define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1 | ||
242 | #define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E | ||
243 | #define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1 | ||
244 | #define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D | ||
245 | #define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1 | ||
246 | #define QIB_6120_ErrMask_Reserved2_LSB 0x12 | ||
247 | #define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF | ||
248 | #define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11 | ||
249 | #define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1 | ||
250 | #define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10 | ||
251 | #define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1 | ||
252 | #define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF | ||
253 | #define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1 | ||
254 | #define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE | ||
255 | #define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1 | ||
256 | #define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD | ||
257 | #define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1 | ||
258 | #define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC | ||
259 | #define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1 | ||
260 | #define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB | ||
261 | #define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1 | ||
262 | #define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA | ||
263 | #define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1 | ||
264 | #define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9 | ||
265 | #define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1 | ||
266 | #define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8 | ||
267 | #define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1 | ||
268 | #define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7 | ||
269 | #define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1 | ||
270 | #define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6 | ||
271 | #define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1 | ||
272 | #define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5 | ||
273 | #define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1 | ||
274 | #define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4 | ||
275 | #define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1 | ||
276 | #define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3 | ||
277 | #define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1 | ||
278 | #define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2 | ||
279 | #define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1 | ||
280 | #define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1 | ||
281 | #define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1 | ||
282 | #define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0 | ||
283 | #define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1 | ||
284 | |||
285 | #define QIB_6120_ErrStatus_OFFS 0x88 | ||
286 | #define QIB_6120_ErrStatus_Reserved_LSB 0x34 | ||
287 | #define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF | ||
288 | #define QIB_6120_ErrStatus_HardwareErr_LSB 0x33 | ||
289 | #define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1 | ||
290 | #define QIB_6120_ErrStatus_ResetNegated_LSB 0x32 | ||
291 | #define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1 | ||
292 | #define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31 | ||
293 | #define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1 | ||
294 | #define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30 | ||
295 | #define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1 | ||
296 | #define QIB_6120_ErrStatus_Reserved1_LSB 0x26 | ||
297 | #define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF | ||
298 | #define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25 | ||
299 | #define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1 | ||
300 | #define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24 | ||
301 | #define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1 | ||
302 | #define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23 | ||
303 | #define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1 | ||
304 | #define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22 | ||
305 | #define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1 | ||
306 | #define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21 | ||
307 | #define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1 | ||
308 | #define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20 | ||
309 | #define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1 | ||
310 | #define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F | ||
311 | #define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1 | ||
312 | #define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E | ||
313 | #define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1 | ||
314 | #define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D | ||
315 | #define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1 | ||
316 | #define QIB_6120_ErrStatus_Reserved2_LSB 0x12 | ||
317 | #define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF | ||
318 | #define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11 | ||
319 | #define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1 | ||
320 | #define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10 | ||
321 | #define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1 | ||
322 | #define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF | ||
323 | #define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1 | ||
324 | #define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE | ||
325 | #define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1 | ||
326 | #define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD | ||
327 | #define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1 | ||
328 | #define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC | ||
329 | #define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1 | ||
330 | #define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB | ||
331 | #define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1 | ||
332 | #define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA | ||
333 | #define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1 | ||
334 | #define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9 | ||
335 | #define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1 | ||
336 | #define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8 | ||
337 | #define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1 | ||
338 | #define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7 | ||
339 | #define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1 | ||
340 | #define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6 | ||
341 | #define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1 | ||
342 | #define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5 | ||
343 | #define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1 | ||
344 | #define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4 | ||
345 | #define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1 | ||
346 | #define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3 | ||
347 | #define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1 | ||
348 | #define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2 | ||
349 | #define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1 | ||
350 | #define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1 | ||
351 | #define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1 | ||
352 | #define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0 | ||
353 | #define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1 | ||
354 | |||
355 | #define QIB_6120_ErrClear_OFFS 0x90 | ||
356 | #define QIB_6120_ErrClear_Reserved_LSB 0x34 | ||
357 | #define QIB_6120_ErrClear_Reserved_RMASK 0xFFF | ||
358 | #define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33 | ||
359 | #define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1 | ||
360 | #define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32 | ||
361 | #define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1 | ||
362 | #define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31 | ||
363 | #define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1 | ||
364 | #define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30 | ||
365 | #define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1 | ||
366 | #define QIB_6120_ErrClear_Reserved1_LSB 0x26 | ||
367 | #define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF | ||
368 | #define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25 | ||
369 | #define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1 | ||
370 | #define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24 | ||
371 | #define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1 | ||
372 | #define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23 | ||
373 | #define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1 | ||
374 | #define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22 | ||
375 | #define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1 | ||
376 | #define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21 | ||
377 | #define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1 | ||
378 | #define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20 | ||
379 | #define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1 | ||
380 | #define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F | ||
381 | #define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1 | ||
382 | #define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E | ||
383 | #define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1 | ||
384 | #define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D | ||
385 | #define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1 | ||
386 | #define QIB_6120_ErrClear_Reserved2_LSB 0x12 | ||
387 | #define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF | ||
388 | #define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11 | ||
389 | #define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1 | ||
390 | #define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10 | ||
391 | #define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1 | ||
392 | #define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF | ||
393 | #define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1 | ||
394 | #define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE | ||
395 | #define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1 | ||
396 | #define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD | ||
397 | #define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1 | ||
398 | #define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC | ||
399 | #define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1 | ||
400 | #define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB | ||
401 | #define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1 | ||
402 | #define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA | ||
403 | #define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1 | ||
404 | #define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9 | ||
405 | #define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1 | ||
406 | #define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8 | ||
407 | #define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1 | ||
408 | #define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7 | ||
409 | #define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1 | ||
410 | #define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6 | ||
411 | #define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1 | ||
412 | #define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5 | ||
413 | #define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1 | ||
414 | #define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4 | ||
415 | #define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1 | ||
416 | #define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3 | ||
417 | #define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1 | ||
418 | #define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2 | ||
419 | #define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1 | ||
420 | #define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1 | ||
421 | #define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1 | ||
422 | #define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0 | ||
423 | #define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1 | ||
424 | |||
425 | #define QIB_6120_HwErrMask_OFFS 0x98 | ||
426 | #define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F | ||
427 | #define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1 | ||
428 | #define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E | ||
429 | #define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1 | ||
430 | #define QIB_6120_HwErrMask_Reserved_LSB 0x3D | ||
431 | #define QIB_6120_HwErrMask_Reserved_RMASK 0x1 | ||
432 | #define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C | ||
433 | #define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1 | ||
434 | #define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B | ||
435 | #define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1 | ||
436 | #define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A | ||
437 | #define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1 | ||
438 | #define QIB_6120_HwErrMask_Reserved1_LSB 0x39 | ||
439 | #define QIB_6120_HwErrMask_Reserved1_RMASK 0x1 | ||
440 | #define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38 | ||
441 | #define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1 | ||
442 | #define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37 | ||
443 | #define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1 | ||
444 | #define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36 | ||
445 | #define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1 | ||
446 | #define QIB_6120_HwErrMask_Reserved2_LSB 0x33 | ||
447 | #define QIB_6120_HwErrMask_Reserved2_RMASK 0x7 | ||
448 | #define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C | ||
449 | #define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F | ||
450 | #define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28 | ||
451 | #define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF | ||
452 | #define QIB_6120_HwErrMask_Reserved3_LSB 0x22 | ||
453 | #define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F | ||
454 | #define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F | ||
455 | #define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7 | ||
456 | #define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E | ||
457 | #define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1 | ||
458 | #define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D | ||
459 | #define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1 | ||
460 | #define QIB_6120_HwErrMask_Reserved4_LSB 0x6 | ||
461 | #define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF | ||
462 | #define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0 | ||
463 | #define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F | ||
464 | |||
465 | #define QIB_6120_HwErrStatus_OFFS 0xA0 | ||
466 | #define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F | ||
467 | #define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1 | ||
468 | #define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E | ||
469 | #define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1 | ||
470 | #define QIB_6120_HwErrStatus_Reserved_LSB 0x3D | ||
471 | #define QIB_6120_HwErrStatus_Reserved_RMASK 0x1 | ||
472 | #define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C | ||
473 | #define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1 | ||
474 | #define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B | ||
475 | #define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1 | ||
476 | #define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A | ||
477 | #define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1 | ||
478 | #define QIB_6120_HwErrStatus_Reserved1_LSB 0x39 | ||
479 | #define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1 | ||
480 | #define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38 | ||
481 | #define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1 | ||
482 | #define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37 | ||
483 | #define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1 | ||
484 | #define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36 | ||
485 | #define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1 | ||
486 | #define QIB_6120_HwErrStatus_Reserved2_LSB 0x33 | ||
487 | #define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7 | ||
488 | #define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C | ||
489 | #define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F | ||
490 | #define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28 | ||
491 | #define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF | ||
492 | #define QIB_6120_HwErrStatus_Reserved3_LSB 0x22 | ||
493 | #define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F | ||
494 | #define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F | ||
495 | #define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7 | ||
496 | #define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E | ||
497 | #define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1 | ||
498 | #define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D | ||
499 | #define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1 | ||
500 | #define QIB_6120_HwErrStatus_Reserved4_LSB 0x6 | ||
501 | #define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF | ||
502 | #define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0 | ||
503 | #define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F | ||
504 | |||
505 | #define QIB_6120_HwErrClear_OFFS 0xA8 | ||
506 | #define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F | ||
507 | #define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1 | ||
508 | #define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E | ||
509 | #define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1 | ||
510 | #define QIB_6120_HwErrClear_Reserved_LSB 0x3D | ||
511 | #define QIB_6120_HwErrClear_Reserved_RMASK 0x1 | ||
512 | #define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C | ||
513 | #define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1 | ||
514 | #define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B | ||
515 | #define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1 | ||
516 | #define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A | ||
517 | #define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1 | ||
518 | #define QIB_6120_HwErrClear_Reserved1_LSB 0x39 | ||
519 | #define QIB_6120_HwErrClear_Reserved1_RMASK 0x1 | ||
520 | #define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38 | ||
521 | #define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1 | ||
522 | #define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37 | ||
523 | #define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1 | ||
524 | #define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36 | ||
525 | #define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1 | ||
526 | #define QIB_6120_HwErrClear_Reserved2_LSB 0x33 | ||
527 | #define QIB_6120_HwErrClear_Reserved2_RMASK 0x7 | ||
528 | #define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C | ||
529 | #define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F | ||
530 | #define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28 | ||
531 | #define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF | ||
532 | #define QIB_6120_HwErrClear_Reserved3_LSB 0x22 | ||
533 | #define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F | ||
534 | #define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F | ||
535 | #define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7 | ||
536 | #define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E | ||
537 | #define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1 | ||
538 | #define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D | ||
539 | #define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1 | ||
540 | #define QIB_6120_HwErrClear_Reserved4_LSB 0x6 | ||
541 | #define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF | ||
542 | #define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0 | ||
543 | #define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F | ||
544 | |||
545 | #define QIB_6120_HwDiagCtrl_OFFS 0xB0 | ||
546 | #define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F | ||
547 | #define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1 | ||
548 | #define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E | ||
549 | #define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1 | ||
550 | #define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D | ||
551 | #define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1 | ||
552 | #define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C | ||
553 | #define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1 | ||
554 | #define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33 | ||
555 | #define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF | ||
556 | #define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C | ||
557 | #define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F | ||
558 | #define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28 | ||
559 | #define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF | ||
560 | #define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23 | ||
561 | #define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F | ||
562 | #define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F | ||
563 | #define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF | ||
564 | #define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6 | ||
565 | #define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF | ||
566 | #define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0 | ||
567 | #define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F | ||
568 | |||
569 | #define QIB_6120_IBCStatus_OFFS 0xC0 | ||
570 | #define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F | ||
571 | #define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1 | ||
572 | #define QIB_6120_IBCStatus_TxReady_LSB 0x1E | ||
573 | #define QIB_6120_IBCStatus_TxReady_RMASK 0x1 | ||
574 | #define QIB_6120_IBCStatus_Reserved_LSB 0x7 | ||
575 | #define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF | ||
576 | #define QIB_6120_IBCStatus_LinkState_LSB 0x4 | ||
577 | #define QIB_6120_IBCStatus_LinkState_RMASK 0x7 | ||
578 | #define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0 | ||
579 | #define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF | ||
580 | |||
581 | #define QIB_6120_IBCCtrl_OFFS 0xC8 | ||
582 | #define QIB_6120_IBCCtrl_Loopback_LSB 0x3F | ||
583 | #define QIB_6120_IBCCtrl_Loopback_RMASK 0x1 | ||
584 | #define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E | ||
585 | #define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1 | ||
586 | #define QIB_6120_IBCCtrl_Reserved_LSB 0x2B | ||
587 | #define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF | ||
588 | #define QIB_6120_IBCCtrl_CreditScale_LSB 0x28 | ||
589 | #define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7 | ||
590 | #define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24 | ||
591 | #define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF | ||
592 | #define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20 | ||
593 | #define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF | ||
594 | #define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F | ||
595 | #define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1 | ||
596 | #define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14 | ||
597 | #define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF | ||
598 | #define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12 | ||
599 | #define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3 | ||
600 | #define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10 | ||
601 | #define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3 | ||
602 | #define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8 | ||
603 | #define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF | ||
604 | #define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0 | ||
605 | #define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF | ||
606 | |||
607 | #define QIB_6120_EXTStatus_OFFS 0xD0 | ||
608 | #define QIB_6120_EXTStatus_GPIOIn_LSB 0x30 | ||
609 | #define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF | ||
610 | #define QIB_6120_EXTStatus_Reserved_LSB 0x20 | ||
611 | #define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF | ||
612 | #define QIB_6120_EXTStatus_Reserved1_LSB 0x10 | ||
613 | #define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF | ||
614 | #define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF | ||
615 | #define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1 | ||
616 | #define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE | ||
617 | #define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1 | ||
618 | #define QIB_6120_EXTStatus_Reserved2_LSB 0x0 | ||
619 | #define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF | ||
620 | |||
621 | #define QIB_6120_EXTCtrl_OFFS 0xD8 | ||
622 | #define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30 | ||
623 | #define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF | ||
624 | #define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20 | ||
625 | #define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF | ||
626 | #define QIB_6120_EXTCtrl_Reserved_LSB 0x4 | ||
627 | #define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF | ||
628 | #define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3 | ||
629 | #define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1 | ||
630 | #define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2 | ||
631 | #define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1 | ||
632 | #define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1 | ||
633 | #define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1 | ||
634 | #define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0 | ||
635 | #define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1 | ||
636 | |||
637 | #define QIB_6120_GPIOOut_OFFS 0xE0 | ||
638 | |||
639 | #define QIB_6120_GPIOMask_OFFS 0xE8 | ||
640 | |||
641 | #define QIB_6120_GPIOStatus_OFFS 0xF0 | ||
642 | |||
643 | #define QIB_6120_GPIOClear_OFFS 0xF8 | ||
644 | |||
645 | #define QIB_6120_RcvCtrl_OFFS 0x100 | ||
646 | #define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F | ||
647 | #define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1 | ||
648 | #define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E | ||
649 | #define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1 | ||
650 | #define QIB_6120_RcvCtrl_Reserved_LSB 0x15 | ||
651 | #define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF | ||
652 | #define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10 | ||
653 | #define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F | ||
654 | #define QIB_6120_RcvCtrl_Reserved1_LSB 0x9 | ||
655 | #define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F | ||
656 | #define QIB_6120_RcvCtrl_Reserved2_LSB 0x5 | ||
657 | #define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF | ||
658 | #define QIB_6120_RcvCtrl_PortEnable_LSB 0x0 | ||
659 | #define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F | ||
660 | |||
661 | #define QIB_6120_RcvBTHQP_OFFS 0x108 | ||
662 | #define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E | ||
663 | #define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3 | ||
664 | #define QIB_6120_RcvBTHQP_Reserved_LSB 0x18 | ||
665 | #define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F | ||
666 | #define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0 | ||
667 | #define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF | ||
668 | |||
669 | #define QIB_6120_RcvHdrSize_OFFS 0x110 | ||
670 | |||
671 | #define QIB_6120_RcvHdrCnt_OFFS 0x118 | ||
672 | |||
673 | #define QIB_6120_RcvHdrEntSize_OFFS 0x120 | ||
674 | |||
675 | #define QIB_6120_RcvTIDBase_OFFS 0x128 | ||
676 | |||
677 | #define QIB_6120_RcvTIDCnt_OFFS 0x130 | ||
678 | |||
679 | #define QIB_6120_RcvEgrBase_OFFS 0x138 | ||
680 | |||
681 | #define QIB_6120_RcvEgrCnt_OFFS 0x140 | ||
682 | |||
683 | #define QIB_6120_RcvBufBase_OFFS 0x148 | ||
684 | |||
685 | #define QIB_6120_RcvBufSize_OFFS 0x150 | ||
686 | |||
687 | #define QIB_6120_RxIntMemBase_OFFS 0x158 | ||
688 | |||
689 | #define QIB_6120_RxIntMemSize_OFFS 0x160 | ||
690 | |||
691 | #define QIB_6120_RcvPartitionKey_OFFS 0x168 | ||
692 | |||
693 | #define QIB_6120_RcvPktLEDCnt_OFFS 0x178 | ||
694 | #define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20 | ||
695 | #define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF | ||
696 | #define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0 | ||
697 | #define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF | ||
698 | |||
699 | #define QIB_6120_SendCtrl_OFFS 0x1C0 | ||
700 | #define QIB_6120_SendCtrl_Disarm_LSB 0x1F | ||
701 | #define QIB_6120_SendCtrl_Disarm_RMASK 0x1 | ||
702 | #define QIB_6120_SendCtrl_Reserved_LSB 0x17 | ||
703 | #define QIB_6120_SendCtrl_Reserved_RMASK 0xFF | ||
704 | #define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10 | ||
705 | #define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F | ||
706 | #define QIB_6120_SendCtrl_Reserved1_LSB 0x4 | ||
707 | #define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF | ||
708 | #define QIB_6120_SendCtrl_PIOEnable_LSB 0x3 | ||
709 | #define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1 | ||
710 | #define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2 | ||
711 | #define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1 | ||
712 | #define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1 | ||
713 | #define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1 | ||
714 | #define QIB_6120_SendCtrl_Abort_LSB 0x0 | ||
715 | #define QIB_6120_SendCtrl_Abort_RMASK 0x1 | ||
716 | |||
717 | #define QIB_6120_SendPIOBufBase_OFFS 0x1C8 | ||
718 | #define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35 | ||
719 | #define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF | ||
720 | #define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20 | ||
721 | #define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF | ||
722 | #define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15 | ||
723 | #define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF | ||
724 | #define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0 | ||
725 | #define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF | ||
726 | |||
727 | #define QIB_6120_SendPIOSize_OFFS 0x1D0 | ||
728 | #define QIB_6120_SendPIOSize_Reserved_LSB 0x2D | ||
729 | #define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF | ||
730 | #define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20 | ||
731 | #define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF | ||
732 | #define QIB_6120_SendPIOSize_Reserved1_LSB 0xC | ||
733 | #define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF | ||
734 | #define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0 | ||
735 | #define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF | ||
736 | |||
737 | #define QIB_6120_SendPIOBufCnt_OFFS 0x1D8 | ||
738 | #define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24 | ||
739 | #define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF | ||
740 | #define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20 | ||
741 | #define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF | ||
742 | #define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9 | ||
743 | #define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF | ||
744 | #define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0 | ||
745 | #define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF | ||
746 | |||
747 | #define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0 | ||
748 | #define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6 | ||
749 | #define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF | ||
750 | #define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0 | ||
751 | #define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F | ||
752 | |||
753 | #define QIB_6120_SendBufErr0_OFFS 0x240 | ||
754 | #define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0 | ||
755 | #define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0 | ||
756 | |||
757 | #define QIB_6120_RcvHdrAddr0_OFFS 0x280 | ||
758 | #define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2 | ||
759 | #define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF | ||
760 | #define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0 | ||
761 | #define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3 | ||
762 | |||
763 | #define QIB_6120_RcvHdrTailAddr0_OFFS 0x300 | ||
764 | #define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2 | ||
765 | #define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF | ||
766 | #define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0 | ||
767 | #define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3 | ||
768 | |||
769 | #define QIB_6120_SerdesCfg0_OFFS 0x3C0 | ||
770 | #define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F | ||
771 | #define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1 | ||
772 | #define QIB_6120_SerdesCfg0_Reserved_LSB 0x38 | ||
773 | #define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F | ||
774 | #define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36 | ||
775 | #define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3 | ||
776 | #define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34 | ||
777 | #define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3 | ||
778 | #define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32 | ||
779 | #define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3 | ||
780 | #define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31 | ||
781 | #define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1 | ||
782 | #define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30 | ||
783 | #define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1 | ||
784 | #define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F | ||
785 | #define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1 | ||
786 | #define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E | ||
787 | #define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1 | ||
788 | #define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D | ||
789 | #define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1 | ||
790 | #define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C | ||
791 | #define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1 | ||
792 | #define QIB_6120_SerdesCfg0_PW_LSB 0x2B | ||
793 | #define QIB_6120_SerdesCfg0_PW_RMASK 0x1 | ||
794 | #define QIB_6120_SerdesCfg0_RefSel_LSB 0x29 | ||
795 | #define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3 | ||
796 | #define QIB_6120_SerdesCfg0_ParReset_LSB 0x28 | ||
797 | #define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1 | ||
798 | #define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27 | ||
799 | #define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1 | ||
800 | #define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26 | ||
801 | #define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1 | ||
802 | #define QIB_6120_SerdesCfg0_Offset_LSB 0x1E | ||
803 | #define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF | ||
804 | #define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D | ||
805 | #define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1 | ||
806 | #define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C | ||
807 | #define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1 | ||
808 | #define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18 | ||
809 | #define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF | ||
810 | #define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14 | ||
811 | #define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF | ||
812 | #define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10 | ||
813 | #define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF | ||
814 | #define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC | ||
815 | #define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF | ||
816 | #define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8 | ||
817 | #define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF | ||
818 | #define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7 | ||
819 | #define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1 | ||
820 | #define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6 | ||
821 | #define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1 | ||
822 | #define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5 | ||
823 | #define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1 | ||
824 | #define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4 | ||
825 | #define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1 | ||
826 | #define QIB_6120_SerdesCfg0_ResetA_LSB 0x3 | ||
827 | #define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1 | ||
828 | #define QIB_6120_SerdesCfg0_ResetB_LSB 0x2 | ||
829 | #define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1 | ||
830 | #define QIB_6120_SerdesCfg0_ResetC_LSB 0x1 | ||
831 | #define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1 | ||
832 | #define QIB_6120_SerdesCfg0_ResetD_LSB 0x0 | ||
833 | #define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1 | ||
834 | |||
835 | #define QIB_6120_SerdesStat_OFFS 0x3D0 | ||
836 | #define QIB_6120_SerdesStat_Reserved_LSB 0xC | ||
837 | #define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF | ||
838 | #define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB | ||
839 | #define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1 | ||
840 | #define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA | ||
841 | #define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1 | ||
842 | #define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9 | ||
843 | #define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1 | ||
844 | #define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8 | ||
845 | #define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1 | ||
846 | #define QIB_6120_SerdesStat_RxDetA_LSB 0x7 | ||
847 | #define QIB_6120_SerdesStat_RxDetA_RMASK 0x1 | ||
848 | #define QIB_6120_SerdesStat_RxDetB_LSB 0x6 | ||
849 | #define QIB_6120_SerdesStat_RxDetB_RMASK 0x1 | ||
850 | #define QIB_6120_SerdesStat_RxDetC_LSB 0x5 | ||
851 | #define QIB_6120_SerdesStat_RxDetC_RMASK 0x1 | ||
852 | #define QIB_6120_SerdesStat_RxDetD_LSB 0x4 | ||
853 | #define QIB_6120_SerdesStat_RxDetD_RMASK 0x1 | ||
854 | #define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3 | ||
855 | #define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1 | ||
856 | #define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2 | ||
857 | #define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1 | ||
858 | #define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1 | ||
859 | #define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1 | ||
860 | #define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0 | ||
861 | #define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1 | ||
862 | |||
863 | #define QIB_6120_XGXSCfg_OFFS 0x3D8 | ||
864 | #define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F | ||
865 | #define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1 | ||
866 | #define QIB_6120_XGXSCfg_Reserved_LSB 0x17 | ||
867 | #define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF | ||
868 | #define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13 | ||
869 | #define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF | ||
870 | #define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9 | ||
871 | #define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF | ||
872 | #define QIB_6120_XGXSCfg_port_addr_LSB 0x4 | ||
873 | #define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F | ||
874 | #define QIB_6120_XGXSCfg_mdd_30_LSB 0x3 | ||
875 | #define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1 | ||
876 | #define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2 | ||
877 | #define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1 | ||
878 | #define QIB_6120_XGXSCfg_Reserved1_LSB 0x1 | ||
879 | #define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1 | ||
880 | #define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0 | ||
881 | #define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1 | ||
882 | |||
883 | #define QIB_6120_LBIntCnt_OFFS 0x12000 | ||
884 | |||
885 | #define QIB_6120_LBFlowStallCnt_OFFS 0x12008 | ||
886 | |||
887 | #define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018 | ||
888 | |||
889 | #define QIB_6120_TxDataPktCnt_OFFS 0x12020 | ||
890 | |||
891 | #define QIB_6120_TxFlowPktCnt_OFFS 0x12028 | ||
892 | |||
893 | #define QIB_6120_TxDwordCnt_OFFS 0x12030 | ||
894 | |||
895 | #define QIB_6120_TxLenErrCnt_OFFS 0x12038 | ||
896 | |||
897 | #define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040 | ||
898 | |||
899 | #define QIB_6120_TxUnderrunCnt_OFFS 0x12048 | ||
900 | |||
901 | #define QIB_6120_TxFlowStallCnt_OFFS 0x12050 | ||
902 | |||
903 | #define QIB_6120_TxDroppedPktCnt_OFFS 0x12058 | ||
904 | |||
905 | #define QIB_6120_RxDroppedPktCnt_OFFS 0x12060 | ||
906 | |||
907 | #define QIB_6120_RxDataPktCnt_OFFS 0x12068 | ||
908 | |||
909 | #define QIB_6120_RxFlowPktCnt_OFFS 0x12070 | ||
910 | |||
911 | #define QIB_6120_RxDwordCnt_OFFS 0x12078 | ||
912 | |||
913 | #define QIB_6120_RxLenErrCnt_OFFS 0x12080 | ||
914 | |||
915 | #define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088 | ||
916 | |||
917 | #define QIB_6120_RxICRCErrCnt_OFFS 0x12090 | ||
918 | |||
919 | #define QIB_6120_RxVCRCErrCnt_OFFS 0x12098 | ||
920 | |||
921 | #define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0 | ||
922 | |||
923 | #define QIB_6120_RxBadFormatCnt_OFFS 0x120A8 | ||
924 | |||
925 | #define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0 | ||
926 | |||
927 | #define QIB_6120_RxEBPCnt_OFFS 0x120B8 | ||
928 | |||
929 | #define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0 | ||
930 | |||
931 | #define QIB_6120_RxBufOvflCnt_OFFS 0x120C8 | ||
932 | |||
933 | #define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0 | ||
934 | |||
935 | #define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8 | ||
936 | |||
937 | #define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0 | ||
938 | |||
939 | #define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8 | ||
940 | |||
941 | #define QIB_6120_IBStatusChangeCnt_OFFS 0x12140 | ||
942 | |||
943 | #define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148 | ||
944 | |||
945 | #define QIB_6120_IBLinkDownedCnt_OFFS 0x12150 | ||
946 | |||
947 | #define QIB_6120_IBSymbolErrCnt_OFFS 0x12158 | ||
948 | |||
949 | #define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170 | ||
950 | |||
951 | #define QIB_6120_RcvEgrArray0_OFFS 0x14000 | ||
952 | |||
953 | #define QIB_6120_RcvTIDArray0_OFFS 0x54000 | ||
954 | |||
955 | #define QIB_6120_PIOLaunchFIFO_OFFS 0x64000 | ||
956 | |||
957 | #define QIB_6120_SendPIOpbcCache_OFFS 0x64800 | ||
958 | |||
959 | #define QIB_6120_RcvBuf1_OFFS 0x72000 | ||
960 | |||
961 | #define QIB_6120_RcvBuf2_OFFS 0x75000 | ||
962 | |||
963 | #define QIB_6120_RcvFlags_OFFS 0x77000 | ||
964 | |||
965 | #define QIB_6120_RcvLookupBuf1_OFFS 0x79000 | ||
966 | |||
967 | #define QIB_6120_RcvDMABuf_OFFS 0x7B000 | ||
968 | |||
969 | #define QIB_6120_MiscRXEIntMem_OFFS 0x7C000 | ||
970 | |||
971 | #define QIB_6120_PCIERcvBuf_OFFS 0x80000 | ||
972 | |||
973 | #define QIB_6120_PCIERetryBuf_OFFS 0x82000 | ||
974 | |||
975 | #define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000 | ||
976 | |||
977 | #define QIB_6120_PIOBuf0_MA_OFFS 0x100000 | ||
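Each register above is described by _LSB/_RMASK pairs: <field>_LSB gives the low bit of the field within the 64-bit register, and <field>_RMASK gives the mask of the field once shifted down to bit 0. A minimal sketch of how such pairs are typically consumed follows; the FIELD_GET/FIELD_PUT helpers, the chosen fields and the 0x200 value are illustrative only, not the driver's actual macros. The same convention applies to the IBA7220 register definitions introduced below.

    #include <stdint.h>

    /* Illustrative helpers (not the driver's own macros), assuming the
     * generated convention that <name>_LSB is the field's low bit and
     * <name>_RMASK is the mask of the field shifted down to bit 0. */
    #define FIELD_GET(regval, name) \
            (((regval) >> name##_LSB) & name##_RMASK)
    #define FIELD_PUT(fldval, name) \
            (((uint64_t)(fldval) & name##_RMASK) << name##_LSB)

    /* Example: extract LinkState from a raw IBCStatus value. */
    static uint32_t ibcstatus_link_state(uint64_t ibcstatus)
    {
            return FIELD_GET(ibcstatus, QIB_6120_IBCStatus_LinkState);
    }

    /* Example: rewrite the MaxPktLen field of an IBCCtrl shadow value
     * (0x200 is a made-up field value, used only for illustration). */
    static void ibcctrl_set_maxpktlen(uint64_t *ibcctrl)
    {
            *ibcctrl &= ~FIELD_PUT(QIB_6120_IBCCtrl_MaxPktLen_RMASK,
                                   QIB_6120_IBCCtrl_MaxPktLen);
            *ibcctrl |= FIELD_PUT(0x200, QIB_6120_IBCCtrl_MaxPktLen);
    }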
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h new file mode 100644 index 000000000000..ea0bfd896f92 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_7220.h | |||
@@ -0,0 +1,156 @@ | |||
1 | #ifndef _QIB_7220_H | ||
2 | #define _QIB_7220_H | ||
3 | /* | ||
4 | * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | /* grab register-defs auto-generated by HW */ | ||
36 | #include "qib_7220_regs.h" | ||
37 | |||
38 | /* The number of eager receive TIDs for context zero. */ | ||
39 | #define IBA7220_KRCVEGRCNT 2048U | ||
40 | |||
41 | #define IB_7220_LT_STATE_CFGRCVFCFG 0x09 | ||
42 | #define IB_7220_LT_STATE_CFGWAITRMT 0x0a | ||
43 | #define IB_7220_LT_STATE_TXREVLANES 0x0d | ||
44 | #define IB_7220_LT_STATE_CFGENH 0x10 | ||
45 | |||
46 | struct qib_chip_specific { | ||
47 | u64 __iomem *cregbase; | ||
48 | u64 *cntrs; | ||
49 | u64 *portcntrs; | ||
50 | spinlock_t sdepb_lock; /* serdes EPB bus */ | ||
51 | spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */ | ||
52 | spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */ | ||
53 | u64 hwerrmask; | ||
54 | u64 errormask; | ||
55 | u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */ | ||
56 | u64 gpio_mask; /* shadow the gpio mask register */ | ||
57 | u64 extctrl; /* shadow the gpio output enable, etc... */ | ||
58 | u32 ncntrs; | ||
59 | u32 nportcntrs; | ||
60 | u32 cntrnamelen; | ||
61 | u32 portcntrnamelen; | ||
62 | u32 numctxts; | ||
63 | u32 rcvegrcnt; | ||
64 | u32 autoneg_tries; | ||
65 | u32 serdes_first_init_done; | ||
66 | u32 sdmabufcnt; | ||
67 | u32 lastbuf_for_pio; | ||
68 | u32 updthresh; /* current AvailUpdThld */ | ||
69 | u32 updthresh_dflt; /* default AvailUpdThld */ | ||
70 | int irq; | ||
71 | u8 presets_needed; | ||
72 | u8 relock_timer_active; | ||
73 | char emsgbuf[128]; | ||
74 | char sdmamsgbuf[192]; | ||
75 | char bitsmsgbuf[64]; | ||
76 | struct timer_list relock_timer; | ||
77 | unsigned int relock_interval; /* in jiffies */ | ||
78 | }; | ||
79 | |||
80 | struct qib_chippport_specific { | ||
81 | struct qib_pportdata pportdata; | ||
82 | wait_queue_head_t autoneg_wait; | ||
83 | struct delayed_work autoneg_work; | ||
84 | struct timer_list chase_timer; | ||
85 | /* | ||
86 | * these 5 fields are used to establish deltas for IB symbol | ||
87 | * errors and linkrecovery errors. They can be reported on | ||
88 | * some chips during link negotiation prior to INIT, and with | ||
89 | * DDR when faking DDR negotiations with non-IBTA switches. | ||
90 | * The chip counters are adjusted at driver unload if there is | ||
91 | * a non-zero delta. | ||
92 | */ | ||
93 | u64 ibdeltainprog; | ||
94 | u64 ibsymdelta; | ||
95 | u64 ibsymsnap; | ||
96 | u64 iblnkerrdelta; | ||
97 | u64 iblnkerrsnap; | ||
98 | u64 ibcctrl; /* kr_ibcctrl shadow */ | ||
99 | u64 ibcddrctrl; /* kr_ibcddrctrl shadow */ | ||
100 | u64 chase_end; | ||
101 | u32 last_delay_mult; | ||
102 | }; | ||
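The ibdeltainprog/ibsymsnap/ibsymdelta fields above (and the matching iblnkerr* pair) carry the snapshot-and-delta bookkeeping described in the comment: a counter is snapshotted when a phase that produces spurious errors begins, and the accumulated difference is later subtracted from what is reported. A simplified, hypothetical sketch of that scheme for a single counter; the helper names and the hardware accessor are stand-ins, not driver code:

    #include <stdint.h>

    static uint64_t hw_symbol_err_cnt;        /* stand-in for IBSymbolErrCnt */

    static uint64_t read_symbol_err_cnt(void) /* assumed HW counter read */
    {
            return hw_symbol_err_cnt;
    }

    struct sym_err_tracker {
            int deltainprog;        /* a snapshot is outstanding */
            uint64_t symsnap;       /* counter value at snapshot time */
            uint64_t symdelta;      /* accumulated errors to hide */
    };

    /* Entering a phase (e.g. link negotiation prior to INIT) whose symbol
     * errors should not be charged to the user-visible counter. */
    static void sym_err_snapshot(struct sym_err_tracker *t)
    {
            if (!t->deltainprog) {
                    t->deltainprog = 1;
                    t->symsnap = read_symbol_err_cnt();
            }
    }

    /* Leaving that phase (or driver unload): fold whatever accumulated
     * since the snapshot into the delta. */
    static void sym_err_finish(struct sym_err_tracker *t)
    {
            if (t->deltainprog) {
                    t->deltainprog = 0;
                    t->symdelta += read_symbol_err_cnt() - t->symsnap;
            }
    }

    /* Reported value: raw chip counter minus the hidden portion. */
    static uint64_t sym_err_reported(struct sym_err_tracker *t)
    {
            return read_symbol_err_cnt() - t->symdelta;
    }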
103 | |||
104 | /* | ||
105 | * This header file provides the declarations and common definitions | ||
106 | * for (mostly) manipulation of the SerDes blocks within the IBA7220. | ||
107 | * The functions declared should only be called from within other | ||
108 | * 7220-related files such as qib_iba7220.c or qib_sd7220.c. | ||
109 | */ | ||
110 | int qib_sd7220_presets(struct qib_devdata *dd); | ||
111 | int qib_sd7220_init(struct qib_devdata *dd); | ||
112 | int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img, | ||
113 | int len, int offset); | ||
114 | int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img, | ||
115 | int len, int offset); | ||
116 | void qib_sd7220_clr_ibpar(struct qib_devdata *); | ||
117 | /* | ||
118 | * Below used for sdnum parameter, selecting one of the two sections | ||
119 | * used for PCIe, or the single SerDes used for IB, which is the | ||
120 | * only one currently used | ||
121 | */ | ||
122 | #define IB_7220_SERDES 2 | ||
123 | |||
124 | int qib_sd7220_ib_load(struct qib_devdata *dd); | ||
125 | int qib_sd7220_ib_vfy(struct qib_devdata *dd); | ||
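As a usage illustration only (not code from the driver), a download-then-verify of the IB SerDes section would pass IB_7220_SERDES as the sdnum argument; the image pointer, length and zero offset are placeholders, and a negative return is assumed here to indicate failure:

    /* Hypothetical caller inside a 7220-related file (struct qib_devdata
     * and u8 come from qib.h there); img/len/offset are placeholders. */
    static int sd7220_prog_and_verify(struct qib_devdata *dd, u8 *img, int len)
    {
            int ret;

            ret = qib_sd7220_prog_ld(dd, IB_7220_SERDES, img, len, 0);
            if (ret < 0)
                    return ret;
            return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, img, len, 0);
    }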
126 | |||
127 | static inline u32 qib_read_kreg32(const struct qib_devdata *dd, | ||
128 | const u16 regno) | ||
129 | { | ||
130 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
131 | return -1; | ||
132 | return readl((u32 __iomem *)&dd->kregbase[regno]); | ||
133 | } | ||
134 | |||
135 | static inline u64 qib_read_kreg64(const struct qib_devdata *dd, | ||
136 | const u16 regno) | ||
137 | { | ||
138 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
139 | return -1; | ||
140 | |||
141 | return readq(&dd->kregbase[regno]); | ||
142 | } | ||
143 | |||
144 | static inline void qib_write_kreg(const struct qib_devdata *dd, | ||
145 | const u16 regno, u64 value) | ||
146 | { | ||
147 | if (dd->kregbase) | ||
148 | writeq(value, &dd->kregbase[regno]); | ||
149 | } | ||
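These accessors index kregbase as an array of 64-bit registers, so the register number passed in is taken here to be the byte offset divided by sizeof(u64); that is an assumption read off the pointer arithmetic above, not a statement from the driver. A hedged read-modify-write sketch using them together with the 7220 field macros:

    /* Illustrative only: set the LinkEn bit of the 7220 Control register.
     * kr_control is derived on the assumption that register numbers are
     * 64-bit-word offsets into kregbase[]. */
    static void example_set_linken(struct qib_devdata *dd)
    {
            const u16 kr_control = QIB_7220_Control_OFFS / sizeof(u64);
            u64 control = qib_read_kreg64(dd, kr_control);

            control |= (u64)QIB_7220_Control_LinkEn_RMASK <<
                       QIB_7220_Control_LinkEn_LSB;
            qib_write_kreg(dd, kr_control, control);
    }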
150 | |||
151 | void set_7220_relock_poll(struct qib_devdata *, int); | ||
152 | void shutdown_7220_relock_poll(struct qib_devdata *); | ||
153 | void toggle_7220_rclkrls(struct qib_devdata *); | ||
154 | |||
155 | |||
156 | #endif /* _QIB_7220_H */ | ||
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h new file mode 100644 index 000000000000..0da5bb750e52 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_7220_regs.h | |||
@@ -0,0 +1,1496 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | */ | ||
34 | |||
35 | /* This file is mechanically generated from RTL. Any hand-edits will be lost! */ | ||
36 | |||
37 | #define QIB_7220_Revision_OFFS 0x0 | ||
38 | #define QIB_7220_Revision_R_Simulator_LSB 0x3F | ||
39 | #define QIB_7220_Revision_R_Simulator_RMASK 0x1 | ||
40 | #define QIB_7220_Revision_R_Emulation_LSB 0x3E | ||
41 | #define QIB_7220_Revision_R_Emulation_RMASK 0x1 | ||
42 | #define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28 | ||
43 | #define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF | ||
44 | #define QIB_7220_Revision_BoardID_LSB 0x20 | ||
45 | #define QIB_7220_Revision_BoardID_RMASK 0xFF | ||
46 | #define QIB_7220_Revision_R_SW_LSB 0x18 | ||
47 | #define QIB_7220_Revision_R_SW_RMASK 0xFF | ||
48 | #define QIB_7220_Revision_R_Arch_LSB 0x10 | ||
49 | #define QIB_7220_Revision_R_Arch_RMASK 0xFF | ||
50 | #define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8 | ||
51 | #define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF | ||
52 | #define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0 | ||
53 | #define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF | ||
54 | |||
55 | #define QIB_7220_Control_OFFS 0x8 | ||
56 | #define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7 | ||
57 | #define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1 | ||
58 | #define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6 | ||
59 | #define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1 | ||
60 | #define QIB_7220_Control_Reserved_LSB 0x5 | ||
61 | #define QIB_7220_Control_Reserved_RMASK 0x1 | ||
62 | #define QIB_7220_Control_TxLatency_LSB 0x4 | ||
63 | #define QIB_7220_Control_TxLatency_RMASK 0x1 | ||
64 | #define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3 | ||
65 | #define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1 | ||
66 | #define QIB_7220_Control_LinkEn_LSB 0x2 | ||
67 | #define QIB_7220_Control_LinkEn_RMASK 0x1 | ||
68 | #define QIB_7220_Control_FreezeMode_LSB 0x1 | ||
69 | #define QIB_7220_Control_FreezeMode_RMASK 0x1 | ||
70 | #define QIB_7220_Control_SyncReset_LSB 0x0 | ||
71 | #define QIB_7220_Control_SyncReset_RMASK 0x1 | ||
72 | |||
73 | #define QIB_7220_PageAlign_OFFS 0x10 | ||
74 | |||
75 | #define QIB_7220_PortCnt_OFFS 0x18 | ||
76 | |||
77 | #define QIB_7220_SendRegBase_OFFS 0x30 | ||
78 | |||
79 | #define QIB_7220_UserRegBase_OFFS 0x38 | ||
80 | |||
81 | #define QIB_7220_CntrRegBase_OFFS 0x40 | ||
82 | |||
83 | #define QIB_7220_Scratch_OFFS 0x48 | ||
84 | |||
85 | #define QIB_7220_IntMask_OFFS 0x68 | ||
86 | #define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F | ||
87 | #define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1 | ||
88 | #define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E | ||
89 | #define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1 | ||
90 | #define QIB_7220_IntMask_Reserved_LSB 0x31 | ||
91 | #define QIB_7220_IntMask_Reserved_RMASK 0x1FFF | ||
92 | #define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30 | ||
93 | #define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1 | ||
94 | #define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F | ||
95 | #define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1 | ||
96 | #define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E | ||
97 | #define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1 | ||
98 | #define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D | ||
99 | #define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1 | ||
100 | #define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C | ||
101 | #define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1 | ||
102 | #define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B | ||
103 | #define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1 | ||
104 | #define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A | ||
105 | #define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1 | ||
106 | #define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29 | ||
107 | #define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1 | ||
108 | #define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28 | ||
109 | #define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1 | ||
110 | #define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27 | ||
111 | #define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1 | ||
112 | #define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26 | ||
113 | #define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1 | ||
114 | #define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25 | ||
115 | #define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1 | ||
116 | #define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24 | ||
117 | #define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1 | ||
118 | #define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23 | ||
119 | #define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1 | ||
120 | #define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22 | ||
121 | #define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1 | ||
122 | #define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21 | ||
123 | #define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1 | ||
124 | #define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20 | ||
125 | #define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1 | ||
126 | #define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F | ||
127 | #define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1 | ||
128 | #define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E | ||
129 | #define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1 | ||
130 | #define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D | ||
131 | #define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1 | ||
132 | #define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C | ||
133 | #define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1 | ||
134 | #define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B | ||
135 | #define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1 | ||
136 | #define QIB_7220_IntMask_JIntMask_LSB 0x1A | ||
137 | #define QIB_7220_IntMask_JIntMask_RMASK 0x1 | ||
138 | #define QIB_7220_IntMask_Reserved1_LSB 0x11 | ||
139 | #define QIB_7220_IntMask_Reserved1_RMASK 0x1FF | ||
140 | #define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10 | ||
141 | #define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1 | ||
142 | #define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF | ||
143 | #define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1 | ||
144 | #define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE | ||
145 | #define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1 | ||
146 | #define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD | ||
147 | #define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1 | ||
148 | #define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC | ||
149 | #define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1 | ||
150 | #define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB | ||
151 | #define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1 | ||
152 | #define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA | ||
153 | #define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1 | ||
154 | #define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9 | ||
155 | #define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1 | ||
156 | #define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8 | ||
157 | #define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1 | ||
158 | #define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7 | ||
159 | #define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1 | ||
160 | #define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6 | ||
161 | #define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1 | ||
162 | #define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5 | ||
163 | #define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1 | ||
164 | #define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4 | ||
165 | #define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1 | ||
166 | #define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3 | ||
167 | #define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1 | ||
168 | #define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2 | ||
169 | #define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1 | ||
170 | #define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1 | ||
171 | #define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1 | ||
172 | #define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0 | ||
173 | #define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1 | ||
174 | |||
175 | #define QIB_7220_IntStatus_OFFS 0x70 | ||
176 | #define QIB_7220_IntStatus_SDmaInt_LSB 0x3F | ||
177 | #define QIB_7220_IntStatus_SDmaInt_RMASK 0x1 | ||
178 | #define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E | ||
179 | #define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1 | ||
180 | #define QIB_7220_IntStatus_Reserved_LSB 0x31 | ||
181 | #define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF | ||
182 | #define QIB_7220_IntStatus_RcvUrg16_LSB 0x30 | ||
183 | #define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1 | ||
184 | #define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F | ||
185 | #define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1 | ||
186 | #define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E | ||
187 | #define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1 | ||
188 | #define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D | ||
189 | #define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1 | ||
190 | #define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C | ||
191 | #define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1 | ||
192 | #define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B | ||
193 | #define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1 | ||
194 | #define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A | ||
195 | #define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1 | ||
196 | #define QIB_7220_IntStatus_RcvUrg9_LSB 0x29 | ||
197 | #define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1 | ||
198 | #define QIB_7220_IntStatus_RcvUrg8_LSB 0x28 | ||
199 | #define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1 | ||
200 | #define QIB_7220_IntStatus_RcvUrg7_LSB 0x27 | ||
201 | #define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1 | ||
202 | #define QIB_7220_IntStatus_RcvUrg6_LSB 0x26 | ||
203 | #define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1 | ||
204 | #define QIB_7220_IntStatus_RcvUrg5_LSB 0x25 | ||
205 | #define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1 | ||
206 | #define QIB_7220_IntStatus_RcvUrg4_LSB 0x24 | ||
207 | #define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1 | ||
208 | #define QIB_7220_IntStatus_RcvUrg3_LSB 0x23 | ||
209 | #define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1 | ||
210 | #define QIB_7220_IntStatus_RcvUrg2_LSB 0x22 | ||
211 | #define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1 | ||
212 | #define QIB_7220_IntStatus_RcvUrg1_LSB 0x21 | ||
213 | #define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1 | ||
214 | #define QIB_7220_IntStatus_RcvUrg0_LSB 0x20 | ||
215 | #define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1 | ||
216 | #define QIB_7220_IntStatus_Error_LSB 0x1F | ||
217 | #define QIB_7220_IntStatus_Error_RMASK 0x1 | ||
218 | #define QIB_7220_IntStatus_PioSent_LSB 0x1E | ||
219 | #define QIB_7220_IntStatus_PioSent_RMASK 0x1 | ||
220 | #define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D | ||
221 | #define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1 | ||
222 | #define QIB_7220_IntStatus_assertGPIO_LSB 0x1C | ||
223 | #define QIB_7220_IntStatus_assertGPIO_RMASK 0x1 | ||
224 | #define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B | ||
225 | #define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1 | ||
226 | #define QIB_7220_IntStatus_JInt_LSB 0x1A | ||
227 | #define QIB_7220_IntStatus_JInt_RMASK 0x1 | ||
228 | #define QIB_7220_IntStatus_Reserved1_LSB 0x11 | ||
229 | #define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF | ||
230 | #define QIB_7220_IntStatus_RcvAvail16_LSB 0x10 | ||
231 | #define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1 | ||
232 | #define QIB_7220_IntStatus_RcvAvail15_LSB 0xF | ||
233 | #define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1 | ||
234 | #define QIB_7220_IntStatus_RcvAvail14_LSB 0xE | ||
235 | #define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1 | ||
236 | #define QIB_7220_IntStatus_RcvAvail13_LSB 0xD | ||
237 | #define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1 | ||
238 | #define QIB_7220_IntStatus_RcvAvail12_LSB 0xC | ||
239 | #define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1 | ||
240 | #define QIB_7220_IntStatus_RcvAvail11_LSB 0xB | ||
241 | #define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1 | ||
242 | #define QIB_7220_IntStatus_RcvAvail10_LSB 0xA | ||
243 | #define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1 | ||
244 | #define QIB_7220_IntStatus_RcvAvail9_LSB 0x9 | ||
245 | #define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1 | ||
246 | #define QIB_7220_IntStatus_RcvAvail8_LSB 0x8 | ||
247 | #define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1 | ||
248 | #define QIB_7220_IntStatus_RcvAvail7_LSB 0x7 | ||
249 | #define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1 | ||
250 | #define QIB_7220_IntStatus_RcvAvail6_LSB 0x6 | ||
251 | #define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1 | ||
252 | #define QIB_7220_IntStatus_RcvAvail5_LSB 0x5 | ||
253 | #define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1 | ||
254 | #define QIB_7220_IntStatus_RcvAvail4_LSB 0x4 | ||
255 | #define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1 | ||
256 | #define QIB_7220_IntStatus_RcvAvail3_LSB 0x3 | ||
257 | #define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1 | ||
258 | #define QIB_7220_IntStatus_RcvAvail2_LSB 0x2 | ||
259 | #define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1 | ||
260 | #define QIB_7220_IntStatus_RcvAvail1_LSB 0x1 | ||
261 | #define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1 | ||
262 | #define QIB_7220_IntStatus_RcvAvail0_LSB 0x0 | ||
263 | #define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1 | ||
264 | |||
265 | #define QIB_7220_IntClear_OFFS 0x78 | ||
266 | #define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F | ||
267 | #define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1 | ||
268 | #define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E | ||
269 | #define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1 | ||
270 | #define QIB_7220_IntClear_Reserved_LSB 0x31 | ||
271 | #define QIB_7220_IntClear_Reserved_RMASK 0x1FFF | ||
272 | #define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30 | ||
273 | #define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1 | ||
274 | #define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F | ||
275 | #define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1 | ||
276 | #define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E | ||
277 | #define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1 | ||
278 | #define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D | ||
279 | #define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1 | ||
280 | #define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C | ||
281 | #define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1 | ||
282 | #define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B | ||
283 | #define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1 | ||
284 | #define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A | ||
285 | #define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1 | ||
286 | #define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29 | ||
287 | #define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1 | ||
288 | #define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28 | ||
289 | #define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1 | ||
290 | #define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27 | ||
291 | #define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1 | ||
292 | #define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26 | ||
293 | #define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1 | ||
294 | #define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25 | ||
295 | #define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1 | ||
296 | #define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24 | ||
297 | #define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1 | ||
298 | #define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23 | ||
299 | #define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1 | ||
300 | #define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22 | ||
301 | #define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1 | ||
302 | #define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21 | ||
303 | #define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1 | ||
304 | #define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20 | ||
305 | #define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1 | ||
306 | #define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F | ||
307 | #define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1 | ||
308 | #define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E | ||
309 | #define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1 | ||
310 | #define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D | ||
311 | #define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1 | ||
312 | #define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C | ||
313 | #define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1 | ||
314 | #define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B | ||
315 | #define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1 | ||
316 | #define QIB_7220_IntClear_JIntClear_LSB 0x1A | ||
317 | #define QIB_7220_IntClear_JIntClear_RMASK 0x1 | ||
318 | #define QIB_7220_IntClear_Reserved1_LSB 0x11 | ||
319 | #define QIB_7220_IntClear_Reserved1_RMASK 0x1FF | ||
320 | #define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10 | ||
321 | #define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1 | ||
322 | #define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF | ||
323 | #define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1 | ||
324 | #define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE | ||
325 | #define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1 | ||
326 | #define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD | ||
327 | #define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1 | ||
328 | #define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC | ||
329 | #define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1 | ||
330 | #define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB | ||
331 | #define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1 | ||
332 | #define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA | ||
333 | #define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1 | ||
334 | #define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9 | ||
335 | #define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1 | ||
336 | #define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8 | ||
337 | #define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1 | ||
338 | #define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7 | ||
339 | #define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1 | ||
340 | #define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6 | ||
341 | #define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1 | ||
342 | #define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5 | ||
343 | #define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1 | ||
344 | #define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4 | ||
345 | #define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1 | ||
346 | #define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3 | ||
347 | #define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1 | ||
348 | #define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2 | ||
349 | #define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1 | ||
350 | #define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1 | ||
351 | #define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1 | ||
352 | #define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0 | ||
353 | #define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1 | ||
354 | |||
355 | #define QIB_7220_ErrMask_OFFS 0x80 | ||
356 | #define QIB_7220_ErrMask_Reserved_LSB 0x36 | ||
357 | #define QIB_7220_ErrMask_Reserved_RMASK 0x3FF | ||
358 | #define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35 | ||
359 | #define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1 | ||
360 | #define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34 | ||
361 | #define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1 | ||
362 | #define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33 | ||
363 | #define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1 | ||
364 | #define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32 | ||
365 | #define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1 | ||
366 | #define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31 | ||
367 | #define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1 | ||
368 | #define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30 | ||
369 | #define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1 | ||
370 | #define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F | ||
371 | #define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1 | ||
372 | #define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E | ||
373 | #define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1 | ||
374 | #define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D | ||
375 | #define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1 | ||
376 | #define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C | ||
377 | #define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1 | ||
378 | #define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B | ||
379 | #define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1 | ||
380 | #define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A | ||
381 | #define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1 | ||
382 | #define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29 | ||
383 | #define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1 | ||
384 | #define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28 | ||
385 | #define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1 | ||
386 | #define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27 | ||
387 | #define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1 | ||
388 | #define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26 | ||
389 | #define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1 | ||
390 | #define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25 | ||
391 | #define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1 | ||
392 | #define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24 | ||
393 | #define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1 | ||
394 | #define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23 | ||
395 | #define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1 | ||
396 | #define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22 | ||
397 | #define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1 | ||
398 | #define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21 | ||
399 | #define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1 | ||
400 | #define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20 | ||
401 | #define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1 | ||
402 | #define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F | ||
403 | #define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1 | ||
404 | #define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E | ||
405 | #define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1 | ||
406 | #define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D | ||
407 | #define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1 | ||
408 | #define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C | ||
409 | #define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1 | ||
410 | #define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B | ||
411 | #define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1 | ||
412 | #define QIB_7220_ErrMask_Reserved1_LSB 0x12 | ||
413 | #define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF | ||
414 | #define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11 | ||
415 | #define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1 | ||
416 | #define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10 | ||
417 | #define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1 | ||
418 | #define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF | ||
419 | #define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1 | ||
420 | #define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE | ||
421 | #define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1 | ||
422 | #define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD | ||
423 | #define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1 | ||
424 | #define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC | ||
425 | #define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1 | ||
426 | #define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB | ||
427 | #define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1 | ||
428 | #define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA | ||
429 | #define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1 | ||
430 | #define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9 | ||
431 | #define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1 | ||
432 | #define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8 | ||
433 | #define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1 | ||
434 | #define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7 | ||
435 | #define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1 | ||
436 | #define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6 | ||
437 | #define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1 | ||
438 | #define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5 | ||
439 | #define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1 | ||
440 | #define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4 | ||
441 | #define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1 | ||
442 | #define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3 | ||
443 | #define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1 | ||
444 | #define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2 | ||
445 | #define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1 | ||
446 | #define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1 | ||
447 | #define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1 | ||
448 | #define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0 | ||
449 | #define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1 | ||
450 | |||
451 | #define QIB_7220_ErrStatus_OFFS 0x88 | ||
452 | #define QIB_7220_ErrStatus_Reserved_LSB 0x36 | ||
453 | #define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF | ||
454 | #define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35 | ||
455 | #define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1 | ||
456 | #define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34 | ||
457 | #define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1 | ||
458 | #define QIB_7220_ErrStatus_HardwareErr_LSB 0x33 | ||
459 | #define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1 | ||
460 | #define QIB_7220_ErrStatus_ResetNegated_LSB 0x32 | ||
461 | #define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1 | ||
462 | #define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31 | ||
463 | #define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1 | ||
464 | #define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30 | ||
465 | #define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1 | ||
466 | #define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F | ||
467 | #define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1 | ||
468 | #define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E | ||
469 | #define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1 | ||
470 | #define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D | ||
471 | #define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1 | ||
472 | #define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C | ||
473 | #define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1 | ||
474 | #define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B | ||
475 | #define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1 | ||
476 | #define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A | ||
477 | #define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1 | ||
478 | #define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29 | ||
479 | #define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1 | ||
480 | #define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28 | ||
481 | #define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1 | ||
482 | #define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27 | ||
483 | #define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1 | ||
484 | #define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26 | ||
485 | #define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1 | ||
486 | #define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25 | ||
487 | #define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1 | ||
488 | #define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24 | ||
489 | #define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1 | ||
490 | #define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23 | ||
491 | #define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1 | ||
492 | #define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22 | ||
493 | #define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1 | ||
494 | #define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21 | ||
495 | #define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1 | ||
496 | #define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20 | ||
497 | #define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1 | ||
498 | #define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F | ||
499 | #define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1 | ||
500 | #define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E | ||
501 | #define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1 | ||
502 | #define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D | ||
503 | #define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1 | ||
504 | #define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C | ||
505 | #define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1 | ||
506 | #define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B | ||
507 | #define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1 | ||
508 | #define QIB_7220_ErrStatus_Reserved1_LSB 0x12 | ||
509 | #define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF | ||
510 | #define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11 | ||
511 | #define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1 | ||
512 | #define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10 | ||
513 | #define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1 | ||
514 | #define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF | ||
515 | #define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1 | ||
516 | #define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE | ||
517 | #define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1 | ||
518 | #define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD | ||
519 | #define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1 | ||
520 | #define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC | ||
521 | #define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1 | ||
522 | #define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB | ||
523 | #define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1 | ||
524 | #define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA | ||
525 | #define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1 | ||
526 | #define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9 | ||
527 | #define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1 | ||
528 | #define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8 | ||
529 | #define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1 | ||
530 | #define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7 | ||
531 | #define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1 | ||
532 | #define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6 | ||
533 | #define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1 | ||
534 | #define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5 | ||
535 | #define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1 | ||
536 | #define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4 | ||
537 | #define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1 | ||
538 | #define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3 | ||
539 | #define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1 | ||
540 | #define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2 | ||
541 | #define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1 | ||
542 | #define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1 | ||
543 | #define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1 | ||
544 | #define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0 | ||
545 | #define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1 | ||
546 | |||
547 | #define QIB_7220_ErrClear_OFFS 0x90 | ||
548 | #define QIB_7220_ErrClear_Reserved_LSB 0x36 | ||
549 | #define QIB_7220_ErrClear_Reserved_RMASK 0x3FF | ||
550 | #define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35 | ||
551 | #define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1 | ||
552 | #define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34 | ||
553 | #define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1 | ||
554 | #define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33 | ||
555 | #define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1 | ||
556 | #define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32 | ||
557 | #define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1 | ||
558 | #define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31 | ||
559 | #define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1 | ||
560 | #define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30 | ||
561 | #define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1 | ||
562 | #define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F | ||
563 | #define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1 | ||
564 | #define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E | ||
565 | #define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1 | ||
566 | #define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D | ||
567 | #define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1 | ||
568 | #define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C | ||
569 | #define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1 | ||
570 | #define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B | ||
571 | #define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1 | ||
572 | #define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A | ||
573 | #define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1 | ||
574 | #define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29 | ||
575 | #define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1 | ||
576 | #define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28 | ||
577 | #define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1 | ||
578 | #define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27 | ||
579 | #define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1 | ||
580 | #define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26 | ||
581 | #define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1 | ||
582 | #define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25 | ||
583 | #define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1 | ||
584 | #define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24 | ||
585 | #define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1 | ||
586 | #define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23 | ||
587 | #define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1 | ||
588 | #define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22 | ||
589 | #define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1 | ||
590 | #define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21 | ||
591 | #define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1 | ||
592 | #define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20 | ||
593 | #define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1 | ||
594 | #define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F | ||
595 | #define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1 | ||
596 | #define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E | ||
597 | #define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1 | ||
598 | #define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D | ||
599 | #define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1 | ||
600 | #define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C | ||
601 | #define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1 | ||
602 | #define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B | ||
603 | #define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1 | ||
604 | #define QIB_7220_ErrClear_Reserved1_LSB 0x12 | ||
605 | #define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF | ||
606 | #define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11 | ||
607 | #define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1 | ||
608 | #define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10 | ||
609 | #define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1 | ||
610 | #define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF | ||
611 | #define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1 | ||
612 | #define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE | ||
613 | #define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1 | ||
614 | #define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD | ||
615 | #define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1 | ||
616 | #define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC | ||
617 | #define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1 | ||
618 | #define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB | ||
619 | #define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1 | ||
620 | #define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA | ||
621 | #define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1 | ||
622 | #define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9 | ||
623 | #define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1 | ||
624 | #define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8 | ||
625 | #define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1 | ||
626 | #define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7 | ||
627 | #define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1 | ||
628 | #define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6 | ||
629 | #define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1 | ||
630 | #define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5 | ||
631 | #define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1 | ||
632 | #define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4 | ||
633 | #define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1 | ||
634 | #define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3 | ||
635 | #define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1 | ||
636 | #define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2 | ||
637 | #define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1 | ||
638 | #define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1 | ||
639 | #define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1 | ||
640 | #define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0 | ||
641 | #define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1 | ||
642 | |||
643 | #define QIB_7220_HwErrMask_OFFS 0x98 | ||
644 | #define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F | ||
645 | #define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1 | ||
646 | #define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E | ||
647 | #define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1 | ||
648 | #define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D | ||
649 | #define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1 | ||
650 | #define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C | ||
651 | #define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1 | ||
652 | #define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B | ||
653 | #define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1 | ||
654 | #define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A | ||
655 | #define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1 | ||
656 | #define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39 | ||
657 | #define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1 | ||
658 | #define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38 | ||
659 | #define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1 | ||
660 | #define QIB_7220_HwErrMask_Reserved_LSB 0x37 | ||
661 | #define QIB_7220_HwErrMask_Reserved_RMASK 0x1 | ||
662 | #define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36 | ||
663 | #define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1 | ||
664 | #define QIB_7220_HwErrMask_Reserved1_LSB 0x33 | ||
665 | #define QIB_7220_HwErrMask_Reserved1_RMASK 0x7 | ||
666 | #define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C | ||
667 | #define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F | ||
668 | #define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28 | ||
669 | #define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF | ||
670 | #define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27 | ||
671 | #define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1 | ||
672 | #define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26 | ||
673 | #define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1 | ||
674 | #define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25 | ||
675 | #define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1 | ||
676 | #define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24 | ||
677 | #define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1 | ||
678 | #define QIB_7220_HwErrMask_Reserved2_LSB 0x22 | ||
679 | #define QIB_7220_HwErrMask_Reserved2_RMASK 0x3 | ||
680 | #define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F | ||
681 | #define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7 | ||
682 | #define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E | ||
683 | #define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1 | ||
684 | #define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D | ||
685 | #define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1 | ||
686 | #define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C | ||
687 | #define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1 | ||
688 | #define QIB_7220_HwErrMask_Reserved3_LSB 0x8 | ||
689 | #define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF | ||
690 | #define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0 | ||
691 | #define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF | ||
692 | |||
693 | #define QIB_7220_HwErrStatus_OFFS 0xA0 | ||
694 | #define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F | ||
695 | #define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1 | ||
696 | #define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E | ||
697 | #define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1 | ||
698 | #define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D | ||
699 | #define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1 | ||
700 | #define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C | ||
701 | #define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1 | ||
702 | #define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B | ||
703 | #define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1 | ||
704 | #define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A | ||
705 | #define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1 | ||
706 | #define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39 | ||
707 | #define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1 | ||
708 | #define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38 | ||
709 | #define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1 | ||
710 | #define QIB_7220_HwErrStatus_Reserved_LSB 0x37 | ||
711 | #define QIB_7220_HwErrStatus_Reserved_RMASK 0x1 | ||
712 | #define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36 | ||
713 | #define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1 | ||
714 | #define QIB_7220_HwErrStatus_Reserved1_LSB 0x33 | ||
715 | #define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7 | ||
716 | #define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C | ||
717 | #define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F | ||
718 | #define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28 | ||
719 | #define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF | ||
720 | #define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27 | ||
721 | #define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1 | ||
722 | #define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26 | ||
723 | #define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1 | ||
724 | #define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25 | ||
725 | #define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1 | ||
726 | #define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24 | ||
727 | #define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1 | ||
728 | #define QIB_7220_HwErrStatus_Reserved2_LSB 0x22 | ||
729 | #define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3 | ||
730 | #define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F | ||
731 | #define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7 | ||
732 | #define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E | ||
733 | #define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1 | ||
734 | #define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D | ||
735 | #define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1 | ||
736 | #define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C | ||
737 | #define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1 | ||
738 | #define QIB_7220_HwErrStatus_Reserved3_LSB 0x8 | ||
739 | #define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF | ||
740 | #define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0 | ||
741 | #define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF | ||
742 | |||
743 | #define QIB_7220_HwErrClear_OFFS 0xA8 | ||
744 | #define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F | ||
745 | #define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1 | ||
746 | #define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E | ||
747 | #define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1 | ||
748 | #define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D | ||
749 | #define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1 | ||
750 | #define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C | ||
751 | #define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1 | ||
752 | #define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B | ||
753 | #define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1 | ||
754 | #define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A | ||
755 | #define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1 | ||
756 | #define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39 | ||
757 | #define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1 | ||
758 | #define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38 | ||
759 | #define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1 | ||
760 | #define QIB_7220_HwErrClear_Reserved_LSB 0x37 | ||
761 | #define QIB_7220_HwErrClear_Reserved_RMASK 0x1 | ||
762 | #define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36 | ||
763 | #define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1 | ||
764 | #define QIB_7220_HwErrClear_Reserved1_LSB 0x33 | ||
765 | #define QIB_7220_HwErrClear_Reserved1_RMASK 0x7 | ||
766 | #define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C | ||
767 | #define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F | ||
768 | #define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28 | ||
769 | #define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF | ||
770 | #define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27 | ||
771 | #define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1 | ||
772 | #define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26 | ||
773 | #define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1 | ||
774 | #define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25 | ||
775 | #define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1 | ||
776 | #define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24 | ||
777 | #define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1 | ||
778 | #define QIB_7220_HwErrClear_Reserved2_LSB 0x22 | ||
779 | #define QIB_7220_HwErrClear_Reserved2_RMASK 0x3 | ||
780 | #define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F | ||
781 | #define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7 | ||
782 | #define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E | ||
783 | #define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1 | ||
784 | #define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D | ||
785 | #define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1 | ||
786 | #define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C | ||
787 | #define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1 | ||
788 | #define QIB_7220_HwErrClear_Reserved3_LSB 0x8 | ||
789 | #define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF | ||
790 | #define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0 | ||
791 | #define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF | ||
792 | |||
793 | #define QIB_7220_HwDiagCtrl_OFFS 0xB0 | ||
794 | #define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F | ||
795 | #define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1 | ||
796 | #define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E | ||
797 | #define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1 | ||
798 | #define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D | ||
799 | #define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1 | ||
800 | #define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C | ||
801 | #define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1 | ||
802 | #define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33 | ||
803 | #define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF | ||
804 | #define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C | ||
805 | #define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F | ||
806 | #define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28 | ||
807 | #define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF | ||
808 | #define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27 | ||
809 | #define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1 | ||
810 | #define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26 | ||
811 | #define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1 | ||
812 | #define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25 | ||
813 | #define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1 | ||
814 | #define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24 | ||
815 | #define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1 | ||
816 | #define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23 | ||
817 | #define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1 | ||
818 | #define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F | ||
819 | #define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF | ||
820 | #define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8 | ||
821 | #define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF | ||
822 | #define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0 | ||
823 | #define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF | ||
824 | |||
825 | #define QIB_7220_REG_0000B8_OFFS 0xB8 | ||
826 | |||
827 | #define QIB_7220_IBCStatus_OFFS 0xC0 | ||
828 | #define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F | ||
829 | #define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1 | ||
830 | #define QIB_7220_IBCStatus_TxReady_LSB 0x1E | ||
831 | #define QIB_7220_IBCStatus_TxReady_RMASK 0x1 | ||
832 | #define QIB_7220_IBCStatus_Reserved_LSB 0xE | ||
833 | #define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF | ||
834 | #define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD | ||
835 | #define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1 | ||
836 | #define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC | ||
837 | #define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1 | ||
838 | #define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB | ||
839 | #define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1 | ||
840 | #define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA | ||
841 | #define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1 | ||
842 | #define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9 | ||
843 | #define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1 | ||
844 | #define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8 | ||
845 | #define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1 | ||
846 | #define QIB_7220_IBCStatus_LinkState_LSB 0x5 | ||
847 | #define QIB_7220_IBCStatus_LinkState_RMASK 0x7 | ||
848 | #define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0 | ||
849 | #define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F | ||
850 | |||
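The _LSB/_RMASK pairs in this header follow one convention throughout: _LSB is the bit position where a field starts, and _RMASK is the mask that applies after the field has been right-shifted into place. A minimal, self-contained sketch of how such a pair would be consumed is below; the qib_field() helper and the sampled register value are illustrative only (not part of this patch), and the two macros are copied from the QIB_7220_IBCStatus definitions above.

#include <stdint.h>
#include <stdio.h>

/* Copied from the QIB_7220_IBCStatus definitions above. */
#define QIB_7220_IBCStatus_LinkState_LSB 0x5
#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F

/* Extract a right-justified field from a 64-bit register image. */
static inline uint64_t qib_field(uint64_t regval, unsigned int lsb,
				 uint64_t rmask)
{
	return (regval >> lsb) & rmask;
}

int main(void)
{
	uint64_t ibcstatus = 0xE5;	/* hypothetical sampled register value */

	printf("LinkState=%llu LinkTrainingState=%llu\n",
	       (unsigned long long)qib_field(ibcstatus,
			QIB_7220_IBCStatus_LinkState_LSB,
			QIB_7220_IBCStatus_LinkState_RMASK),
	       (unsigned long long)qib_field(ibcstatus,
			QIB_7220_IBCStatus_LinkTrainingState_LSB,
			QIB_7220_IBCStatus_LinkTrainingState_RMASK));
	return 0;
}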
851 | #define QIB_7220_IBCCtrl_OFFS 0xC8 | ||
852 | #define QIB_7220_IBCCtrl_Loopback_LSB 0x3F | ||
853 | #define QIB_7220_IBCCtrl_Loopback_RMASK 0x1 | ||
854 | #define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E | ||
855 | #define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1 | ||
856 | #define QIB_7220_IBCCtrl_Reserved_LSB 0x2B | ||
857 | #define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF | ||
858 | #define QIB_7220_IBCCtrl_CreditScale_LSB 0x28 | ||
859 | #define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7 | ||
860 | #define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24 | ||
861 | #define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF | ||
862 | #define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20 | ||
863 | #define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF | ||
864 | #define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15 | ||
865 | #define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF | ||
866 | #define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13 | ||
867 | #define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3 | ||
868 | #define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10 | ||
869 | #define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7 | ||
870 | #define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8 | ||
871 | #define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF | ||
872 | #define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0 | ||
873 | #define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF | ||
874 | |||
875 | #define QIB_7220_EXTStatus_OFFS 0xD0 | ||
876 | #define QIB_7220_EXTStatus_GPIOIn_LSB 0x30 | ||
877 | #define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF | ||
878 | #define QIB_7220_EXTStatus_Reserved_LSB 0x20 | ||
879 | #define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF | ||
880 | #define QIB_7220_EXTStatus_Reserved1_LSB 0x10 | ||
881 | #define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF | ||
882 | #define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF | ||
883 | #define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1 | ||
884 | #define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE | ||
885 | #define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1 | ||
886 | #define QIB_7220_EXTStatus_Reserved2_LSB 0x0 | ||
887 | #define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF | ||
888 | |||
889 | #define QIB_7220_EXTCtrl_OFFS 0xD8 | ||
890 | #define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30 | ||
891 | #define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF | ||
892 | #define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20 | ||
893 | #define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF | ||
894 | #define QIB_7220_EXTCtrl_Reserved_LSB 0x4 | ||
895 | #define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF | ||
896 | #define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3 | ||
897 | #define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1 | ||
898 | #define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2 | ||
899 | #define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1 | ||
900 | #define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1 | ||
901 | #define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1 | ||
902 | #define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0 | ||
903 | #define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1 | ||
904 | |||
905 | #define QIB_7220_GPIOOut_OFFS 0xE0 | ||
906 | |||
907 | #define QIB_7220_GPIOMask_OFFS 0xE8 | ||
908 | |||
909 | #define QIB_7220_GPIOStatus_OFFS 0xF0 | ||
910 | |||
911 | #define QIB_7220_GPIOClear_OFFS 0xF8 | ||
912 | |||
913 | #define QIB_7220_RcvCtrl_OFFS 0x100 | ||
914 | #define QIB_7220_RcvCtrl_Reserved_LSB 0x27 | ||
915 | #define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF | ||
916 | #define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26 | ||
917 | #define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1 | ||
918 | #define QIB_7220_RcvCtrl_PortCfg_LSB 0x24 | ||
919 | #define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3 | ||
920 | #define QIB_7220_RcvCtrl_TailUpd_LSB 0x23 | ||
921 | #define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1 | ||
922 | #define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22 | ||
923 | #define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1 | ||
924 | #define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11 | ||
925 | #define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF | ||
926 | #define QIB_7220_RcvCtrl_PortEnable_LSB 0x0 | ||
927 | #define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF | ||
928 | |||
929 | #define QIB_7220_RcvBTHQP_OFFS 0x108 | ||
930 | #define QIB_7220_RcvBTHQP_Reserved_LSB 0x18 | ||
931 | #define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF | ||
932 | #define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0 | ||
933 | #define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF | ||
934 | |||
935 | #define QIB_7220_RcvHdrSize_OFFS 0x110 | ||
936 | |||
937 | #define QIB_7220_RcvHdrCnt_OFFS 0x118 | ||
938 | |||
939 | #define QIB_7220_RcvHdrEntSize_OFFS 0x120 | ||
940 | |||
941 | #define QIB_7220_RcvTIDBase_OFFS 0x128 | ||
942 | |||
943 | #define QIB_7220_RcvTIDCnt_OFFS 0x130 | ||
944 | |||
945 | #define QIB_7220_RcvEgrBase_OFFS 0x138 | ||
946 | |||
947 | #define QIB_7220_RcvEgrCnt_OFFS 0x140 | ||
948 | |||
949 | #define QIB_7220_RcvBufBase_OFFS 0x148 | ||
950 | |||
951 | #define QIB_7220_RcvBufSize_OFFS 0x150 | ||
952 | |||
953 | #define QIB_7220_RxIntMemBase_OFFS 0x158 | ||
954 | |||
955 | #define QIB_7220_RxIntMemSize_OFFS 0x160 | ||
956 | |||
957 | #define QIB_7220_RcvPartitionKey_OFFS 0x168 | ||
958 | |||
959 | #define QIB_7220_RcvQPMulticastPort_OFFS 0x170 | ||
960 | #define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5 | ||
961 | #define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF | ||
962 | #define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0 | ||
963 | #define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F | ||
964 | |||
965 | #define QIB_7220_RcvPktLEDCnt_OFFS 0x178 | ||
966 | #define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20 | ||
967 | #define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF | ||
968 | #define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0 | ||
969 | #define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF | ||
970 | |||
971 | #define QIB_7220_IBCDDRCtrl_OFFS 0x180 | ||
972 | #define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30 | ||
973 | #define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF | ||
974 | #define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20 | ||
975 | #define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF | ||
976 | #define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B | ||
977 | #define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F | ||
978 | #define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A | ||
979 | #define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1 | ||
980 | #define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12 | ||
981 | #define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF | ||
982 | #define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11 | ||
983 | #define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1 | ||
984 | #define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10 | ||
985 | #define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1 | ||
986 | #define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC | ||
987 | #define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF | ||
988 | #define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB | ||
989 | #define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1 | ||
990 | #define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA | ||
991 | #define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1 | ||
992 | #define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9 | ||
993 | #define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1 | ||
994 | #define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8 | ||
995 | #define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1 | ||
996 | #define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7 | ||
997 | #define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1 | ||
998 | #define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5 | ||
999 | #define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3 | ||
1000 | #define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4 | ||
1001 | #define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1 | ||
1002 | #define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3 | ||
1003 | #define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1 | ||
1004 | #define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2 | ||
1005 | #define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1 | ||
1006 | #define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1 | ||
1007 | #define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1 | ||
1008 | #define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0 | ||
1009 | #define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1 | ||
1010 | |||
1011 | #define QIB_7220_HRTBT_GUID_OFFS 0x188 | ||
1012 | |||
1013 | #define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0 | ||
1014 | #define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5 | ||
1015 | #define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F | ||
1016 | #define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0 | ||
1017 | #define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F | ||
1018 | |||
1019 | #define QIB_7220_IBCDDRStatus_OFFS 0x1A8 | ||
1020 | #define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24 | ||
1021 | #define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1 | ||
1022 | #define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20 | ||
1023 | #define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF | ||
1024 | #define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E | ||
1025 | #define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3 | ||
1026 | #define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A | ||
1027 | #define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF | ||
1028 | #define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0 | ||
1029 | #define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF | ||
1030 | |||
1031 | #define QIB_7220_JIntReload_OFFS 0x1B0 | ||
1032 | #define QIB_7220_JIntReload_J_limit_reload_LSB 0x10 | ||
1033 | #define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF | ||
1034 | #define QIB_7220_JIntReload_J_reload_LSB 0x0 | ||
1035 | #define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF | ||
1036 | |||
1037 | #define QIB_7220_IBNCModeCtrl_OFFS 0x1B8 | ||
1038 | #define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A | ||
1039 | #define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF | ||
1040 | #define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11 | ||
1041 | #define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF | ||
1042 | #define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8 | ||
1043 | #define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF | ||
1044 | #define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3 | ||
1045 | #define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F | ||
1046 | #define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2 | ||
1047 | #define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1 | ||
1048 | #define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1 | ||
1049 | #define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1 | ||
1050 | #define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0 | ||
1051 | #define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1 | ||
1052 | |||
1053 | #define QIB_7220_SendCtrl_OFFS 0x1C0 | ||
1054 | #define QIB_7220_SendCtrl_Disarm_LSB 0x1F | ||
1055 | #define QIB_7220_SendCtrl_Disarm_RMASK 0x1 | ||
1056 | #define QIB_7220_SendCtrl_Reserved_LSB 0x1D | ||
1057 | #define QIB_7220_SendCtrl_Reserved_RMASK 0x3 | ||
1058 | #define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18 | ||
1059 | #define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F | ||
1060 | #define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10 | ||
1061 | #define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF | ||
1062 | #define QIB_7220_SendCtrl_Reserved1_LSB 0xD | ||
1063 | #define QIB_7220_SendCtrl_Reserved1_RMASK 0x7 | ||
1064 | #define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC | ||
1065 | #define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1 | ||
1066 | #define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB | ||
1067 | #define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1 | ||
1068 | #define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA | ||
1069 | #define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1 | ||
1070 | #define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9 | ||
1071 | #define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1 | ||
1072 | #define QIB_7220_SendCtrl_Reserved2_LSB 0x5 | ||
1073 | #define QIB_7220_SendCtrl_Reserved2_RMASK 0xF | ||
1074 | #define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4 | ||
1075 | #define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1 | ||
1076 | #define QIB_7220_SendCtrl_SPioEnable_LSB 0x3 | ||
1077 | #define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1 | ||
1078 | #define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2 | ||
1079 | #define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1 | ||
1080 | #define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1 | ||
1081 | #define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1 | ||
1082 | #define QIB_7220_SendCtrl_Abort_LSB 0x0 | ||
1083 | #define QIB_7220_SendCtrl_Abort_RMASK 0x1 | ||
1084 | |||
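Writes go the other way: a field value is masked with its _RMASK, shifted up by its _LSB, and OR'd into the register image. A minimal sketch under that reading follows; qib_set_field() and build_sendctrl() are illustrative names rather than driver functions, and the two macro pairs are copied from the QIB_7220_SendCtrl definitions above.

#include <stdint.h>
#include <stdio.h>

/* Copied from the QIB_7220_SendCtrl definitions above. */
#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1

/* Clear a field, then insert a new right-justified value into it. */
static inline uint64_t qib_set_field(uint64_t regval, unsigned int lsb,
				     uint64_t rmask, uint64_t fldval)
{
	regval &= ~(rmask << lsb);
	regval |= (fldval & rmask) << lsb;
	return regval;
}

/* Example: enable SDMA and set the avail-update threshold to 16. */
static uint64_t build_sendctrl(uint64_t cur)
{
	cur = qib_set_field(cur, QIB_7220_SendCtrl_SDmaEnable_LSB,
			    QIB_7220_SendCtrl_SDmaEnable_RMASK, 1);
	cur = qib_set_field(cur, QIB_7220_SendCtrl_AvailUpdThld_LSB,
			    QIB_7220_SendCtrl_AvailUpdThld_RMASK, 16);
	return cur;
}

int main(void)
{
	printf("SendCtrl image: 0x%llx\n",
	       (unsigned long long)build_sendctrl(0));
	return 0;
}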
1085 | #define QIB_7220_SendBufBase_OFFS 0x1C8 | ||
1086 | #define QIB_7220_SendBufBase_Reserved_LSB 0x35 | ||
1087 | #define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF | ||
1088 | #define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20 | ||
1089 | #define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF | ||
1090 | #define QIB_7220_SendBufBase_Reserved1_LSB 0x15 | ||
1091 | #define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF | ||
1092 | #define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0 | ||
1093 | #define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF | ||
1094 | |||
1095 | #define QIB_7220_SendBufSize_OFFS 0x1D0 | ||
1096 | #define QIB_7220_SendBufSize_Reserved_LSB 0x2D | ||
1097 | #define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF | ||
1098 | #define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20 | ||
1099 | #define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF | ||
1100 | #define QIB_7220_SendBufSize_Reserved1_LSB 0xC | ||
1101 | #define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF | ||
1102 | #define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0 | ||
1103 | #define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF | ||
1104 | |||
1105 | #define QIB_7220_SendBufCnt_OFFS 0x1D8 | ||
1106 | #define QIB_7220_SendBufCnt_Reserved_LSB 0x24 | ||
1107 | #define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF | ||
1108 | #define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20 | ||
1109 | #define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF | ||
1110 | #define QIB_7220_SendBufCnt_Reserved1_LSB 0x9 | ||
1111 | #define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF | ||
1112 | #define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0 | ||
1113 | #define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF | ||
1114 | |||
1115 | #define QIB_7220_SendBufAvailAddr_OFFS 0x1E0 | ||
1116 | #define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6 | ||
1117 | #define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF | ||
1118 | #define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0 | ||
1119 | #define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F | ||
1120 | |||
1121 | #define QIB_7220_TxIntMemBase_OFFS 0x1E8 | ||
1122 | |||
1123 | #define QIB_7220_TxIntMemSize_OFFS 0x1F0 | ||
1124 | |||
1125 | #define QIB_7220_SendDmaBase_OFFS 0x1F8 | ||
1126 | #define QIB_7220_SendDmaBase_Reserved_LSB 0x30 | ||
1127 | #define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF | ||
1128 | #define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0 | ||
1129 | #define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF | ||
1130 | |||
1131 | #define QIB_7220_SendDmaLenGen_OFFS 0x200 | ||
1132 | #define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13 | ||
1133 | #define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF | ||
1134 | #define QIB_7220_SendDmaLenGen_Generation_LSB 0x10 | ||
1135 | #define QIB_7220_SendDmaLenGen_Generation_MSB 0x12 | ||
1136 | #define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7 | ||
1137 | #define QIB_7220_SendDmaLenGen_Length_LSB 0x0 | ||
1138 | #define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF | ||
1139 | |||
1140 | #define QIB_7220_SendDmaTail_OFFS 0x208 | ||
1141 | #define QIB_7220_SendDmaTail_Reserved_LSB 0x10 | ||
1142 | #define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF | ||
1143 | #define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0 | ||
1144 | #define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF | ||
1145 | |||
1146 | #define QIB_7220_SendDmaHead_OFFS 0x210 | ||
1147 | #define QIB_7220_SendDmaHead_Reserved_LSB 0x30 | ||
1148 | #define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF | ||
1149 | #define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20 | ||
1150 | #define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF | ||
1151 | #define QIB_7220_SendDmaHead_Reserved1_LSB 0x10 | ||
1152 | #define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF | ||
1153 | #define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0 | ||
1154 | #define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF | ||
1155 | |||
1156 | #define QIB_7220_SendDmaHeadAddr_OFFS 0x218 | ||
1157 | #define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30 | ||
1158 | #define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF | ||
1159 | #define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0 | ||
1160 | #define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF | ||
1161 | |||
1162 | #define QIB_7220_SendDmaBufMask0_OFFS 0x220 | ||
1163 | #define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0 | ||
1164 | #define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0 | ||
1165 | |||
1166 | #define QIB_7220_SendDmaStatus_OFFS 0x238 | ||
1167 | #define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F | ||
1168 | #define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1 | ||
1169 | #define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E | ||
1170 | #define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1 | ||
1171 | #define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D | ||
1172 | #define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1 | ||
1173 | #define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F | ||
1174 | #define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF | ||
1175 | #define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28 | ||
1176 | #define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F | ||
1177 | #define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20 | ||
1178 | #define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF | ||
1179 | #define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F | ||
1180 | #define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1 | ||
1181 | #define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E | ||
1182 | #define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1 | ||
1183 | #define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D | ||
1184 | #define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1 | ||
1185 | #define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C | ||
1186 | #define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1 | ||
1187 | #define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B | ||
1188 | #define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1 | ||
1189 | #define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A | ||
1190 | #define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1 | ||
1191 | #define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19 | ||
1192 | #define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1 | ||
1193 | #define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18 | ||
1194 | #define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1 | ||
1195 | #define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10 | ||
1196 | #define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF | ||
1197 | #define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0 | ||
1198 | #define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF | ||
1199 | |||
1200 | #define QIB_7220_SendBufErr0_OFFS 0x240 | ||
1201 | #define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0 | ||
1202 | #define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0 | ||
1203 | |||
1204 | #define QIB_7220_RcvHdrAddr0_OFFS 0x270 | ||
1205 | #define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2 | ||
1206 | #define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF | ||
1207 | #define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0 | ||
1208 | #define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3 | ||
1209 | |||
1210 | #define QIB_7220_RcvHdrTailAddr0_OFFS 0x300 | ||
1211 | #define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2 | ||
1212 | #define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF | ||
1213 | #define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0 | ||
1214 | #define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3 | ||
1215 | |||
1216 | #define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0 | ||
1217 | #define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8 | ||
1218 | #define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1 | ||
1219 | #define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1 | ||
1220 | #define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F | ||
1221 | #define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0 | ||
1222 | #define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1 | ||
1223 | |||
1224 | #define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8 | ||
1225 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F | ||
1226 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1 | ||
1227 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E | ||
1228 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1 | ||
1229 | #define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D | ||
1230 | #define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1 | ||
1231 | #define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C | ||
1232 | #define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1 | ||
1233 | #define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B | ||
1234 | #define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1 | ||
1235 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19 | ||
1236 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3 | ||
1237 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18 | ||
1238 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1 | ||
1239 | #define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17 | ||
1240 | #define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1 | ||
1241 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8 | ||
1242 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF | ||
1243 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0 | ||
1244 | #define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF | ||
1245 | |||
1246 | #define QIB_7220_XGXSCfg_OFFS 0x3D8 | ||
1247 | #define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F | ||
1248 | #define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1 | ||
1249 | #define QIB_7220_XGXSCfg_Reserved_LSB 0x13 | ||
1250 | #define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF | ||
1251 | #define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9 | ||
1252 | #define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF | ||
1253 | #define QIB_7220_XGXSCfg_Reserved1_LSB 0x3 | ||
1254 | #define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F | ||
1255 | #define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2 | ||
1256 | #define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1 | ||
1257 | #define QIB_7220_XGXSCfg_Reserved2_LSB 0x1 | ||
1258 | #define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1 | ||
1259 | #define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0 | ||
1260 | #define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1 | ||
1261 | |||
1262 | #define QIB_7220_IBSerDesCtrl_OFFS 0x3E0 | ||
1263 | #define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D | ||
1264 | #define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF | ||
1265 | #define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C | ||
1266 | #define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1 | ||
1267 | #define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A | ||
1268 | #define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3 | ||
1269 | #define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28 | ||
1270 | #define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3 | ||
1271 | #define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25 | ||
1272 | #define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7 | ||
1273 | #define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24 | ||
1274 | #define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1 | ||
1275 | #define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23 | ||
1276 | #define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1 | ||
1277 | #define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22 | ||
1278 | #define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1 | ||
1279 | #define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21 | ||
1280 | #define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1 | ||
1281 | #define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20 | ||
1282 | #define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1 | ||
1283 | #define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12 | ||
1284 | #define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF | ||
1285 | #define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD | ||
1286 | #define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F | ||
1287 | #define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8 | ||
1288 | #define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F | ||
1289 | #define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1 | ||
1290 | #define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F | ||
1291 | #define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0 | ||
1292 | #define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1 | ||
1293 | |||
1294 | #define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400 | ||
1295 | #define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8 | ||
1296 | #define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1 | ||
1297 | #define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3 | ||
1298 | #define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F | ||
1299 | #define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1 | ||
1300 | #define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3 | ||
1301 | #define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0 | ||
1302 | #define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1 | ||
1303 | |||
1304 | #define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408 | ||
1305 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F | ||
1306 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1 | ||
1307 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E | ||
1308 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1 | ||
1309 | #define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D | ||
1310 | #define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1 | ||
1311 | #define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C | ||
1312 | #define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1 | ||
1313 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19 | ||
1314 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7 | ||
1315 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18 | ||
1316 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1 | ||
1317 | #define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17 | ||
1318 | #define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1 | ||
1319 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8 | ||
1320 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF | ||
1321 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0 | ||
1322 | #define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF | ||
1323 | |||
1324 | #define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500 | ||
1325 | #define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4 | ||
1326 | #define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F | ||
1327 | #define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0 | ||
1328 | #define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF | ||
1329 | |||
1330 | #define QIB_7220_LBIntCnt_OFFS 0x13000 | ||
1331 | |||
1332 | #define QIB_7220_LBFlowStallCnt_OFFS 0x13008 | ||
1333 | |||
1334 | #define QIB_7220_TxSDmaDescCnt_OFFS 0x13010 | ||
1335 | |||
1336 | #define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018 | ||
1337 | |||
1338 | #define QIB_7220_TxDataPktCnt_OFFS 0x13020 | ||
1339 | |||
1340 | #define QIB_7220_TxFlowPktCnt_OFFS 0x13028 | ||
1341 | |||
1342 | #define QIB_7220_TxDwordCnt_OFFS 0x13030 | ||
1343 | |||
1344 | #define QIB_7220_TxLenErrCnt_OFFS 0x13038 | ||
1345 | |||
1346 | #define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040 | ||
1347 | |||
1348 | #define QIB_7220_TxUnderrunCnt_OFFS 0x13048 | ||
1349 | |||
1350 | #define QIB_7220_TxFlowStallCnt_OFFS 0x13050 | ||
1351 | |||
1352 | #define QIB_7220_TxDroppedPktCnt_OFFS 0x13058 | ||
1353 | |||
1354 | #define QIB_7220_RxDroppedPktCnt_OFFS 0x13060 | ||
1355 | |||
1356 | #define QIB_7220_RxDataPktCnt_OFFS 0x13068 | ||
1357 | |||
1358 | #define QIB_7220_RxFlowPktCnt_OFFS 0x13070 | ||
1359 | |||
1360 | #define QIB_7220_RxDwordCnt_OFFS 0x13078 | ||
1361 | |||
1362 | #define QIB_7220_RxLenErrCnt_OFFS 0x13080 | ||
1363 | |||
1364 | #define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088 | ||
1365 | |||
1366 | #define QIB_7220_RxICRCErrCnt_OFFS 0x13090 | ||
1367 | |||
1368 | #define QIB_7220_RxVCRCErrCnt_OFFS 0x13098 | ||
1369 | |||
1370 | #define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0 | ||
1371 | |||
1372 | #define QIB_7220_RxVersionErrCnt_OFFS 0x130A8 | ||
1373 | |||
1374 | #define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0 | ||
1375 | |||
1376 | #define QIB_7220_RxEBPCnt_OFFS 0x130B8 | ||
1377 | |||
1378 | #define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0 | ||
1379 | |||
1380 | #define QIB_7220_RxBufOvflCnt_OFFS 0x130C8 | ||
1381 | |||
1382 | #define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0 | ||
1383 | |||
1384 | #define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8 | ||
1385 | |||
1386 | #define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0 | ||
1387 | |||
1388 | #define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8 | ||
1389 | |||
1390 | #define QIB_7220_IBStatusChangeCnt_OFFS 0x13170 | ||
1391 | |||
1392 | #define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178 | ||
1393 | |||
1394 | #define QIB_7220_IBLinkDownedCnt_OFFS 0x13180 | ||
1395 | |||
1396 | #define QIB_7220_IBSymbolErrCnt_OFFS 0x13188 | ||
1397 | |||
1398 | #define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190 | ||
1399 | |||
1400 | #define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198 | ||
1401 | |||
1402 | #define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0 | ||
1403 | |||
1404 | #define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8 | ||
1405 | |||
1406 | #define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0 | ||
1407 | |||
1408 | #define QIB_7220_RxVlErrCnt_OFFS 0x131B8 | ||
1409 | |||
1410 | #define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0 | ||
1411 | |||
1412 | #define QIB_7220_CNT_0131C8_OFFS 0x131C8 | ||
1413 | |||
1414 | #define QIB_7220_PSStat_OFFS 0x13200 | ||
1415 | |||
1416 | #define QIB_7220_PSStart_OFFS 0x13208 | ||
1417 | |||
1418 | #define QIB_7220_PSInterval_OFFS 0x13210 | ||
1419 | |||
1420 | #define QIB_7220_PSRcvDataCount_OFFS 0x13218 | ||
1421 | |||
1422 | #define QIB_7220_PSRcvPktsCount_OFFS 0x13220 | ||
1423 | |||
1424 | #define QIB_7220_PSXmitDataCount_OFFS 0x13228 | ||
1425 | |||
1426 | #define QIB_7220_PSXmitPktsCount_OFFS 0x13230 | ||
1427 | |||
1428 | #define QIB_7220_PSXmitWaitCount_OFFS 0x13238 | ||
1429 | |||
1430 | #define QIB_7220_CNT_013240_OFFS 0x13240 | ||
1431 | |||
1432 | #define QIB_7220_RcvEgrArray_OFFS 0x14000 | ||
1433 | |||
1434 | #define QIB_7220_MEM_038000_OFFS 0x38000 | ||
1435 | |||
1436 | #define QIB_7220_RcvTIDArray0_OFFS 0x53000 | ||
1437 | |||
1438 | #define QIB_7220_PIOLaunchFIFO_OFFS 0x64000 | ||
1439 | |||
1440 | #define QIB_7220_MEM_064480_OFFS 0x64480 | ||
1441 | |||
1442 | #define QIB_7220_SendPIOpbcCache_OFFS 0x64800 | ||
1443 | |||
1444 | #define QIB_7220_MEM_064C80_OFFS 0x64C80 | ||
1445 | |||
1446 | #define QIB_7220_PreLaunchFIFO_OFFS 0x65000 | ||
1447 | |||
1448 | #define QIB_7220_MEM_065080_OFFS 0x65080 | ||
1449 | |||
1450 | #define QIB_7220_ScoreBoard_OFFS 0x65400 | ||
1451 | |||
1452 | #define QIB_7220_MEM_065440_OFFS 0x65440 | ||
1453 | |||
1454 | #define QIB_7220_DescriptorFIFO_OFFS 0x65800 | ||
1455 | |||
1456 | #define QIB_7220_MEM_065880_OFFS 0x65880 | ||
1457 | |||
1458 | #define QIB_7220_RcvBuf1_OFFS 0x72000 | ||
1459 | |||
1460 | #define QIB_7220_MEM_074800_OFFS 0x74800 | ||
1461 | |||
1462 | #define QIB_7220_RcvBuf2_OFFS 0x75000 | ||
1463 | |||
1464 | #define QIB_7220_MEM_076400_OFFS 0x76400 | ||
1465 | |||
1466 | #define QIB_7220_RcvFlags_OFFS 0x77000 | ||
1467 | |||
1468 | #define QIB_7220_MEM_078400_OFFS 0x78400 | ||
1469 | |||
1470 | #define QIB_7220_RcvLookupBuf1_OFFS 0x79000 | ||
1471 | |||
1472 | #define QIB_7220_MEM_07A400_OFFS 0x7A400 | ||
1473 | |||
1474 | #define QIB_7220_RcvDMADatBuf_OFFS 0x7B000 | ||
1475 | |||
1476 | #define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800 | ||
1477 | |||
1478 | #define QIB_7220_MiscRXEIntMem_OFFS 0x7C000 | ||
1479 | |||
1480 | #define QIB_7220_MEM_07D400_OFFS 0x7D400 | ||
1481 | |||
1482 | #define QIB_7220_PCIERcvBuf_OFFS 0x80000 | ||
1483 | |||
1484 | #define QIB_7220_PCIERetryBuf_OFFS 0x84000 | ||
1485 | |||
1486 | #define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000 | ||
1487 | |||
1488 | #define QIB_7220_PCIECplBuf_OFFS 0x90000 | ||
1489 | |||
1490 | #define QIB_7220_IBSerDesMappTable_OFFS 0x94000 | ||
1491 | |||
1492 | #define QIB_7220_MEM_095000_OFFS 0x95000 | ||
1493 | |||
1494 | #define QIB_7220_SendBuf0_MA_OFFS 0x100000 | ||
1495 | |||
1496 | #define QIB_7220_MEM_1A0000_OFFS 0x1A0000 | ||
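
The 7220 and 7322 headers in this patch consist entirely of mechanically generated <register>_<field>_LSB / _MSB / _RMASK triplets. For orientation, the standalone sketch below shows how such constants are conventionally combined to decode a field from a raw 64-bit register readback. It is illustrative only and not part of the patch: the field defines are copied from the QIB_7220 pciesd_epb_transaction_reg entries earlier in this header, while the FIELD_GET() helper and the sample value are assumptions made for this sketch rather than driver code.

/*
 * Sketch only -- not part of the patch.  Decode fields from a raw
 * register value using the generated _LSB / _RMASK constants.
 */
#include <stdint.h>
#include <stdio.h>

/* Copied from the QIB_7220 pciesd_epb_transaction_reg defines above. */
#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB       0x1F
#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK     0x1
#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB   0x8
#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB      0x0
#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK    0xFF

/* Hypothetical helper: shift the field down to bit 0, then mask it to its width. */
#define FIELD_GET(reg, fld, val) \
	(((val) >> QIB_7220_##reg##_##fld##_LSB) & \
	 QIB_7220_##reg##_##fld##_RMASK)

int main(void)
{
	uint64_t xact = 0x80000A55ULL;	/* hypothetical readback value */

	/* prints: rdy=1 addr=0xa data=0x55 */
	printf("rdy=%llu addr=0x%llx data=0x%llx\n",
	       (unsigned long long)FIELD_GET(pciesd_epb_transaction_reg,
					     pcie_epb_rdy, xact),
	       (unsigned long long)FIELD_GET(pciesd_epb_transaction_reg,
					     pcie_epb_address, xact),
	       (unsigned long long)FIELD_GET(pciesd_epb_transaction_reg,
					     pcie_epb_data, xact));
	return 0;
}

The same pattern applies to every register in both headers; writable fields are composed in the reverse direction by masking a value with _RMASK and shifting it left by _LSB before OR-ing it into the register image.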
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h new file mode 100644 index 000000000000..a97440ba924c --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_7322_regs.h | |||
@@ -0,0 +1,3163 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | /* This file is mechanically generated from RTL. Any hand-edits will be lost! */ | ||
34 | |||
35 | #define QIB_7322_Revision_OFFS 0x0 | ||
36 | #define QIB_7322_Revision_DEF 0x0000000002010601 | ||
37 | #define QIB_7322_Revision_R_Simulator_LSB 0x3F | ||
38 | #define QIB_7322_Revision_R_Simulator_MSB 0x3F | ||
39 | #define QIB_7322_Revision_R_Simulator_RMASK 0x1 | ||
40 | #define QIB_7322_Revision_R_Emulation_LSB 0x3E | ||
41 | #define QIB_7322_Revision_R_Emulation_MSB 0x3E | ||
42 | #define QIB_7322_Revision_R_Emulation_RMASK 0x1 | ||
43 | #define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28 | ||
44 | #define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D | ||
45 | #define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF | ||
46 | #define QIB_7322_Revision_BoardID_LSB 0x20 | ||
47 | #define QIB_7322_Revision_BoardID_MSB 0x27 | ||
48 | #define QIB_7322_Revision_BoardID_RMASK 0xFF | ||
49 | #define QIB_7322_Revision_R_SW_LSB 0x18 | ||
50 | #define QIB_7322_Revision_R_SW_MSB 0x1F | ||
51 | #define QIB_7322_Revision_R_SW_RMASK 0xFF | ||
52 | #define QIB_7322_Revision_R_Arch_LSB 0x10 | ||
53 | #define QIB_7322_Revision_R_Arch_MSB 0x17 | ||
54 | #define QIB_7322_Revision_R_Arch_RMASK 0xFF | ||
55 | #define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8 | ||
56 | #define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF | ||
57 | #define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF | ||
58 | #define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0 | ||
59 | #define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7 | ||
60 | #define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF | ||
61 | |||
62 | #define QIB_7322_Control_OFFS 0x8 | ||
63 | #define QIB_7322_Control_DEF 0x0000000000000000 | ||
64 | #define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6 | ||
65 | #define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6 | ||
66 | #define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1 | ||
67 | #define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5 | ||
68 | #define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5 | ||
69 | #define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1 | ||
70 | #define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4 | ||
71 | #define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4 | ||
72 | #define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1 | ||
73 | #define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3 | ||
74 | #define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3 | ||
75 | #define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1 | ||
76 | #define QIB_7322_Control_FreezeMode_LSB 0x1 | ||
77 | #define QIB_7322_Control_FreezeMode_MSB 0x1 | ||
78 | #define QIB_7322_Control_FreezeMode_RMASK 0x1 | ||
79 | #define QIB_7322_Control_SyncReset_LSB 0x0 | ||
80 | #define QIB_7322_Control_SyncReset_MSB 0x0 | ||
81 | #define QIB_7322_Control_SyncReset_RMASK 0x1 | ||
82 | |||
83 | #define QIB_7322_PageAlign_OFFS 0x10 | ||
84 | #define QIB_7322_PageAlign_DEF 0x0000000000001000 | ||
85 | |||
86 | #define QIB_7322_ContextCnt_OFFS 0x18 | ||
87 | #define QIB_7322_ContextCnt_DEF 0x0000000000000012 | ||
88 | |||
89 | #define QIB_7322_Scratch_OFFS 0x20 | ||
90 | #define QIB_7322_Scratch_DEF 0x0000000000000000 | ||
91 | |||
92 | #define QIB_7322_CntrRegBase_OFFS 0x28 | ||
93 | #define QIB_7322_CntrRegBase_DEF 0x0000000000011000 | ||
94 | |||
95 | #define QIB_7322_SendRegBase_OFFS 0x30 | ||
96 | #define QIB_7322_SendRegBase_DEF 0x0000000000003000 | ||
97 | |||
98 | #define QIB_7322_UserRegBase_OFFS 0x38 | ||
99 | #define QIB_7322_UserRegBase_DEF 0x0000000000200000 | ||
100 | |||
101 | #define QIB_7322_IntMask_OFFS 0x68 | ||
102 | #define QIB_7322_IntMask_DEF 0x0000000000000000 | ||
103 | #define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F | ||
104 | #define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F | ||
105 | #define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1 | ||
106 | #define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E | ||
107 | #define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E | ||
108 | #define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1 | ||
109 | #define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D | ||
110 | #define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D | ||
111 | #define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1 | ||
112 | #define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C | ||
113 | #define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C | ||
114 | #define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1 | ||
115 | #define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B | ||
116 | #define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B | ||
117 | #define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1 | ||
118 | #define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A | ||
119 | #define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A | ||
120 | #define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1 | ||
121 | #define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39 | ||
122 | #define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39 | ||
123 | #define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1 | ||
124 | #define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38 | ||
125 | #define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38 | ||
126 | #define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1 | ||
127 | #define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31 | ||
128 | #define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31 | ||
129 | #define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1 | ||
130 | #define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30 | ||
131 | #define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30 | ||
132 | #define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1 | ||
133 | #define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F | ||
134 | #define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F | ||
135 | #define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1 | ||
136 | #define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E | ||
137 | #define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E | ||
138 | #define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1 | ||
139 | #define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D | ||
140 | #define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D | ||
141 | #define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1 | ||
142 | #define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C | ||
143 | #define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C | ||
144 | #define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1 | ||
145 | #define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B | ||
146 | #define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B | ||
147 | #define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1 | ||
148 | #define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A | ||
149 | #define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A | ||
150 | #define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1 | ||
151 | #define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29 | ||
152 | #define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29 | ||
153 | #define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1 | ||
154 | #define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28 | ||
155 | #define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28 | ||
156 | #define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1 | ||
157 | #define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27 | ||
158 | #define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27 | ||
159 | #define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1 | ||
160 | #define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26 | ||
161 | #define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26 | ||
162 | #define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1 | ||
163 | #define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25 | ||
164 | #define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25 | ||
165 | #define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1 | ||
166 | #define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24 | ||
167 | #define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24 | ||
168 | #define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1 | ||
169 | #define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23 | ||
170 | #define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23 | ||
171 | #define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1 | ||
172 | #define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22 | ||
173 | #define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22 | ||
174 | #define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1 | ||
175 | #define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21 | ||
176 | #define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21 | ||
177 | #define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1 | ||
178 | #define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20 | ||
179 | #define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20 | ||
180 | #define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1 | ||
181 | #define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F | ||
182 | #define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F | ||
183 | #define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1 | ||
184 | #define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E | ||
185 | #define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E | ||
186 | #define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1 | ||
187 | #define QIB_7322_IntMask_ErrIntMask_LSB 0x1D | ||
188 | #define QIB_7322_IntMask_ErrIntMask_MSB 0x1D | ||
189 | #define QIB_7322_IntMask_ErrIntMask_RMASK 0x1 | ||
190 | #define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C | ||
191 | #define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C | ||
192 | #define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1 | ||
193 | #define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19 | ||
194 | #define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19 | ||
195 | #define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1 | ||
196 | #define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18 | ||
197 | #define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18 | ||
198 | #define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1 | ||
199 | #define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17 | ||
200 | #define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17 | ||
201 | #define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1 | ||
202 | #define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11 | ||
203 | #define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11 | ||
204 | #define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1 | ||
205 | #define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10 | ||
206 | #define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10 | ||
207 | #define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1 | ||
208 | #define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF | ||
209 | #define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF | ||
210 | #define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1 | ||
211 | #define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE | ||
212 | #define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE | ||
213 | #define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1 | ||
214 | #define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD | ||
215 | #define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD | ||
216 | #define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1 | ||
217 | #define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC | ||
218 | #define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC | ||
219 | #define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1 | ||
220 | #define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB | ||
221 | #define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB | ||
222 | #define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1 | ||
223 | #define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA | ||
224 | #define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA | ||
225 | #define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1 | ||
226 | #define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9 | ||
227 | #define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9 | ||
228 | #define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1 | ||
229 | #define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8 | ||
230 | #define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8 | ||
231 | #define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1 | ||
232 | #define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7 | ||
233 | #define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7 | ||
234 | #define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1 | ||
235 | #define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6 | ||
236 | #define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6 | ||
237 | #define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1 | ||
238 | #define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5 | ||
239 | #define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5 | ||
240 | #define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1 | ||
241 | #define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4 | ||
242 | #define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4 | ||
243 | #define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1 | ||
244 | #define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3 | ||
245 | #define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3 | ||
246 | #define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1 | ||
247 | #define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2 | ||
248 | #define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2 | ||
249 | #define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1 | ||
250 | #define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1 | ||
251 | #define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1 | ||
252 | #define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1 | ||
253 | #define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0 | ||
254 | #define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0 | ||
255 | #define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1 | ||
256 | |||
257 | #define QIB_7322_IntStatus_OFFS 0x70 | ||
258 | #define QIB_7322_IntStatus_DEF 0x0000000000000000 | ||
259 | #define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F | ||
260 | #define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F | ||
261 | #define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1 | ||
262 | #define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E | ||
263 | #define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E | ||
264 | #define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1 | ||
265 | #define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D | ||
266 | #define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D | ||
267 | #define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1 | ||
268 | #define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C | ||
269 | #define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C | ||
270 | #define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1 | ||
271 | #define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B | ||
272 | #define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B | ||
273 | #define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1 | ||
274 | #define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A | ||
275 | #define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A | ||
276 | #define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1 | ||
277 | #define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39 | ||
278 | #define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39 | ||
279 | #define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1 | ||
280 | #define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38 | ||
281 | #define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38 | ||
282 | #define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1 | ||
283 | #define QIB_7322_IntStatus_RcvUrg17_LSB 0x31 | ||
284 | #define QIB_7322_IntStatus_RcvUrg17_MSB 0x31 | ||
285 | #define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1 | ||
286 | #define QIB_7322_IntStatus_RcvUrg16_LSB 0x30 | ||
287 | #define QIB_7322_IntStatus_RcvUrg16_MSB 0x30 | ||
288 | #define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1 | ||
289 | #define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F | ||
290 | #define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F | ||
291 | #define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1 | ||
292 | #define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E | ||
293 | #define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E | ||
294 | #define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1 | ||
295 | #define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D | ||
296 | #define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D | ||
297 | #define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1 | ||
298 | #define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C | ||
299 | #define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C | ||
300 | #define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1 | ||
301 | #define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B | ||
302 | #define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B | ||
303 | #define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1 | ||
304 | #define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A | ||
305 | #define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A | ||
306 | #define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1 | ||
307 | #define QIB_7322_IntStatus_RcvUrg9_LSB 0x29 | ||
308 | #define QIB_7322_IntStatus_RcvUrg9_MSB 0x29 | ||
309 | #define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1 | ||
310 | #define QIB_7322_IntStatus_RcvUrg8_LSB 0x28 | ||
311 | #define QIB_7322_IntStatus_RcvUrg8_MSB 0x28 | ||
312 | #define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1 | ||
313 | #define QIB_7322_IntStatus_RcvUrg7_LSB 0x27 | ||
314 | #define QIB_7322_IntStatus_RcvUrg7_MSB 0x27 | ||
315 | #define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1 | ||
316 | #define QIB_7322_IntStatus_RcvUrg6_LSB 0x26 | ||
317 | #define QIB_7322_IntStatus_RcvUrg6_MSB 0x26 | ||
318 | #define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1 | ||
319 | #define QIB_7322_IntStatus_RcvUrg5_LSB 0x25 | ||
320 | #define QIB_7322_IntStatus_RcvUrg5_MSB 0x25 | ||
321 | #define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1 | ||
322 | #define QIB_7322_IntStatus_RcvUrg4_LSB 0x24 | ||
323 | #define QIB_7322_IntStatus_RcvUrg4_MSB 0x24 | ||
324 | #define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1 | ||
325 | #define QIB_7322_IntStatus_RcvUrg3_LSB 0x23 | ||
326 | #define QIB_7322_IntStatus_RcvUrg3_MSB 0x23 | ||
327 | #define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1 | ||
328 | #define QIB_7322_IntStatus_RcvUrg2_LSB 0x22 | ||
329 | #define QIB_7322_IntStatus_RcvUrg2_MSB 0x22 | ||
330 | #define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1 | ||
331 | #define QIB_7322_IntStatus_RcvUrg1_LSB 0x21 | ||
332 | #define QIB_7322_IntStatus_RcvUrg1_MSB 0x21 | ||
333 | #define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1 | ||
334 | #define QIB_7322_IntStatus_RcvUrg0_LSB 0x20 | ||
335 | #define QIB_7322_IntStatus_RcvUrg0_MSB 0x20 | ||
336 | #define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1 | ||
337 | #define QIB_7322_IntStatus_Err_1_LSB 0x1F | ||
338 | #define QIB_7322_IntStatus_Err_1_MSB 0x1F | ||
339 | #define QIB_7322_IntStatus_Err_1_RMASK 0x1 | ||
340 | #define QIB_7322_IntStatus_Err_0_LSB 0x1E | ||
341 | #define QIB_7322_IntStatus_Err_0_MSB 0x1E | ||
342 | #define QIB_7322_IntStatus_Err_0_RMASK 0x1 | ||
343 | #define QIB_7322_IntStatus_Err_LSB 0x1D | ||
344 | #define QIB_7322_IntStatus_Err_MSB 0x1D | ||
345 | #define QIB_7322_IntStatus_Err_RMASK 0x1 | ||
346 | #define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C | ||
347 | #define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C | ||
348 | #define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1 | ||
349 | #define QIB_7322_IntStatus_SendDone_1_LSB 0x19 | ||
350 | #define QIB_7322_IntStatus_SendDone_1_MSB 0x19 | ||
351 | #define QIB_7322_IntStatus_SendDone_1_RMASK 0x1 | ||
352 | #define QIB_7322_IntStatus_SendDone_0_LSB 0x18 | ||
353 | #define QIB_7322_IntStatus_SendDone_0_MSB 0x18 | ||
354 | #define QIB_7322_IntStatus_SendDone_0_RMASK 0x1 | ||
355 | #define QIB_7322_IntStatus_SendBufAvail_LSB 0x17 | ||
356 | #define QIB_7322_IntStatus_SendBufAvail_MSB 0x17 | ||
357 | #define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1 | ||
358 | #define QIB_7322_IntStatus_RcvAvail17_LSB 0x11 | ||
359 | #define QIB_7322_IntStatus_RcvAvail17_MSB 0x11 | ||
360 | #define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1 | ||
361 | #define QIB_7322_IntStatus_RcvAvail16_LSB 0x10 | ||
362 | #define QIB_7322_IntStatus_RcvAvail16_MSB 0x10 | ||
363 | #define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1 | ||
364 | #define QIB_7322_IntStatus_RcvAvail15_LSB 0xF | ||
365 | #define QIB_7322_IntStatus_RcvAvail15_MSB 0xF | ||
366 | #define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1 | ||
367 | #define QIB_7322_IntStatus_RcvAvail14_LSB 0xE | ||
368 | #define QIB_7322_IntStatus_RcvAvail14_MSB 0xE | ||
369 | #define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1 | ||
370 | #define QIB_7322_IntStatus_RcvAvail13_LSB 0xD | ||
371 | #define QIB_7322_IntStatus_RcvAvail13_MSB 0xD | ||
372 | #define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1 | ||
373 | #define QIB_7322_IntStatus_RcvAvail12_LSB 0xC | ||
374 | #define QIB_7322_IntStatus_RcvAvail12_MSB 0xC | ||
375 | #define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1 | ||
376 | #define QIB_7322_IntStatus_RcvAvail11_LSB 0xB | ||
377 | #define QIB_7322_IntStatus_RcvAvail11_MSB 0xB | ||
378 | #define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1 | ||
379 | #define QIB_7322_IntStatus_RcvAvail10_LSB 0xA | ||
380 | #define QIB_7322_IntStatus_RcvAvail10_MSB 0xA | ||
381 | #define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1 | ||
382 | #define QIB_7322_IntStatus_RcvAvail9_LSB 0x9 | ||
383 | #define QIB_7322_IntStatus_RcvAvail9_MSB 0x9 | ||
384 | #define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1 | ||
385 | #define QIB_7322_IntStatus_RcvAvail8_LSB 0x8 | ||
386 | #define QIB_7322_IntStatus_RcvAvail8_MSB 0x8 | ||
387 | #define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1 | ||
388 | #define QIB_7322_IntStatus_RcvAvail7_LSB 0x7 | ||
389 | #define QIB_7322_IntStatus_RcvAvail7_MSB 0x7 | ||
390 | #define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1 | ||
391 | #define QIB_7322_IntStatus_RcvAvail6_LSB 0x6 | ||
392 | #define QIB_7322_IntStatus_RcvAvail6_MSB 0x6 | ||
393 | #define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1 | ||
394 | #define QIB_7322_IntStatus_RcvAvail5_LSB 0x5 | ||
395 | #define QIB_7322_IntStatus_RcvAvail5_MSB 0x5 | ||
396 | #define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1 | ||
397 | #define QIB_7322_IntStatus_RcvAvail4_LSB 0x4 | ||
398 | #define QIB_7322_IntStatus_RcvAvail4_MSB 0x4 | ||
399 | #define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1 | ||
400 | #define QIB_7322_IntStatus_RcvAvail3_LSB 0x3 | ||
401 | #define QIB_7322_IntStatus_RcvAvail3_MSB 0x3 | ||
402 | #define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1 | ||
403 | #define QIB_7322_IntStatus_RcvAvail2_LSB 0x2 | ||
404 | #define QIB_7322_IntStatus_RcvAvail2_MSB 0x2 | ||
405 | #define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1 | ||
406 | #define QIB_7322_IntStatus_RcvAvail1_LSB 0x1 | ||
407 | #define QIB_7322_IntStatus_RcvAvail1_MSB 0x1 | ||
408 | #define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1 | ||
409 | #define QIB_7322_IntStatus_RcvAvail0_LSB 0x0 | ||
410 | #define QIB_7322_IntStatus_RcvAvail0_MSB 0x0 | ||
411 | #define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1 | ||
412 | |||
413 | #define QIB_7322_IntClear_OFFS 0x78 | ||
414 | #define QIB_7322_IntClear_DEF 0x0000000000000000 | ||
415 | #define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F | ||
416 | #define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F | ||
417 | #define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1 | ||
418 | #define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E | ||
419 | #define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E | ||
420 | #define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1 | ||
421 | #define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D | ||
422 | #define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D | ||
423 | #define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1 | ||
424 | #define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C | ||
425 | #define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C | ||
426 | #define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1 | ||
427 | #define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B | ||
428 | #define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B | ||
429 | #define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1 | ||
430 | #define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A | ||
431 | #define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A | ||
432 | #define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1 | ||
433 | #define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39 | ||
434 | #define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39 | ||
435 | #define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1 | ||
436 | #define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38 | ||
437 | #define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38 | ||
438 | #define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1 | ||
439 | #define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31 | ||
440 | #define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31 | ||
441 | #define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1 | ||
442 | #define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30 | ||
443 | #define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30 | ||
444 | #define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1 | ||
445 | #define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F | ||
446 | #define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F | ||
447 | #define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1 | ||
448 | #define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E | ||
449 | #define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E | ||
450 | #define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1 | ||
451 | #define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D | ||
452 | #define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D | ||
453 | #define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1 | ||
454 | #define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C | ||
455 | #define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C | ||
456 | #define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1 | ||
457 | #define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B | ||
458 | #define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B | ||
459 | #define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1 | ||
460 | #define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A | ||
461 | #define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A | ||
462 | #define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1 | ||
463 | #define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29 | ||
464 | #define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29 | ||
465 | #define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1 | ||
466 | #define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28 | ||
467 | #define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28 | ||
468 | #define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1 | ||
469 | #define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27 | ||
470 | #define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27 | ||
471 | #define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1 | ||
472 | #define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26 | ||
473 | #define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26 | ||
474 | #define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1 | ||
475 | #define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25 | ||
476 | #define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25 | ||
477 | #define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1 | ||
478 | #define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24 | ||
479 | #define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24 | ||
480 | #define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1 | ||
481 | #define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23 | ||
482 | #define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23 | ||
483 | #define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1 | ||
484 | #define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22 | ||
485 | #define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22 | ||
486 | #define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1 | ||
487 | #define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21 | ||
488 | #define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21 | ||
489 | #define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1 | ||
490 | #define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20 | ||
491 | #define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20 | ||
492 | #define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1 | ||
493 | #define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F | ||
494 | #define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F | ||
495 | #define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1 | ||
496 | #define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E | ||
497 | #define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E | ||
498 | #define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1 | ||
499 | #define QIB_7322_IntClear_ErrIntClear_LSB 0x1D | ||
500 | #define QIB_7322_IntClear_ErrIntClear_MSB 0x1D | ||
501 | #define QIB_7322_IntClear_ErrIntClear_RMASK 0x1 | ||
502 | #define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C | ||
503 | #define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C | ||
504 | #define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1 | ||
505 | #define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19 | ||
506 | #define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19 | ||
507 | #define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1 | ||
508 | #define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18 | ||
509 | #define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18 | ||
510 | #define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1 | ||
511 | #define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17 | ||
512 | #define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17 | ||
513 | #define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1 | ||
514 | #define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11 | ||
515 | #define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11 | ||
516 | #define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1 | ||
517 | #define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10 | ||
518 | #define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10 | ||
519 | #define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1 | ||
520 | #define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF | ||
521 | #define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF | ||
522 | #define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1 | ||
523 | #define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE | ||
524 | #define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE | ||
525 | #define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1 | ||
526 | #define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD | ||
527 | #define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD | ||
528 | #define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1 | ||
529 | #define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC | ||
530 | #define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC | ||
531 | #define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1 | ||
532 | #define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB | ||
533 | #define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB | ||
534 | #define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1 | ||
535 | #define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA | ||
536 | #define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA | ||
537 | #define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1 | ||
538 | #define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9 | ||
539 | #define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9 | ||
540 | #define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1 | ||
541 | #define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8 | ||
542 | #define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8 | ||
543 | #define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1 | ||
544 | #define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7 | ||
545 | #define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7 | ||
546 | #define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1 | ||
547 | #define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6 | ||
548 | #define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6 | ||
549 | #define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1 | ||
550 | #define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5 | ||
551 | #define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5 | ||
552 | #define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1 | ||
553 | #define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4 | ||
554 | #define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4 | ||
555 | #define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1 | ||
556 | #define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3 | ||
557 | #define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3 | ||
558 | #define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1 | ||
559 | #define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2 | ||
560 | #define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2 | ||
561 | #define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1 | ||
562 | #define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1 | ||
563 | #define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1 | ||
564 | #define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1 | ||
565 | #define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0 | ||
566 | #define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0 | ||
567 | #define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1 | ||
568 | |||
569 | #define QIB_7322_ErrMask_OFFS 0x80 | ||
570 | #define QIB_7322_ErrMask_DEF 0x0000000000000000 | ||
571 | #define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F | ||
572 | #define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F | ||
573 | #define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1 | ||
574 | #define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E | ||
575 | #define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E | ||
576 | #define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1 | ||
577 | #define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D | ||
578 | #define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D | ||
579 | #define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1 | ||
580 | #define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38 | ||
581 | #define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38 | ||
582 | #define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1 | ||
583 | #define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37 | ||
584 | #define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37 | ||
585 | #define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1 | ||
586 | #define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35 | ||
587 | #define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35 | ||
588 | #define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1 | ||
589 | #define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34 | ||
590 | #define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34 | ||
591 | #define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1 | ||
592 | #define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24 | ||
593 | #define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24 | ||
594 | #define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1 | ||
595 | #define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23 | ||
596 | #define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23 | ||
597 | #define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1 | ||
598 | #define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B | ||
599 | #define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B | ||
600 | #define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1 | ||
601 | #define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A | ||
602 | #define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A | ||
603 | #define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1 | ||
604 | #define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19 | ||
605 | #define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19 | ||
606 | #define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1 | ||
607 | #define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD | ||
608 | #define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD | ||
609 | #define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1 | ||
610 | #define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC | ||
611 | #define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC | ||
612 | #define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1 | ||
613 | |||
614 | #define QIB_7322_ErrStatus_OFFS 0x88 | ||
615 | #define QIB_7322_ErrStatus_DEF 0x0000000000000000 | ||
616 | #define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F | ||
617 | #define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F | ||
618 | #define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1 | ||
619 | #define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E | ||
620 | #define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E | ||
621 | #define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1 | ||
622 | #define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D | ||
623 | #define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D | ||
624 | #define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1 | ||
625 | #define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38 | ||
626 | #define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38 | ||
627 | #define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1 | ||
628 | #define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37 | ||
629 | #define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37 | ||
630 | #define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1 | ||
631 | #define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35 | ||
632 | #define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35 | ||
633 | #define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1 | ||
634 | #define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34 | ||
635 | #define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34 | ||
636 | #define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1 | ||
637 | #define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24 | ||
638 | #define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24 | ||
639 | #define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1 | ||
640 | #define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23 | ||
641 | #define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23 | ||
642 | #define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1 | ||
643 | #define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B | ||
644 | #define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B | ||
645 | #define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1 | ||
646 | #define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A | ||
647 | #define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A | ||
648 | #define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1 | ||
649 | #define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19 | ||
650 | #define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19 | ||
651 | #define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1 | ||
652 | #define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD | ||
653 | #define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD | ||
654 | #define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1 | ||
655 | #define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC | ||
656 | #define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC | ||
657 | #define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1 | ||
658 | |||
659 | #define QIB_7322_ErrClear_OFFS 0x90 | ||
660 | #define QIB_7322_ErrClear_DEF 0x0000000000000000 | ||
661 | #define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F | ||
662 | #define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F | ||
663 | #define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1 | ||
664 | #define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E | ||
665 | #define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E | ||
666 | #define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1 | ||
667 | #define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D | ||
668 | #define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D | ||
669 | #define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1 | ||
670 | #define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38 | ||
671 | #define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38 | ||
672 | #define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1 | ||
673 | #define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37 | ||
674 | #define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37 | ||
675 | #define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1 | ||
676 | #define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35 | ||
677 | #define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35 | ||
678 | #define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1 | ||
679 | #define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34 | ||
680 | #define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34 | ||
681 | #define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1 | ||
682 | #define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24 | ||
683 | #define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24 | ||
684 | #define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1 | ||
685 | #define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23 | ||
686 | #define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23 | ||
687 | #define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1 | ||
688 | #define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B | ||
689 | #define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B | ||
690 | #define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1 | ||
691 | #define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A | ||
692 | #define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A | ||
693 | #define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1 | ||
694 | #define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19 | ||
695 | #define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19 | ||
696 | #define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1 | ||
697 | #define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD | ||
698 | #define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD | ||
699 | #define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1 | ||
700 | #define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC | ||
701 | #define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC | ||
702 | #define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1 | ||
703 | |||
704 | #define QIB_7322_HwErrMask_OFFS 0x98 | ||
705 | #define QIB_7322_HwErrMask_DEF 0x0000000000000000 | ||
706 | #define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F | ||
707 | #define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F | ||
708 | #define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1 | ||
709 | #define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E | ||
710 | #define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E | ||
711 | #define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1 | ||
712 | #define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37 | ||
713 | #define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37 | ||
714 | #define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1 | ||
715 | #define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36 | ||
716 | #define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36 | ||
717 | #define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1 | ||
718 | #define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35 | ||
719 | #define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35 | ||
720 | #define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1 | ||
721 | #define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30 | ||
722 | #define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30 | ||
723 | #define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1 | ||
724 | #define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22 | ||
725 | #define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22 | ||
726 | #define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1 | ||
727 | #define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F | ||
728 | #define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21 | ||
729 | #define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7 | ||
730 | #define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E | ||
731 | #define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E | ||
732 | #define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1 | ||
733 | #define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D | ||
734 | #define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D | ||
735 | #define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1 | ||
736 | #define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C | ||
737 | #define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C | ||
738 | #define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1 | ||
739 | #define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B | ||
740 | #define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B | ||
741 | #define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1 | ||
742 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF | ||
743 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF | ||
744 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1 | ||
745 | #define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE | ||
746 | #define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE | ||
747 | #define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1 | ||
748 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD | ||
749 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD | ||
750 | #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1 | ||
751 | #define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC | ||
752 | #define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC | ||
753 | #define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1 | ||
754 | #define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB | ||
755 | #define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB | ||
756 | #define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1 | ||
757 | |||
758 | #define QIB_7322_HwErrStatus_OFFS 0xA0 | ||
759 | #define QIB_7322_HwErrStatus_DEF 0x0000000000000000 | ||
760 | #define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F | ||
761 | #define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F | ||
762 | #define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1 | ||
763 | #define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E | ||
764 | #define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E | ||
765 | #define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1 | ||
766 | #define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37 | ||
767 | #define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37 | ||
768 | #define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1 | ||
769 | #define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36 | ||
770 | #define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36 | ||
771 | #define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1 | ||
772 | #define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35 | ||
773 | #define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35 | ||
774 | #define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1 | ||
775 | #define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30 | ||
776 | #define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30 | ||
777 | #define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1 | ||
778 | #define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22 | ||
779 | #define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22 | ||
780 | #define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1 | ||
781 | #define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F | ||
782 | #define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21 | ||
783 | #define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7 | ||
784 | #define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E | ||
785 | #define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E | ||
786 | #define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1 | ||
787 | #define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D | ||
788 | #define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D | ||
789 | #define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1 | ||
790 | #define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C | ||
791 | #define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C | ||
792 | #define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1 | ||
793 | #define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B | ||
794 | #define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B | ||
795 | #define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1 | ||
796 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF | ||
797 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF | ||
798 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1 | ||
799 | #define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE | ||
800 | #define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE | ||
801 | #define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1 | ||
802 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD | ||
803 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD | ||
804 | #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1 | ||
805 | #define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC | ||
806 | #define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC | ||
807 | #define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1 | ||
808 | #define QIB_7322_HwErrStatus_LATriggered_LSB 0xB | ||
809 | #define QIB_7322_HwErrStatus_LATriggered_MSB 0xB | ||
810 | #define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1 | ||
811 | |||
812 | #define QIB_7322_HwErrClear_OFFS 0xA8 | ||
813 | #define QIB_7322_HwErrClear_DEF 0x0000000000000000 | ||
814 | #define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F | ||
815 | #define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F | ||
816 | #define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1 | ||
817 | #define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E | ||
818 | #define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E | ||
819 | #define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1 | ||
820 | #define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37 | ||
821 | #define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37 | ||
822 | #define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1 | ||
823 | #define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36 | ||
824 | #define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36 | ||
825 | #define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1 | ||
826 | #define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35 | ||
827 | #define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35 | ||
828 | #define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1 | ||
829 | #define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30 | ||
830 | #define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30 | ||
831 | #define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1 | ||
832 | #define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22 | ||
833 | #define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22 | ||
834 | #define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1 | ||
835 | #define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F | ||
836 | #define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21 | ||
837 | #define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7 | ||
838 | #define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E | ||
839 | #define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E | ||
840 | #define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1 | ||
841 | #define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D | ||
842 | #define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D | ||
843 | #define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1 | ||
844 | #define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C | ||
845 | #define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C | ||
846 | #define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1 | ||
847 | #define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B | ||
848 | #define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B | ||
849 | #define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1 | ||
850 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF | ||
851 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF | ||
852 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1 | ||
853 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE | ||
854 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE | ||
855 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1 | ||
856 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD | ||
857 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD | ||
858 | #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1 | ||
859 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC | ||
860 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC | ||
861 | #define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1 | ||
862 | #define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB | ||
863 | #define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB | ||
864 | #define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1 | ||
865 | |||
866 | #define QIB_7322_HwDiagCtrl_OFFS 0xB0 | ||
867 | #define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000 | ||
868 | #define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F | ||
869 | #define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F | ||
870 | #define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1 | ||
871 | #define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D | ||
872 | #define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D | ||
873 | #define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1 | ||
874 | #define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C | ||
875 | #define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C | ||
876 | #define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1 | ||
877 | #define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F | ||
878 | #define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22 | ||
879 | #define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF | ||
880 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF | ||
881 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF | ||
882 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1 | ||
883 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE | ||
884 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE | ||
885 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1 | ||
886 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD | ||
887 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD | ||
888 | #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1 | ||
889 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC | ||
890 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC | ||
891 | #define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1 | ||
892 | |||
893 | #define QIB_7322_EXTStatus_OFFS 0xC0 | ||
894 | #define QIB_7322_EXTStatus_DEF 0x000000000000X000 | ||
895 | #define QIB_7322_EXTStatus_GPIOIn_LSB 0x30 | ||
896 | #define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F | ||
897 | #define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF | ||
898 | #define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF | ||
899 | #define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF | ||
900 | #define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1 | ||
901 | #define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE | ||
902 | #define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE | ||
903 | #define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1 | ||
904 | |||
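The pattern above repeats for every register in this file: _OFFS is the register's byte offset in the chip's register space, _DEF its power-up default (an X digit appears to mark nibbles whose reset value is not fixed), and each field carries an _LSB/_MSB bit position plus a right-aligned _RMASK of the field's width. A minimal sketch of the shift-and-mask idiom these constants are meant for is below; the helper names are illustrative assumptions, not definitions from this header or the driver.

/*
 * Illustrative only: how the *_LSB / *_RMASK constants are typically
 * combined to read or update a field of a 64-bit register value.
 * qib_field_get()/qib_field_set() are hypothetical helpers.
 */
#include <linux/types.h>

static inline u64 qib_field_get(u64 regval, unsigned int lsb, u64 rmask)
{
	/* shift the field down to bit 0, then mask off the rest */
	return (regval >> lsb) & rmask;
}

static inline u64 qib_field_set(u64 regval, unsigned int lsb, u64 rmask, u64 val)
{
	/* clear the old field, then OR in the new (width-limited) value */
	return (regval & ~(rmask << lsb)) | ((val & rmask) << lsb);
}

/*
 * Example: test whether memory BIST has finished, using the EXTStatus
 * field constants defined just above.
 *
 *	done = qib_field_get(extstatus,
 *			     QIB_7322_EXTStatus_MemBISTEndTest_LSB,
 *			     QIB_7322_EXTStatus_MemBISTEndTest_RMASK);
 */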
905 | #define QIB_7322_EXTCtrl_OFFS 0xC8 | ||
906 | #define QIB_7322_EXTCtrl_DEF 0x0000000000000000 | ||
907 | #define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30 | ||
908 | #define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F | ||
909 | #define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF | ||
910 | #define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20 | ||
911 | #define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F | ||
912 | #define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF | ||
913 | #define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3 | ||
914 | #define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3 | ||
915 | #define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1 | ||
916 | #define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2 | ||
917 | #define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2 | ||
918 | #define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1 | ||
919 | #define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1 | ||
920 | #define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1 | ||
921 | #define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1 | ||
922 | #define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0 | ||
923 | #define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0 | ||
924 | #define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1 | ||
925 | |||
926 | #define QIB_7322_GPIOOut_OFFS 0xE0 | ||
927 | #define QIB_7322_GPIOOut_DEF 0x0000000000000000 | ||
928 | |||
929 | #define QIB_7322_GPIOMask_OFFS 0xE8 | ||
930 | #define QIB_7322_GPIOMask_DEF 0x0000000000000000 | ||
931 | |||
932 | #define QIB_7322_GPIOStatus_OFFS 0xF0 | ||
933 | #define QIB_7322_GPIOStatus_DEF 0x0000000000000000 | ||
934 | |||
935 | #define QIB_7322_GPIOClear_OFFS 0xF8 | ||
936 | #define QIB_7322_GPIOClear_DEF 0x0000000000000000 | ||
937 | |||
938 | #define QIB_7322_RcvCtrl_OFFS 0x100 | ||
939 | #define QIB_7322_RcvCtrl_DEF 0x0000000000000000 | ||
940 | #define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30 | ||
941 | #define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F | ||
942 | #define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF | ||
943 | #define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F | ||
944 | #define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F | ||
945 | #define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1 | ||
946 | #define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C | ||
947 | #define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E | ||
948 | #define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7 | ||
949 | #define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B | ||
950 | #define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B | ||
951 | #define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1 | ||
952 | #define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29 | ||
953 | #define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A | ||
954 | #define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3 | ||
955 | #define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14 | ||
956 | #define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25 | ||
957 | #define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF | ||
958 | #define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0 | ||
959 | #define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11 | ||
960 | #define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF | ||
961 | |||
962 | #define QIB_7322_RcvHdrSize_OFFS 0x110 | ||
963 | #define QIB_7322_RcvHdrSize_DEF 0x0000000000000000 | ||
964 | |||
965 | #define QIB_7322_RcvHdrCnt_OFFS 0x118 | ||
966 | #define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000 | ||
967 | |||
968 | #define QIB_7322_RcvHdrEntSize_OFFS 0x120 | ||
969 | #define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000 | ||
970 | |||
971 | #define QIB_7322_RcvTIDBase_OFFS 0x128 | ||
972 | #define QIB_7322_RcvTIDBase_DEF 0x0000000000050000 | ||
973 | |||
974 | #define QIB_7322_RcvTIDCnt_OFFS 0x130 | ||
975 | #define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200 | ||
976 | |||
977 | #define QIB_7322_RcvEgrBase_OFFS 0x138 | ||
978 | #define QIB_7322_RcvEgrBase_DEF 0x0000000000014000 | ||
979 | |||
980 | #define QIB_7322_RcvEgrCnt_OFFS 0x140 | ||
981 | #define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000 | ||
982 | |||
983 | #define QIB_7322_RcvBufBase_OFFS 0x148 | ||
984 | #define QIB_7322_RcvBufBase_DEF 0x0000000000080000 | ||
985 | |||
986 | #define QIB_7322_RcvBufSize_OFFS 0x150 | ||
987 | #define QIB_7322_RcvBufSize_DEF 0x0000000000005000 | ||
988 | |||
989 | #define QIB_7322_RxIntMemBase_OFFS 0x158 | ||
990 | #define QIB_7322_RxIntMemBase_DEF 0x0000000000077000 | ||
991 | |||
992 | #define QIB_7322_RxIntMemSize_OFFS 0x160 | ||
993 | #define QIB_7322_RxIntMemSize_DEF 0x0000000000007000 | ||
994 | |||
995 | #define QIB_7322_feature_mask_OFFS 0x190 | ||
996 | #define QIB_7322_feature_mask_DEF 0x00000000000000XX | ||
997 | |||
998 | #define QIB_7322_active_feature_mask_OFFS 0x198 | ||
999 | #define QIB_7322_active_feature_mask_DEF 0x00000000000000XX | ||
1000 | #define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5 | ||
1001 | #define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5 | ||
1002 | #define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1 | ||
1003 | #define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4 | ||
1004 | #define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4 | ||
1005 | #define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1 | ||
1006 | #define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3 | ||
1007 | #define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3 | ||
1008 | #define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1 | ||
1009 | #define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2 | ||
1010 | #define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2 | ||
1011 | #define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1 | ||
1012 | #define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1 | ||
1013 | #define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1 | ||
1014 | #define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1 | ||
1015 | #define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0 | ||
1016 | #define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0 | ||
1017 | #define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1 | ||
1018 | |||
1019 | #define QIB_7322_SendCtrl_OFFS 0x1C0 | ||
1020 | #define QIB_7322_SendCtrl_DEF 0x0000000000000000 | ||
1021 | #define QIB_7322_SendCtrl_Disarm_LSB 0x1F | ||
1022 | #define QIB_7322_SendCtrl_Disarm_MSB 0x1F | ||
1023 | #define QIB_7322_SendCtrl_Disarm_RMASK 0x1 | ||
1024 | #define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D | ||
1025 | #define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D | ||
1026 | #define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1 | ||
1027 | #define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18 | ||
1028 | #define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C | ||
1029 | #define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F | ||
1030 | #define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10 | ||
1031 | #define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17 | ||
1032 | #define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF | ||
1033 | #define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4 | ||
1034 | #define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4 | ||
1035 | #define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1 | ||
1036 | #define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2 | ||
1037 | #define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2 | ||
1038 | #define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1 | ||
1039 | #define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1 | ||
1040 | #define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1 | ||
1041 | #define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1 | ||
1042 | |||
1043 | #define QIB_7322_SendBufBase_OFFS 0x1C8 | ||
1044 | #define QIB_7322_SendBufBase_DEF 0x0018000000100000 | ||
1045 | #define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20 | ||
1046 | #define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34 | ||
1047 | #define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF | ||
1048 | #define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0 | ||
1049 | #define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14 | ||
1050 | #define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF | ||
1051 | |||
1052 | #define QIB_7322_SendBufSize_OFFS 0x1D0 | ||
1053 | #define QIB_7322_SendBufSize_DEF 0x0000108000000880 | ||
1054 | #define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20 | ||
1055 | #define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C | ||
1056 | #define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF | ||
1057 | #define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0 | ||
1058 | #define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB | ||
1059 | #define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF | ||
1060 | |||
1061 | #define QIB_7322_SendBufCnt_OFFS 0x1D8 | ||
1062 | #define QIB_7322_SendBufCnt_DEF 0x0000002000000080 | ||
1063 | #define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20 | ||
1064 | #define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25 | ||
1065 | #define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F | ||
1066 | #define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0 | ||
1067 | #define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8 | ||
1068 | #define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF | ||
1069 | |||
1070 | #define QIB_7322_SendBufAvailAddr_OFFS 0x1E0 | ||
1071 | #define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000 | ||
1072 | #define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6 | ||
1073 | #define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27 | ||
1074 | #define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF | ||
1075 | |||
1076 | #define QIB_7322_SendBufErr0_OFFS 0x240 | ||
1077 | #define QIB_7322_SendBufErr0_DEF 0x0000000000000000 | ||
1078 | #define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0 | ||
1079 | #define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F | ||
1080 | #define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0 | ||
1081 | |||
1082 | #define QIB_7322_AvailUpdCount_OFFS 0x268 | ||
1083 | #define QIB_7322_AvailUpdCount_DEF 0x0000000000000000 | ||
1084 | #define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0 | ||
1085 | #define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4 | ||
1086 | #define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F | ||
1087 | |||
1088 | #define QIB_7322_RcvHdrAddr0_OFFS 0x280 | ||
1089 | #define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000 | ||
1090 | #define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2 | ||
1091 | #define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27 | ||
1092 | #define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF | ||
1093 | |||
1094 | #define QIB_7322_RcvHdrTailAddr0_OFFS 0x340 | ||
1095 | #define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000 | ||
1096 | #define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2 | ||
1097 | #define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27 | ||
1098 | #define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF | ||
1099 | |||
1100 | #define QIB_7322_ahb_access_ctrl_OFFS 0x460 | ||
1101 | #define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000 | ||
1102 | #define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1 | ||
1103 | #define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2 | ||
1104 | #define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3 | ||
1105 | #define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0 | ||
1106 | #define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0 | ||
1107 | #define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1 | ||
1108 | |||
1109 | #define QIB_7322_ahb_transaction_reg_OFFS 0x468 | ||
1110 | #define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000 | ||
1111 | #define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20 | ||
1112 | #define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F | ||
1113 | #define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF | ||
1114 | #define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F | ||
1115 | #define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F | ||
1116 | #define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1 | ||
1117 | #define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E | ||
1118 | #define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E | ||
1119 | #define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1 | ||
1120 | #define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B | ||
1121 | #define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B | ||
1122 | #define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1 | ||
1123 | #define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10 | ||
1124 | #define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A | ||
1125 | #define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF | ||
1126 | |||
1127 | #define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470 | ||
1128 | #define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001 | ||
1129 | #define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA | ||
1130 | #define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA | ||
1131 | #define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1 | ||
1132 | #define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5 | ||
1133 | #define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9 | ||
1134 | #define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F | ||
1135 | #define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3 | ||
1136 | #define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4 | ||
1137 | #define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3 | ||
1138 | #define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2 | ||
1139 | #define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2 | ||
1140 | #define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1 | ||
1141 | #define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1 | ||
1142 | #define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1 | ||
1143 | #define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1 | ||
1144 | #define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0 | ||
1145 | #define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0 | ||
1146 | #define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1 | ||
1147 | |||
1148 | #define QIB_7322_SendCheckMask0_OFFS 0x4C0 | ||
1149 | #define QIB_7322_SendCheckMask0_DEF 0x0000000000000000 | ||
1150 | #define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0 | ||
1151 | #define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F | ||
1152 | #define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0 | ||
1153 | |||
1154 | #define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0 | ||
1155 | #define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000 | ||
1156 | #define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0 | ||
1157 | #define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F | ||
1158 | #define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0 | ||
1159 | |||
1160 | #define QIB_7322_SendIBPacketMask0_OFFS 0x500 | ||
1161 | #define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000 | ||
1162 | #define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0 | ||
1163 | #define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F | ||
1164 | #define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0 | ||
1165 | |||
1166 | #define QIB_7322_IntRedirect0_OFFS 0x540 | ||
1167 | #define QIB_7322_IntRedirect0_DEF 0x0000000000000000 | ||
1168 | #define QIB_7322_IntRedirect0_vec11_LSB 0x37 | ||
1169 | #define QIB_7322_IntRedirect0_vec11_MSB 0x3B | ||
1170 | #define QIB_7322_IntRedirect0_vec11_RMASK 0x1F | ||
1171 | #define QIB_7322_IntRedirect0_vec10_LSB 0x32 | ||
1172 | #define QIB_7322_IntRedirect0_vec10_MSB 0x36 | ||
1173 | #define QIB_7322_IntRedirect0_vec10_RMASK 0x1F | ||
1174 | #define QIB_7322_IntRedirect0_vec9_LSB 0x2D | ||
1175 | #define QIB_7322_IntRedirect0_vec9_MSB 0x31 | ||
1176 | #define QIB_7322_IntRedirect0_vec9_RMASK 0x1F | ||
1177 | #define QIB_7322_IntRedirect0_vec8_LSB 0x28 | ||
1178 | #define QIB_7322_IntRedirect0_vec8_MSB 0x2C | ||
1179 | #define QIB_7322_IntRedirect0_vec8_RMASK 0x1F | ||
1180 | #define QIB_7322_IntRedirect0_vec7_LSB 0x23 | ||
1181 | #define QIB_7322_IntRedirect0_vec7_MSB 0x27 | ||
1182 | #define QIB_7322_IntRedirect0_vec7_RMASK 0x1F | ||
1183 | #define QIB_7322_IntRedirect0_vec6_LSB 0x1E | ||
1184 | #define QIB_7322_IntRedirect0_vec6_MSB 0x22 | ||
1185 | #define QIB_7322_IntRedirect0_vec6_RMASK 0x1F | ||
1186 | #define QIB_7322_IntRedirect0_vec5_LSB 0x19 | ||
1187 | #define QIB_7322_IntRedirect0_vec5_MSB 0x1D | ||
1188 | #define QIB_7322_IntRedirect0_vec5_RMASK 0x1F | ||
1189 | #define QIB_7322_IntRedirect0_vec4_LSB 0x14 | ||
1190 | #define QIB_7322_IntRedirect0_vec4_MSB 0x18 | ||
1191 | #define QIB_7322_IntRedirect0_vec4_RMASK 0x1F | ||
1192 | #define QIB_7322_IntRedirect0_vec3_LSB 0xF | ||
1193 | #define QIB_7322_IntRedirect0_vec3_MSB 0x13 | ||
1194 | #define QIB_7322_IntRedirect0_vec3_RMASK 0x1F | ||
1195 | #define QIB_7322_IntRedirect0_vec2_LSB 0xA | ||
1196 | #define QIB_7322_IntRedirect0_vec2_MSB 0xE | ||
1197 | #define QIB_7322_IntRedirect0_vec2_RMASK 0x1F | ||
1198 | #define QIB_7322_IntRedirect0_vec1_LSB 0x5 | ||
1199 | #define QIB_7322_IntRedirect0_vec1_MSB 0x9 | ||
1200 | #define QIB_7322_IntRedirect0_vec1_RMASK 0x1F | ||
1201 | #define QIB_7322_IntRedirect0_vec0_LSB 0x0 | ||
1202 | #define QIB_7322_IntRedirect0_vec0_MSB 0x4 | ||
1203 | #define QIB_7322_IntRedirect0_vec0_RMASK 0x1F | ||
1204 | |||
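Each vecN field of IntRedirect0 is 5 bits wide (RMASK 0x1F), which suggests it selects one of up to 32 interrupt vectors for the corresponding source; that reading is inferred from the names and widths, not stated by this header. Packing one of these fields could reuse the shift-and-mask sketch above, for example:

/*
 *	redirect = qib_field_set(redirect,
 *				 QIB_7322_IntRedirect0_vec3_LSB,
 *				 QIB_7322_IntRedirect0_vec3_RMASK,
 *				 msix_vec);    /* hypothetical target vector */
 */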
1205 | #define QIB_7322_Int_Granted_OFFS 0x570 | ||
1206 | #define QIB_7322_Int_Granted_DEF 0x0000000000000000 | ||
1207 | |||
1208 | #define QIB_7322_vec_clr_without_int_OFFS 0x578 | ||
1209 | #define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000 | ||
1210 | |||
1211 | #define QIB_7322_DCACtrlA_OFFS 0x580 | ||
1212 | #define QIB_7322_DCACtrlA_DEF 0x0000000000000000 | ||
1213 | #define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4 | ||
1214 | #define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4 | ||
1215 | #define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1 | ||
1216 | #define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3 | ||
1217 | #define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3 | ||
1218 | #define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1 | ||
1219 | #define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2 | ||
1220 | #define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2 | ||
1221 | #define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1 | ||
1222 | #define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1 | ||
1223 | #define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1 | ||
1224 | #define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1 | ||
1225 | #define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0 | ||
1226 | #define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0 | ||
1227 | #define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1 | ||
1228 | |||
1229 | #define QIB_7322_DCACtrlB_OFFS 0x588 | ||
1230 | #define QIB_7322_DCACtrlB_DEF 0x0000000000000000 | ||
1231 | #define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36 | ||
1232 | #define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B | ||
1233 | #define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F | ||
1234 | #define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E | ||
1235 | #define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35 | ||
1236 | #define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF | ||
1237 | #define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28 | ||
1238 | #define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D | ||
1239 | #define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F | ||
1240 | #define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20 | ||
1241 | #define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27 | ||
1242 | #define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF | ||
1243 | #define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16 | ||
1244 | #define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B | ||
1245 | #define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F | ||
1246 | #define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE | ||
1247 | #define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15 | ||
1248 | #define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF | ||
1249 | #define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8 | ||
1250 | #define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD | ||
1251 | #define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F | ||
1252 | #define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0 | ||
1253 | #define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7 | ||
1254 | #define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF | ||
1255 | |||
1256 | #define QIB_7322_DCACtrlC_OFFS 0x590 | ||
1257 | #define QIB_7322_DCACtrlC_DEF 0x0000000000000000 | ||
1258 | #define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36 | ||
1259 | #define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B | ||
1260 | #define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F | ||
1261 | #define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E | ||
1262 | #define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35 | ||
1263 | #define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF | ||
1264 | #define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28 | ||
1265 | #define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D | ||
1266 | #define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F | ||
1267 | #define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20 | ||
1268 | #define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27 | ||
1269 | #define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF | ||
1270 | #define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16 | ||
1271 | #define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B | ||
1272 | #define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F | ||
1273 | #define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE | ||
1274 | #define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15 | ||
1275 | #define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF | ||
1276 | #define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8 | ||
1277 | #define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD | ||
1278 | #define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F | ||
1279 | #define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0 | ||
1280 | #define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7 | ||
1281 | #define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF | ||
1282 | |||
1283 | #define QIB_7322_DCACtrlD_OFFS 0x598 | ||
1284 | #define QIB_7322_DCACtrlD_DEF 0x0000000000000000 | ||
1285 | #define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36 | ||
1286 | #define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B | ||
1287 | #define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F | ||
1288 | #define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E | ||
1289 | #define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35 | ||
1290 | #define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF | ||
1291 | #define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28 | ||
1292 | #define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D | ||
1293 | #define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F | ||
1294 | #define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20 | ||
1295 | #define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27 | ||
1296 | #define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF | ||
1297 | #define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16 | ||
1298 | #define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B | ||
1299 | #define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F | ||
1300 | #define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE | ||
1301 | #define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15 | ||
1302 | #define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF | ||
1303 | #define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8 | ||
1304 | #define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD | ||
1305 | #define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F | ||
1306 | #define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0 | ||
1307 | #define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7 | ||
1308 | #define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF | ||
1309 | |||
1310 | #define QIB_7322_DCACtrlE_OFFS 0x5A0 | ||
1311 | #define QIB_7322_DCACtrlE_DEF 0x0000000000000000 | ||
1312 | #define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36 | ||
1313 | #define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B | ||
1314 | #define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F | ||
1315 | #define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E | ||
1316 | #define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35 | ||
1317 | #define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF | ||
1318 | #define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28 | ||
1319 | #define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D | ||
1320 | #define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F | ||
1321 | #define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20 | ||
1322 | #define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27 | ||
1323 | #define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF | ||
1324 | #define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16 | ||
1325 | #define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B | ||
1326 | #define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F | ||
1327 | #define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE | ||
1328 | #define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15 | ||
1329 | #define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF | ||
1330 | #define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8 | ||
1331 | #define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD | ||
1332 | #define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F | ||
1333 | #define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0 | ||
1334 | #define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7 | ||
1335 | #define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF | ||
1336 | |||
1337 | #define QIB_7322_DCACtrlF_OFFS 0x5A8 | ||
1338 | #define QIB_7322_DCACtrlF_DEF 0x0000000000000000 | ||
1339 | #define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28 | ||
1340 | #define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F | ||
1341 | #define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF | ||
1342 | #define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20 | ||
1343 | #define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27 | ||
1344 | #define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF | ||
1345 | #define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16 | ||
1346 | #define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B | ||
1347 | #define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F | ||
1348 | #define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE | ||
1349 | #define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15 | ||
1350 | #define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF | ||
1351 | #define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8 | ||
1352 | #define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD | ||
1353 | #define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F | ||
1354 | #define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0 | ||
1355 | #define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7 | ||
1356 | #define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF | ||
1357 | |||
1358 | #define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00 | ||
1359 | #define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000 | ||
1360 | #define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10 | ||
1361 | #define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F | ||
1362 | #define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF | ||
1363 | #define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0 | ||
1364 | #define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF | ||
1365 | #define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF | ||
1366 | |||
1367 | #define QIB_7322_CntrRegBase_0_OFFS 0x1028 | ||
1368 | #define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000 | ||
1369 | |||
1370 | #define QIB_7322_ErrMask_0_OFFS 0x1080 | ||
1371 | #define QIB_7322_ErrMask_0_DEF 0x0000000000000000 | ||
1372 | #define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A | ||
1373 | #define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A | ||
1374 | #define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1 | ||
1375 | #define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39 | ||
1376 | #define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39 | ||
1377 | #define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1 | ||
1378 | #define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36 | ||
1379 | #define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36 | ||
1380 | #define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1 | ||
1381 | #define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31 | ||
1382 | #define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31 | ||
1383 | #define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1 | ||
1384 | #define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30 | ||
1385 | #define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30 | ||
1386 | #define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1 | ||
1387 | #define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F | ||
1388 | #define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F | ||
1389 | #define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1 | ||
1390 | #define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E | ||
1391 | #define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E | ||
1392 | #define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1 | ||
1393 | #define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D | ||
1394 | #define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D | ||
1395 | #define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1 | ||
1396 | #define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C | ||
1397 | #define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C | ||
1398 | #define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1 | ||
1399 | #define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B | ||
1400 | #define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B | ||
1401 | #define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1 | ||
1402 | #define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A | ||
1403 | #define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A | ||
1404 | #define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1 | ||
1405 | #define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29 | ||
1406 | #define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29 | ||
1407 | #define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1 | ||
1408 | #define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28 | ||
1409 | #define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28 | ||
1410 | #define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1 | ||
1411 | #define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27 | ||
1412 | #define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27 | ||
1413 | #define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1 | ||
1414 | #define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26 | ||
1415 | #define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26 | ||
1416 | #define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1 | ||
1417 | #define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25 | ||
1418 | #define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25 | ||
1419 | #define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1 | ||
1420 | #define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24 | ||
1421 | #define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24 | ||
1422 | #define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1 | ||
1423 | #define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22 | ||
1424 | #define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22 | ||
1425 | #define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1 | ||
1426 | #define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21 | ||
1427 | #define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21 | ||
1428 | #define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1 | ||
1429 | #define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20 | ||
1430 | #define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20 | ||
1431 | #define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1 | ||
1432 | #define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F | ||
1433 | #define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F | ||
1434 | #define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1 | ||
1435 | #define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E | ||
1436 | #define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E | ||
1437 | #define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1 | ||
1438 | #define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D | ||
1439 | #define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D | ||
1440 | #define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1 | ||
1441 | #define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11 | ||
1442 | #define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11 | ||
1443 | #define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1 | ||
1444 | #define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10 | ||
1445 | #define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10 | ||
1446 | #define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1 | ||
1447 | #define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF | ||
1448 | #define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF | ||
1449 | #define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1 | ||
1450 | #define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE | ||
1451 | #define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE | ||
1452 | #define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1 | ||
1453 | #define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB | ||
1454 | #define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB | ||
1455 | #define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1 | ||
1456 | #define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA | ||
1457 | #define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA | ||
1458 | #define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1 | ||
1459 | #define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9 | ||
1460 | #define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9 | ||
1461 | #define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1 | ||
1462 | #define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8 | ||
1463 | #define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8 | ||
1464 | #define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1 | ||
1465 | #define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7 | ||
1466 | #define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7 | ||
1467 | #define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1 | ||
1468 | #define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6 | ||
1469 | #define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6 | ||
1470 | #define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1 | ||
1471 | #define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5 | ||
1472 | #define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5 | ||
1473 | #define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1 | ||
1474 | #define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4 | ||
1475 | #define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4 | ||
1476 | #define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1 | ||
1477 | #define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3 | ||
1478 | #define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3 | ||
1479 | #define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1 | ||
1480 | #define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2 | ||
1481 | #define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2 | ||
1482 | #define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1 | ||
1483 | #define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1 | ||
1484 | #define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1 | ||
1485 | #define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1 | ||
1486 | #define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0 | ||
1487 | #define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0 | ||
1488 | #define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1 | ||
1489 | |||
1490 | #define QIB_7322_ErrStatus_0_OFFS 0x1088 | ||
1491 | #define QIB_7322_ErrStatus_0_DEF 0x0000000000000000 | ||
1492 | #define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A | ||
1493 | #define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A | ||
1494 | #define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1 | ||
1495 | #define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39 | ||
1496 | #define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39 | ||
1497 | #define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1 | ||
1498 | #define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36 | ||
1499 | #define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36 | ||
1500 | #define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1 | ||
1501 | #define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31 | ||
1502 | #define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31 | ||
1503 | #define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1 | ||
1504 | #define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30 | ||
1505 | #define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30 | ||
1506 | #define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1 | ||
1507 | #define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F | ||
1508 | #define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F | ||
1509 | #define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1 | ||
1510 | #define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E | ||
1511 | #define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E | ||
1512 | #define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1 | ||
1513 | #define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D | ||
1514 | #define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D | ||
1515 | #define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1 | ||
1516 | #define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C | ||
1517 | #define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C | ||
1518 | #define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1 | ||
1519 | #define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B | ||
1520 | #define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B | ||
1521 | #define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1 | ||
1522 | #define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A | ||
1523 | #define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A | ||
1524 | #define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1 | ||
1525 | #define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29 | ||
1526 | #define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29 | ||
1527 | #define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1 | ||
1528 | #define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28 | ||
1529 | #define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28 | ||
1530 | #define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1 | ||
1531 | #define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27 | ||
1532 | #define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27 | ||
1533 | #define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1 | ||
1534 | #define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26 | ||
1535 | #define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26 | ||
1536 | #define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1 | ||
1537 | #define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25 | ||
1538 | #define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25 | ||
1539 | #define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1 | ||
1540 | #define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24 | ||
1541 | #define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24 | ||
1542 | #define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1 | ||
1543 | #define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22 | ||
1544 | #define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22 | ||
1545 | #define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1 | ||
1546 | #define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21 | ||
1547 | #define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21 | ||
1548 | #define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1 | ||
1549 | #define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20 | ||
1550 | #define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20 | ||
1551 | #define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1 | ||
1552 | #define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F | ||
1553 | #define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F | ||
1554 | #define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1 | ||
1555 | #define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E | ||
1556 | #define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E | ||
1557 | #define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1 | ||
1558 | #define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D | ||
1559 | #define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D | ||
1560 | #define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1 | ||
1561 | #define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11 | ||
1562 | #define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11 | ||
1563 | #define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1 | ||
1564 | #define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10 | ||
1565 | #define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10 | ||
1566 | #define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1 | ||
1567 | #define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF | ||
1568 | #define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF | ||
1569 | #define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1 | ||
1570 | #define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE | ||
1571 | #define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE | ||
1572 | #define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1 | ||
1573 | #define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB | ||
1574 | #define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB | ||
1575 | #define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1 | ||
1576 | #define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA | ||
1577 | #define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA | ||
1578 | #define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1 | ||
1579 | #define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9 | ||
1580 | #define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9 | ||
1581 | #define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1 | ||
1582 | #define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8 | ||
1583 | #define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8 | ||
1584 | #define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1 | ||
1585 | #define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7 | ||
1586 | #define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7 | ||
1587 | #define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1 | ||
1588 | #define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6 | ||
1589 | #define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6 | ||
1590 | #define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1 | ||
1591 | #define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5 | ||
1592 | #define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5 | ||
1593 | #define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1 | ||
1594 | #define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4 | ||
1595 | #define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4 | ||
1596 | #define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1 | ||
1597 | #define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3 | ||
1598 | #define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3 | ||
1599 | #define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1 | ||
1600 | #define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2 | ||
1601 | #define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2 | ||
1602 | #define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1 | ||
1603 | #define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1 | ||
1604 | #define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1 | ||
1605 | #define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1 | ||
1606 | #define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0 | ||
1607 | #define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0 | ||
1608 | #define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1 | ||
1609 | |||
1610 | #define QIB_7322_ErrClear_0_OFFS 0x1090 | ||
1611 | #define QIB_7322_ErrClear_0_DEF 0x0000000000000000 | ||
1612 | #define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A | ||
1613 | #define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A | ||
1614 | #define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1 | ||
1615 | #define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39 | ||
1616 | #define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39 | ||
1617 | #define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1 | ||
1618 | #define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36 | ||
1619 | #define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36 | ||
1620 | #define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1 | ||
1621 | #define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31 | ||
1622 | #define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31 | ||
1623 | #define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1 | ||
1624 | #define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30 | ||
1625 | #define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30 | ||
1626 | #define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1 | ||
1627 | #define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F | ||
1628 | #define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F | ||
1629 | #define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1 | ||
1630 | #define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E | ||
1631 | #define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E | ||
1632 | #define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1 | ||
1633 | #define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D | ||
1634 | #define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D | ||
1635 | #define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1 | ||
1636 | #define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C | ||
1637 | #define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C | ||
1638 | #define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1 | ||
1639 | #define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B | ||
1640 | #define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B | ||
1641 | #define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1 | ||
1642 | #define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A | ||
1643 | #define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A | ||
1644 | #define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1 | ||
1645 | #define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29 | ||
1646 | #define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29 | ||
1647 | #define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1 | ||
1648 | #define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28 | ||
1649 | #define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28 | ||
1650 | #define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1 | ||
1651 | #define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27 | ||
1652 | #define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27 | ||
1653 | #define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1 | ||
1654 | #define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26 | ||
1655 | #define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26 | ||
1656 | #define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1 | ||
1657 | #define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25 | ||
1658 | #define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25 | ||
1659 | #define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1 | ||
1660 | #define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24 | ||
1661 | #define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24 | ||
1662 | #define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1 | ||
1663 | #define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22 | ||
1664 | #define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22 | ||
1665 | #define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1 | ||
1666 | #define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21 | ||
1667 | #define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21 | ||
1668 | #define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1 | ||
1669 | #define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20 | ||
1670 | #define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20 | ||
1671 | #define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1 | ||
1672 | #define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F | ||
1673 | #define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F | ||
1674 | #define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1 | ||
1675 | #define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E | ||
1676 | #define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E | ||
1677 | #define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1 | ||
1678 | #define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D | ||
1679 | #define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D | ||
1680 | #define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1 | ||
1681 | #define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11 | ||
1682 | #define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11 | ||
1683 | #define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1 | ||
1684 | #define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10 | ||
1685 | #define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10 | ||
1686 | #define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1 | ||
1687 | #define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF | ||
1688 | #define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF | ||
1689 | #define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1 | ||
1690 | #define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE | ||
1691 | #define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE | ||
1692 | #define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1 | ||
1693 | #define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB | ||
1694 | #define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB | ||
1695 | #define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1 | ||
1696 | #define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA | ||
1697 | #define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA | ||
1698 | #define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1 | ||
1699 | #define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9 | ||
1700 | #define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9 | ||
1701 | #define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1 | ||
1702 | #define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8 | ||
1703 | #define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8 | ||
1704 | #define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1 | ||
1705 | #define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7 | ||
1706 | #define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7 | ||
1707 | #define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1 | ||
1708 | #define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6 | ||
1709 | #define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6 | ||
1710 | #define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1 | ||
1711 | #define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5 | ||
1712 | #define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5 | ||
1713 | #define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1 | ||
1714 | #define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4 | ||
1715 | #define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4 | ||
1716 | #define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1 | ||
1717 | #define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3 | ||
1718 | #define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3 | ||
1719 | #define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1 | ||
1720 | #define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2 | ||
1721 | #define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2 | ||
1722 | #define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1 | ||
1723 | #define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1 | ||
1724 | #define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1 | ||
1725 | #define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1 | ||
1726 | #define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0 | ||
1727 | #define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0 | ||
1728 | #define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1 | ||
1729 | |||
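ErrMask_0, ErrStatus_0 and ErrClear_0 form the usual mask/status/clear triple for port 0 (a parallel _1 set exists for the second port): the mask gates which conditions are reported, the status register latches them, and writing a 1 to the matching ErrClear bit acknowledges one. A hedged sketch of that acknowledge sequence follows; readq()/writeq() against a mapped kregbase stand in for the driver's own register accessors, which this header does not define.

/*
 * Illustrative write-1-to-clear sequence for a latched port-0 error.
 * Only the offsets and bit positions come from this header; the
 * mapping of kregbase and the accessor choice are assumptions.
 */
#include <linux/io.h>
#include <linux/types.h>

static void qib_ack_port0_icrc_err(void __iomem *kregbase)
{
	u64 errs = readq(kregbase + QIB_7322_ErrStatus_0_OFFS);

	if (errs & (1ULL << QIB_7322_ErrStatus_0_RcvICRCErr_LSB))
		/* acknowledge by writing 1 to the corresponding clear bit */
		writeq(1ULL << QIB_7322_ErrClear_0_RcvICRCErrClear_LSB,
		       kregbase + QIB_7322_ErrClear_0_OFFS);
}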
1730 | #define QIB_7322_TXEStatus_0_OFFS 0x10B8 | ||
1731 | #define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF | ||
1732 | #define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F | ||
1733 | #define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F | ||
1734 | #define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1 | ||
1735 | #define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E | ||
1736 | #define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E | ||
1737 | #define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1 | ||
1738 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF | ||
1739 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF | ||
1740 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1 | ||
1741 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7 | ||
1742 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7 | ||
1743 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1 | ||
1744 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6 | ||
1745 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6 | ||
1746 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1 | ||
1747 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5 | ||
1748 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5 | ||
1749 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1 | ||
1750 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4 | ||
1751 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4 | ||
1752 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1 | ||
1753 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3 | ||
1754 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3 | ||
1755 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1 | ||
1756 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2 | ||
1757 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2 | ||
1758 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1 | ||
1759 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1 | ||
1760 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1 | ||
1761 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1 | ||
1762 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0 | ||
1763 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0 | ||
1764 | #define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1 | ||
1765 | |||
1766 | #define QIB_7322_RcvCtrl_0_OFFS 0x1100 | ||
1767 | #define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000 | ||
1768 | #define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A | ||
1769 | #define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A | ||
1770 | #define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1 | ||
1771 | #define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29 | ||
1772 | #define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29 | ||
1773 | #define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1 | ||
1774 | #define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28 | ||
1775 | #define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28 | ||
1776 | #define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1 | ||
1777 | #define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27 | ||
1778 | #define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27 | ||
1779 | #define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1 | ||
1780 | #define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2 | ||
1781 | #define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11 | ||
1782 | #define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF | ||
1783 | #define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0 | ||
1784 | #define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0 | ||
1785 | #define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1 | ||
1786 | |||
1787 | #define QIB_7322_RcvBTHQP_0_OFFS 0x1108 | ||
1788 | #define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000 | ||
1789 | #define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0 | ||
1790 | #define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17 | ||
1791 | #define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF | ||
1792 | |||
1793 | #define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110 | ||
1794 | #define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000 | ||
1795 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19 | ||
1796 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D | ||
1797 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F | ||
1798 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14 | ||
1799 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18 | ||
1800 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F | ||
1801 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF | ||
1802 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13 | ||
1803 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F | ||
1804 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA | ||
1805 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE | ||
1806 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F | ||
1807 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5 | ||
1808 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9 | ||
1809 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F | ||
1810 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0 | ||
1811 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4 | ||
1812 | #define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F | ||
1813 | |||
1814 | #define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118 | ||
1815 | #define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000 | ||
1816 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19 | ||
1817 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D | ||
1818 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F | ||
1819 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14 | ||
1820 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18 | ||
1821 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F | ||
1822 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF | ||
1823 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13 | ||
1824 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F | ||
1825 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA | ||
1826 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE | ||
1827 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F | ||
1828 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5 | ||
1829 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9 | ||
1830 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F | ||
1831 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0 | ||
1832 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4 | ||
1833 | #define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F | ||
1834 | |||
1835 | #define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120 | ||
1836 | #define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000 | ||
1837 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19 | ||
1838 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D | ||
1839 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F | ||
1840 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14 | ||
1841 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18 | ||
1842 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F | ||
1843 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF | ||
1844 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13 | ||
1845 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F | ||
1846 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA | ||
1847 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE | ||
1848 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F | ||
1849 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5 | ||
1850 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9 | ||
1851 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F | ||
1852 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0 | ||
1853 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4 | ||
1854 | #define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F | ||
1855 | |||
1856 | #define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128 | ||
1857 | #define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000 | ||
1858 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19 | ||
1859 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D | ||
1860 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F | ||
1861 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14 | ||
1862 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18 | ||
1863 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F | ||
1864 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF | ||
1865 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13 | ||
1866 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F | ||
1867 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA | ||
1868 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE | ||
1869 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F | ||
1870 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5 | ||
1871 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9 | ||
1872 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F | ||
1873 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0 | ||
1874 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4 | ||
1875 | #define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F | ||
1876 | |||
1877 | #define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130 | ||
1878 | #define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000 | ||
1879 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19 | ||
1880 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D | ||
1881 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F | ||
1882 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14 | ||
1883 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18 | ||
1884 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F | ||
1885 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF | ||
1886 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13 | ||
1887 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F | ||
1888 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA | ||
1889 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE | ||
1890 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F | ||
1891 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5 | ||
1892 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9 | ||
1893 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F | ||
1894 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0 | ||
1895 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4 | ||
1896 | #define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F | ||
1897 | |||
1898 | #define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138 | ||
1899 | #define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000 | ||
1900 | #define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5 | ||
1901 | #define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9 | ||
1902 | #define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F | ||
1903 | #define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0 | ||
1904 | #define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4 | ||
1905 | #define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F | ||
1906 | |||
1907 | #define QIB_7322_PSStat_0_OFFS 0x1140 | ||
1908 | #define QIB_7322_PSStat_0_DEF 0x0000000000000000 | ||
1909 | |||
1910 | #define QIB_7322_PSStart_0_OFFS 0x1148 | ||
1911 | #define QIB_7322_PSStart_0_DEF 0x0000000000000000 | ||
1912 | |||
1913 | #define QIB_7322_PSInterval_0_OFFS 0x1150 | ||
1914 | #define QIB_7322_PSInterval_0_DEF 0x0000000000000000 | ||
1915 | |||
1916 | #define QIB_7322_RcvStatus_0_OFFS 0x1160 | ||
1917 | #define QIB_7322_RcvStatus_0_DEF 0x0000000000000000 | ||
1918 | #define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1 | ||
1919 | #define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5 | ||
1920 | #define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F | ||
1921 | #define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0 | ||
1922 | #define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0 | ||
1923 | #define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1 | ||
1924 | |||
1925 | #define QIB_7322_RcvPartitionKey_0_OFFS 0x1168 | ||
1926 | #define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000 | ||
1927 | |||
1928 | #define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170 | ||
1929 | #define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000 | ||
1930 | #define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0 | ||
1931 | #define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4 | ||
1932 | #define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F | ||
1933 | |||
1934 | #define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178 | ||
1935 | #define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000 | ||
1936 | #define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20 | ||
1937 | #define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F | ||
1938 | #define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF | ||
1939 | #define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0 | ||
1940 | #define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F | ||
1941 | #define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF | ||
1942 | |||
1943 | #define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180 | ||
1944 | #define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000 | ||
1945 | #define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0 | ||
1946 | #define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF | ||
1947 | #define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF | ||
1948 | |||
1949 | #define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188 | ||
1950 | #define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000 | ||
1951 | #define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0 | ||
1952 | #define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF | ||
1953 | #define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF | ||
1954 | |||
1955 | #define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190 | ||
1956 | #define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000 | ||
1957 | #define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0 | ||
1958 | #define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF | ||
1959 | #define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF | ||
1960 | |||
1961 | #define QIB_7322_SendCtrl_0_OFFS 0x11C0 | ||
1962 | #define QIB_7322_SendCtrl_0_DEF 0x0000000000000000 | ||
1963 | #define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF | ||
1964 | #define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF | ||
1965 | #define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1 | ||
1966 | #define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE | ||
1967 | #define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE | ||
1968 | #define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1 | ||
1969 | #define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD | ||
1970 | #define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD | ||
1971 | #define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1 | ||
1972 | #define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC | ||
1973 | #define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC | ||
1974 | #define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1 | ||
1975 | #define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB | ||
1976 | #define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB | ||
1977 | #define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1 | ||
1978 | #define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA | ||
1979 | #define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA | ||
1980 | #define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1 | ||
1981 | #define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9 | ||
1982 | #define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9 | ||
1983 | #define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1 | ||
1984 | #define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8 | ||
1985 | #define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8 | ||
1986 | #define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1 | ||
1987 | #define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7 | ||
1988 | #define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7 | ||
1989 | #define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1 | ||
1990 | #define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3 | ||
1991 | #define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3 | ||
1992 | #define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1 | ||
1993 | #define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1 | ||
1994 | #define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1 | ||
1995 | #define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1 | ||
1996 | #define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0 | ||
1997 | #define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0 | ||
1998 | #define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1 | ||
1999 | |||
2000 | #define QIB_7322_SendDmaBase_0_OFFS 0x11F8 | ||
2001 | #define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000 | ||
2002 | #define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0 | ||
2003 | #define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F | ||
2004 | #define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF | ||
2005 | |||
2006 | #define QIB_7322_SendDmaLenGen_0_OFFS 0x1200 | ||
2007 | #define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000 | ||
2008 | #define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10 | ||
2009 | #define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12 | ||
2010 | #define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7 | ||
2011 | #define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0 | ||
2012 | #define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF | ||
2013 | #define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF | ||
2014 | |||
2015 | #define QIB_7322_SendDmaTail_0_OFFS 0x1208 | ||
2016 | #define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000 | ||
2017 | #define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0 | ||
2018 | #define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF | ||
2019 | #define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF | ||
2020 | |||
2021 | #define QIB_7322_SendDmaHead_0_OFFS 0x1210 | ||
2022 | #define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000 | ||
2023 | #define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20 | ||
2024 | #define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F | ||
2025 | #define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF | ||
2026 | #define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0 | ||
2027 | #define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF | ||
2028 | #define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF | ||
2029 | |||
2030 | #define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218 | ||
2031 | #define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000 | ||
2032 | #define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0 | ||
2033 | #define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F | ||
2034 | #define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF | ||
2035 | |||
2036 | #define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220 | ||
2037 | #define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000 | ||
2038 | #define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0 | ||
2039 | #define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F | ||
2040 | #define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0 | ||
2041 | |||
2042 | #define QIB_7322_SendDmaStatus_0_OFFS 0x1238 | ||
2043 | #define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000 | ||
2044 | #define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F | ||
2045 | #define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F | ||
2046 | #define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1 | ||
2047 | #define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E | ||
2048 | #define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E | ||
2049 | #define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1 | ||
2050 | #define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D | ||
2051 | #define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D | ||
2052 | #define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1 | ||
2053 | #define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F | ||
2054 | #define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C | ||
2055 | #define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF | ||
2056 | #define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28 | ||
2057 | #define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E | ||
2058 | #define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F | ||
2059 | #define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20 | ||
2060 | #define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27 | ||
2061 | #define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF | ||
2062 | #define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F | ||
2063 | #define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F | ||
2064 | #define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1 | ||
2065 | #define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E | ||
2066 | #define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E | ||
2067 | #define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1 | ||
2068 | #define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D | ||
2069 | #define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D | ||
2070 | #define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1 | ||
2071 | #define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C | ||
2072 | #define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C | ||
2073 | #define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1 | ||
2074 | #define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B | ||
2075 | #define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B | ||
2076 | #define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1 | ||
2077 | #define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A | ||
2078 | #define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A | ||
2079 | #define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1 | ||
2080 | #define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19 | ||
2081 | #define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19 | ||
2082 | #define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1 | ||
2083 | #define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18 | ||
2084 | #define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18 | ||
2085 | #define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1 | ||
2086 | #define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10 | ||
2087 | #define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17 | ||
2088 | #define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF | ||
2089 | #define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0 | ||
2090 | #define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF | ||
2091 | #define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF | ||
2092 | |||
2093 | #define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258 | ||
2094 | #define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000 | ||
2095 | #define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0 | ||
2096 | #define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3 | ||
2097 | #define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF | ||
2098 | |||
2099 | #define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260 | ||
2100 | #define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000 | ||
2101 | #define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6 | ||
2102 | #define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6 | ||
2103 | #define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1 | ||
2104 | #define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5 | ||
2105 | #define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5 | ||
2106 | #define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1 | ||
2107 | #define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4 | ||
2108 | #define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4 | ||
2109 | #define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1 | ||
2110 | #define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3 | ||
2111 | #define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3 | ||
2112 | #define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1 | ||
2113 | #define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2 | ||
2114 | #define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2 | ||
2115 | #define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1 | ||
2116 | #define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1 | ||
2117 | #define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1 | ||
2118 | #define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1 | ||
2119 | #define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0 | ||
2120 | #define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0 | ||
2121 | #define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1 | ||
2122 | |||
2123 | #define QIB_7322_RxCreditVL0_0_OFFS 0x1280 | ||
2124 | #define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000 | ||
2125 | #define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10 | ||
2126 | #define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B | ||
2127 | #define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF | ||
2128 | #define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0 | ||
2129 | #define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB | ||
2130 | #define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF | ||
2131 | |||
2132 | #define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480 | ||
2133 | #define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000 | ||
2134 | #define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0 | ||
2135 | #define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F | ||
2136 | #define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0 | ||
2137 | |||
2138 | #define QIB_7322_SendCheckControl_0_OFFS 0x14A8 | ||
2139 | #define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000 | ||
2140 | #define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4 | ||
2141 | #define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4 | ||
2142 | #define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1 | ||
2143 | #define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3 | ||
2144 | #define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3 | ||
2145 | #define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1 | ||
2146 | #define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2 | ||
2147 | #define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2 | ||
2148 | #define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1 | ||
2149 | #define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1 | ||
2150 | #define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1 | ||
2151 | #define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1 | ||
2152 | #define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0 | ||
2153 | #define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0 | ||
2154 | #define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1 | ||
2155 | |||
2156 | #define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0 | ||
2157 | #define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000 | ||
2158 | #define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0 | ||
2159 | #define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF | ||
2160 | #define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF | ||
2161 | |||
2162 | #define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8 | ||
2163 | #define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000 | ||
2164 | #define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0 | ||
2165 | #define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF | ||
2166 | #define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF | ||
2167 | |||
2168 | #define QIB_7322_IBCStatusA_0_OFFS 0x1540 | ||
2169 | #define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02 | ||
2170 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27 | ||
2171 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27 | ||
2172 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1 | ||
2173 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26 | ||
2174 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26 | ||
2175 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1 | ||
2176 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25 | ||
2177 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25 | ||
2178 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1 | ||
2179 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24 | ||
2180 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24 | ||
2181 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1 | ||
2182 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23 | ||
2183 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23 | ||
2184 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1 | ||
2185 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22 | ||
2186 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22 | ||
2187 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1 | ||
2188 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21 | ||
2189 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21 | ||
2190 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1 | ||
2191 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20 | ||
2192 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20 | ||
2193 | #define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1 | ||
2194 | #define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E | ||
2195 | #define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E | ||
2196 | #define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1 | ||
2197 | #define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D | ||
2198 | #define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D | ||
2199 | #define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1 | ||
2200 | #define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF | ||
2201 | #define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF | ||
2202 | #define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1 | ||
2203 | #define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE | ||
2204 | #define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE | ||
2205 | #define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1 | ||
2206 | #define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD | ||
2207 | #define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD | ||
2208 | #define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1 | ||
2209 | #define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC | ||
2210 | #define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC | ||
2211 | #define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1 | ||
2212 | #define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA | ||
2213 | #define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA | ||
2214 | #define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1 | ||
2215 | #define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9 | ||
2216 | #define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9 | ||
2217 | #define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1 | ||
2218 | #define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8 | ||
2219 | #define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8 | ||
2220 | #define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1 | ||
2221 | #define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5 | ||
2222 | #define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7 | ||
2223 | #define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7 | ||
2224 | #define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0 | ||
2225 | #define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4 | ||
2226 | #define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F | ||
2227 | |||
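(Editorial aside, not part of the patch: the generated names above encode, for each register field, the bit positions of its least and most significant bits plus a right-justified mask. The short C sketch below shows the conventional (value >> LSB) & RMASK idiom such definitions are built for, using the IBCStatusA_0 LinkState/LinkTrainingState fields defined just above as the example. It is a hedged illustration only: the helper names, the standalone userspace harness, and the sample register value are assumptions for demonstration, not the driver's actual API.)

	/*
	 * Illustrative sketch only -- not part of this patch.
	 * Shows how LSB/RMASK pairs like those above are typically consumed.
	 */
	#include <stdint.h>
	#include <stdio.h>

	/* Copied from the definitions above so this sketch compiles standalone. */
	#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
	#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
	#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
	#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F

	/* Extract a field: shift it down to bit 0, then mask to its width. */
	static uint64_t field_get(uint64_t regval, unsigned int lsb, uint64_t rmask)
	{
		return (regval >> lsb) & rmask;
	}

	/* Merge a new field value into a register image (hypothetical helper). */
	static uint64_t field_put(uint64_t regval, unsigned int lsb, uint64_t rmask,
				  uint64_t val)
	{
		return (regval & ~(rmask << lsb)) | ((val & rmask) << lsb);
	}

	int main(void)
	{
		/* Pretend this value was read from the register at offset
		 * QIB_7322_IBCStatusA_0_OFFS (0x1540); 0x62 is made up. */
		uint64_t ibcstatus = 0x62;

		uint64_t link_state = field_get(ibcstatus,
						QIB_7322_IBCStatusA_0_LinkState_LSB,
						QIB_7322_IBCStatusA_0_LinkState_RMASK);
		uint64_t training = field_get(ibcstatus,
					      QIB_7322_IBCStatusA_0_LinkTrainingState_LSB,
					      QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK);

		printf("LinkState=%llu LinkTrainingState=%llu\n",
		       (unsigned long long)link_state,
		       (unsigned long long)training);

		/* field_put() would be used the same way before writing back. */
		(void)field_put(ibcstatus, QIB_7322_IBCStatusA_0_LinkState_LSB,
				QIB_7322_IBCStatusA_0_LinkState_RMASK, 0);
		return 0;
	}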
2228 | #define QIB_7322_IBCStatusB_0_OFFS 0x1548 | ||
2229 | #define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX | ||
2230 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27 | ||
2231 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27 | ||
2232 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1 | ||
2233 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26 | ||
2234 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26 | ||
2235 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1 | ||
2236 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25 | ||
2237 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25 | ||
2238 | #define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1 | ||
2239 | #define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24 | ||
2240 | #define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24 | ||
2241 | #define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1 | ||
2242 | #define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20 | ||
2243 | #define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23 | ||
2244 | #define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF | ||
2245 | #define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E | ||
2246 | #define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F | ||
2247 | #define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3 | ||
2248 | #define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A | ||
2249 | #define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D | ||
2250 | #define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF | ||
2251 | #define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0 | ||
2252 | #define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19 | ||
2253 | #define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF | ||
2254 | |||
2255 | #define QIB_7322_IBCCtrlA_0_OFFS 0x1560 | ||
2256 | #define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000 | ||
2257 | #define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F | ||
2258 | #define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F | ||
2259 | #define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1 | ||
2260 | #define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E | ||
2261 | #define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E | ||
2262 | #define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1 | ||
2263 | #define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D | ||
2264 | #define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D | ||
2265 | #define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1 | ||
2266 | #define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C | ||
2267 | #define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C | ||
2268 | #define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1 | ||
2269 | #define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30 | ||
2270 | #define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32 | ||
2271 | #define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7 | ||
2272 | #define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24 | ||
2273 | #define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27 | ||
2274 | #define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF | ||
2275 | #define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20 | ||
2276 | #define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23 | ||
2277 | #define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF | ||
2278 | #define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15 | ||
2279 | #define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F | ||
2280 | #define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF | ||
2281 | #define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13 | ||
2282 | #define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14 | ||
2283 | #define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3 | ||
2284 | #define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10 | ||
2285 | #define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12 | ||
2286 | #define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7 | ||
2287 | #define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8 | ||
2288 | #define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF | ||
2289 | #define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF | ||
2290 | #define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0 | ||
2291 | #define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7 | ||
2292 | #define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF | ||
2293 | |||
2294 | #define QIB_7322_IBCCtrlB_0_OFFS 0x1568 | ||
2295 | #define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF | ||
2296 | #define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30 | ||
2297 | #define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F | ||
2298 | #define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF | ||
2299 | #define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20 | ||
2300 | #define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F | ||
2301 | #define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF | ||
2302 | #define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B | ||
2303 | #define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B | ||
2304 | #define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1 | ||
2305 | #define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A | ||
2306 | #define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A | ||
2307 | #define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1 | ||
2308 | #define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12 | ||
2309 | #define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19 | ||
2310 | #define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF | ||
2311 | #define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11 | ||
2312 | #define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11 | ||
2313 | #define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1 | ||
2314 | #define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10 | ||
2315 | #define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10 | ||
2316 | #define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1 | ||
2317 | #define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC | ||
2318 | #define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF | ||
2319 | #define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF | ||
2320 | #define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB | ||
2321 | #define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB | ||
2322 | #define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1 | ||
2323 | #define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA | ||
2324 | #define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA | ||
2325 | #define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1 | ||
2326 | #define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9 | ||
2327 | #define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9 | ||
2328 | #define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1 | ||
2329 | #define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8 | ||
2330 | #define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8 | ||
2331 | #define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1 | ||
2332 | #define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7 | ||
2333 | #define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7 | ||
2334 | #define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1 | ||
2335 | #define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5 | ||
2336 | #define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6 | ||
2337 | #define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3 | ||
2338 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4 | ||
2339 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4 | ||
2340 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1 | ||
2341 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3 | ||
2342 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3 | ||
2343 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1 | ||
2344 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2 | ||
2345 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2 | ||
2346 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1 | ||
2347 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1 | ||
2348 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1 | ||
2349 | #define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1 | ||
2350 | #define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0 | ||
2351 | #define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0 | ||
2352 | #define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1 | ||
2353 | |||
2354 | #define QIB_7322_IBCCtrlC_0_OFFS 0x1570 | ||
2355 | #define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301 | ||
2356 | #define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5 | ||
2357 | #define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9 | ||
2358 | #define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F | ||
2359 | #define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0 | ||
2360 | #define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4 | ||
2361 | #define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F | ||
2362 | |||
2363 | #define QIB_7322_HRTBT_GUID_0_OFFS 0x1588 | ||
2364 | #define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000 | ||
2365 | |||
2366 | #define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590 | ||
2367 | #define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000 | ||
2368 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30 | ||
2369 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F | ||
2370 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF | ||
2371 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20 | ||
2372 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F | ||
2373 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF | ||
2374 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD | ||
2375 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF | ||
2376 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7 | ||
2377 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB | ||
2378 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC | ||
2379 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3 | ||
2380 | #define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4 | ||
2381 | #define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4 | ||
2382 | #define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1 | ||
2383 | #define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2 | ||
2384 | #define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3 | ||
2385 | #define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3 | ||
2386 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1 | ||
2387 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1 | ||
2388 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1 | ||
2389 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0 | ||
2390 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0 | ||
2391 | #define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1 | ||
2392 | |||
2393 | #define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598 | ||
2394 | #define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000 | ||
2395 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30 | ||
2396 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F | ||
2397 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF | ||
2398 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20 | ||
2399 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F | ||
2400 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF | ||
2401 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18 | ||
2402 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F | ||
2403 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF | ||
2404 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10 | ||
2405 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17 | ||
2406 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF | ||
2407 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1 | ||
2408 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1 | ||
2409 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1 | ||
2410 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0 | ||
2411 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0 | ||
2412 | #define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1 | ||
2413 | |||
2414 | #define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8 | ||
2415 | #define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000 | ||
2416 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22 | ||
2417 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22 | ||
2418 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1 | ||
2419 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21 | ||
2420 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21 | ||
2421 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1 | ||
2422 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20 | ||
2423 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20 | ||
2424 | #define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1 | ||
2425 | #define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11 | ||
2426 | #define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19 | ||
2427 | #define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF | ||
2428 | #define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8 | ||
2429 | #define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10 | ||
2430 | #define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF | ||
2431 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2 | ||
2432 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2 | ||
2433 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1 | ||
2434 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1 | ||
2435 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1 | ||
2436 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1 | ||
2437 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0 | ||
2438 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0 | ||
2439 | #define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1 | ||
2440 | |||
2441 | #define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0 | ||
2442 | #define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000 | ||
2443 | |||
2444 | #define QIB_7322_IBPCSConfig_0_OFFS 0x15D8 | ||
2445 | #define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007 | ||
2446 | #define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9 | ||
2447 | #define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12 | ||
2448 | #define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF | ||
2449 | #define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2 | ||
2450 | #define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2 | ||
2451 | #define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1 | ||
2452 | #define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1 | ||
2453 | #define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1 | ||
2454 | #define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1 | ||
2455 | #define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0 | ||
2456 | #define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0 | ||
2457 | #define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1 | ||
2458 | |||
2459 | #define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0 | ||
2460 | #define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F | ||
2461 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A | ||
2462 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A | ||
2463 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1 | ||
2464 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19 | ||
2465 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19 | ||
2466 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1 | ||
2467 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18 | ||
2468 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18 | ||
2469 | #define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1 | ||
2470 | #define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14 | ||
2471 | #define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17 | ||
2472 | #define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF | ||
2473 | #define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10 | ||
2474 | #define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13 | ||
2475 | #define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF | ||
2476 | #define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF | ||
2477 | #define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF | ||
2478 | #define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1 | ||
2479 | #define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD | ||
2480 | #define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD | ||
2481 | #define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1 | ||
2482 | #define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC | ||
2483 | #define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC | ||
2484 | #define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1 | ||
2485 | #define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB | ||
2486 | #define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB | ||
2487 | #define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1 | ||
2488 | #define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA | ||
2489 | #define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA | ||
2490 | #define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1 | ||
2491 | #define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9 | ||
2492 | #define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9 | ||
2493 | #define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1 | ||
2494 | #define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8 | ||
2495 | #define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8 | ||
2496 | #define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1 | ||
2497 | #define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0 | ||
2498 | #define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6 | ||
2499 | #define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F | ||
2500 | |||
2501 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600 | ||
2502 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000 | ||
2503 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F | ||
2504 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F | ||
2505 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1 | ||
2506 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E | ||
2507 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E | ||
2508 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1 | ||
2509 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE | ||
2510 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11 | ||
2511 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF | ||
2512 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9 | ||
2513 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD | ||
2514 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F | ||
2515 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5 | ||
2516 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8 | ||
2517 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF | ||
2518 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3 | ||
2519 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4 | ||
2520 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3 | ||
2521 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0 | ||
2522 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2 | ||
2523 | #define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7 | ||
2524 | |||
2525 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640 | ||
2526 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000 | ||
2527 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27 | ||
2528 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27 | ||
2529 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1 | ||
2530 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26 | ||
2531 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26 | ||
2532 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1 | ||
2533 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25 | ||
2534 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25 | ||
2535 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1 | ||
2536 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24 | ||
2537 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24 | ||
2538 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1 | ||
2539 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23 | ||
2540 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23 | ||
2541 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1 | ||
2542 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22 | ||
2543 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22 | ||
2544 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1 | ||
2545 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21 | ||
2546 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21 | ||
2547 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1 | ||
2548 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20 | ||
2549 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20 | ||
2550 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1 | ||
2551 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18 | ||
2552 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F | ||
2553 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF | ||
2554 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10 | ||
2555 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17 | ||
2556 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF | ||
2557 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8 | ||
2558 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF | ||
2559 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF | ||
2560 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0 | ||
2561 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7 | ||
2562 | #define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF | ||
2563 | |||
2564 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648 | ||
2565 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000 | ||
2566 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27 | ||
2567 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27 | ||
2568 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1 | ||
2569 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26 | ||
2570 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26 | ||
2571 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1 | ||
2572 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25 | ||
2573 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25 | ||
2574 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1 | ||
2575 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24 | ||
2576 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24 | ||
2577 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1 | ||
2578 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23 | ||
2579 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23 | ||
2580 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1 | ||
2581 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22 | ||
2582 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22 | ||
2583 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1 | ||
2584 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21 | ||
2585 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21 | ||
2586 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1 | ||
2587 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20 | ||
2588 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20 | ||
2589 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1 | ||
2590 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18 | ||
2591 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F | ||
2592 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF | ||
2593 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10 | ||
2594 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17 | ||
2595 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF | ||
2596 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8 | ||
2597 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF | ||
2598 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF | ||
2599 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0 | ||
2600 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7 | ||
2601 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF | ||
2602 | |||
2603 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650 | ||
2604 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000 | ||
2605 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27 | ||
2606 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27 | ||
2607 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1 | ||
2608 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26 | ||
2609 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26 | ||
2610 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1 | ||
2611 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25 | ||
2612 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25 | ||
2613 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1 | ||
2614 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24 | ||
2615 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24 | ||
2616 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1 | ||
2617 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23 | ||
2618 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23 | ||
2619 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1 | ||
2620 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22 | ||
2621 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22 | ||
2622 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1 | ||
2623 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21 | ||
2624 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21 | ||
2625 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1 | ||
2626 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20 | ||
2627 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20 | ||
2628 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1 | ||
2629 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18 | ||
2630 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F | ||
2631 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF | ||
2632 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10 | ||
2633 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17 | ||
2634 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF | ||
2635 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8 | ||
2636 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF | ||
2637 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF | ||
2638 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0 | ||
2639 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7 | ||
2640 | #define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF | ||
2641 | |||
2642 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658 | ||
2643 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000 | ||
2644 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27 | ||
2645 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27 | ||
2646 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1 | ||
2647 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26 | ||
2648 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26 | ||
2649 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1 | ||
2650 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25 | ||
2651 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25 | ||
2652 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1 | ||
2653 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24 | ||
2654 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24 | ||
2655 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1 | ||
2656 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23 | ||
2657 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23 | ||
2658 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1 | ||
2659 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22 | ||
2660 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22 | ||
2661 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1 | ||
2662 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21 | ||
2663 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21 | ||
2664 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1 | ||
2665 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20 | ||
2666 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20 | ||
2667 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1 | ||
2668 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18 | ||
2669 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F | ||
2670 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF | ||
2671 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10 | ||
2672 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17 | ||
2673 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF | ||
2674 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8 | ||
2675 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF | ||
2676 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF | ||
2677 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0 | ||
2678 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7 | ||
2679 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF | ||
2680 | |||
2681 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660 | ||
2682 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000 | ||
2683 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27 | ||
2684 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27 | ||
2685 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1 | ||
2686 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26 | ||
2687 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26 | ||
2688 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1 | ||
2689 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25 | ||
2690 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25 | ||
2691 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1 | ||
2692 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24 | ||
2693 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24 | ||
2694 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1 | ||
2695 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23 | ||
2696 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23 | ||
2697 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1 | ||
2698 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22 | ||
2699 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22 | ||
2700 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1 | ||
2701 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21 | ||
2702 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21 | ||
2703 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1 | ||
2704 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20 | ||
2705 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20 | ||
2706 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1 | ||
2707 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18 | ||
2708 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F | ||
2709 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF | ||
2710 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10 | ||
2711 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17 | ||
2712 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF | ||
2713 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8 | ||
2714 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF | ||
2715 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF | ||
2716 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0 | ||
2717 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7 | ||
2718 | #define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF | ||
2719 | |||
2720 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668 | ||
2721 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000 | ||
2722 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27 | ||
2723 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27 | ||
2724 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1 | ||
2725 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26 | ||
2726 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26 | ||
2727 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1 | ||
2728 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25 | ||
2729 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25 | ||
2730 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1 | ||
2731 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24 | ||
2732 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24 | ||
2733 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1 | ||
2734 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23 | ||
2735 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23 | ||
2736 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1 | ||
2737 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22 | ||
2738 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22 | ||
2739 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1 | ||
2740 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21 | ||
2741 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21 | ||
2742 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1 | ||
2743 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20 | ||
2744 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20 | ||
2745 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1 | ||
2746 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18 | ||
2747 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F | ||
2748 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF | ||
2749 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10 | ||
2750 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17 | ||
2751 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF | ||
2752 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8 | ||
2753 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF | ||
2754 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF | ||
2755 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0 | ||
2756 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7 | ||
2757 | #define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF | ||
2758 | |||
2759 | #define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670 | ||
2760 | #define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000 | ||
2761 | |||
2762 | #define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0 | ||
2763 | #define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000 | ||
2764 | #define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0 | ||
2765 | #define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7 | ||
2766 | #define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF | ||
2767 | |||
2768 | #define QIB_7322_LowPriority0_0_OFFS 0x1C00 | ||
2769 | #define QIB_7322_LowPriority0_0_DEF 0x0000000000000000 | ||
2770 | #define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10 | ||
2771 | #define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12 | ||
2772 | #define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7 | ||
2773 | #define QIB_7322_LowPriority0_0_Weight_LSB 0x0 | ||
2774 | #define QIB_7322_LowPriority0_0_Weight_MSB 0x7 | ||
2775 | #define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF | ||
2776 | |||
2777 | #define QIB_7322_HighPriority0_0_OFFS 0x1E00 | ||
2778 | #define QIB_7322_HighPriority0_0_DEF 0x0000000000000000 | ||
2779 | #define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10 | ||
2780 | #define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12 | ||
2781 | #define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7 | ||
2782 | #define QIB_7322_HighPriority0_0_Weight_LSB 0x0 | ||
2783 | #define QIB_7322_HighPriority0_0_Weight_MSB 0x7 | ||
2784 | #define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF | ||
2785 | |||
2786 | #define QIB_7322_CntrRegBase_1_OFFS 0x2028 | ||
2787 | #define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000 | ||
2788 | |||
2789 | #define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170 | ||
2790 | |||
2791 | #define QIB_7322_SendCtrl_1_OFFS 0x21C0 | ||
2792 | |||
2793 | #define QIB_7322_SendBufAvail0_OFFS 0x3000 | ||
2794 | #define QIB_7322_SendBufAvail0_DEF 0x0000000000000000 | ||
2795 | #define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0 | ||
2796 | #define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F | ||
2797 | #define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0 | ||
2798 | |||
2799 | #define QIB_7322_MsixTable_OFFS 0x8000 | ||
2800 | #define QIB_7322_MsixTable_DEF 0x0000000000000000 | ||
2801 | |||
2802 | #define QIB_7322_MsixPba_OFFS 0x9000 | ||
2803 | #define QIB_7322_MsixPba_DEF 0x0000000000000000 | ||
2804 | |||
2805 | #define QIB_7322_LAMemory_OFFS 0xA000 | ||
2806 | #define QIB_7322_LAMemory_DEF 0x0000000000000000 | ||
2807 | |||
2808 | #define QIB_7322_LBIntCnt_OFFS 0x11000 | ||
2809 | #define QIB_7322_LBIntCnt_DEF 0x0000000000000000 | ||
2810 | |||
2811 | #define QIB_7322_LBFlowStallCnt_OFFS 0x11008 | ||
2812 | #define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000 | ||
2813 | |||
2814 | #define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0 | ||
2815 | #define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000 | ||
2816 | |||
2817 | #define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8 | ||
2818 | #define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000 | ||
2819 | |||
2820 | #define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8 | ||
2821 | #define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000 | ||
2822 | |||
2823 | #define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0 | ||
2824 | #define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000 | ||
2825 | |||
2826 | #define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0 | ||
2827 | #define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000 | ||
2828 | |||
2829 | #define QIB_7322_LBIntCnt_0_OFFS 0x12000 | ||
2830 | #define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000 | ||
2831 | |||
2832 | #define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008 | ||
2833 | #define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000 | ||
2834 | |||
2835 | #define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010 | ||
2836 | #define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000 | ||
2837 | |||
2838 | #define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018 | ||
2839 | #define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000 | ||
2840 | |||
2841 | #define QIB_7322_TxDataPktCnt_0_OFFS 0x12020 | ||
2842 | #define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000 | ||
2843 | |||
2844 | #define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028 | ||
2845 | #define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000 | ||
2846 | |||
2847 | #define QIB_7322_TxDwordCnt_0_OFFS 0x12030 | ||
2848 | #define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000 | ||
2849 | |||
2850 | #define QIB_7322_TxLenErrCnt_0_OFFS 0x12038 | ||
2851 | #define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000 | ||
2852 | |||
2853 | #define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040 | ||
2854 | #define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000 | ||
2855 | |||
2856 | #define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048 | ||
2857 | #define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000 | ||
2858 | |||
2859 | #define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050 | ||
2860 | #define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000 | ||
2861 | |||
2862 | #define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058 | ||
2863 | #define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000 | ||
2864 | |||
2865 | #define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060 | ||
2866 | #define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000 | ||
2867 | |||
2868 | #define QIB_7322_RxDataPktCnt_0_OFFS 0x12068 | ||
2869 | #define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000 | ||
2870 | |||
2871 | #define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070 | ||
2872 | #define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000 | ||
2873 | |||
2874 | #define QIB_7322_RxDwordCnt_0_OFFS 0x12078 | ||
2875 | #define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000 | ||
2876 | |||
2877 | #define QIB_7322_RxLenErrCnt_0_OFFS 0x12080 | ||
2878 | #define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000 | ||
2879 | |||
2880 | #define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088 | ||
2881 | #define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000 | ||
2882 | |||
2883 | #define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090 | ||
2884 | #define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000 | ||
2885 | |||
2886 | #define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098 | ||
2887 | #define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000 | ||
2888 | |||
2889 | #define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0 | ||
2890 | #define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000 | ||
2891 | |||
2892 | #define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8 | ||
2893 | #define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000 | ||
2894 | |||
2895 | #define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0 | ||
2896 | #define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000 | ||
2897 | |||
2898 | #define QIB_7322_RxEBPCnt_0_OFFS 0x120B8 | ||
2899 | #define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000 | ||
2900 | |||
2901 | #define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0 | ||
2902 | #define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000 | ||
2903 | |||
2904 | #define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8 | ||
2905 | #define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000 | ||
2906 | |||
2907 | #define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0 | ||
2908 | #define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000 | ||
2909 | |||
2910 | #define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0 | ||
2911 | #define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000 | ||
2912 | |||
2913 | #define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180 | ||
2914 | #define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000 | ||
2915 | |||
2916 | #define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188 | ||
2917 | #define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000 | ||
2918 | |||
2919 | #define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190 | ||
2920 | #define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000 | ||
2921 | |||
2922 | #define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198 | ||
2923 | #define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000 | ||
2924 | |||
2925 | #define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8 | ||
2926 | #define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000 | ||
2927 | |||
2928 | #define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0 | ||
2929 | #define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000 | ||
2930 | |||
2931 | #define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8 | ||
2932 | #define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000 | ||
2933 | |||
2934 | #define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0 | ||
2935 | #define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000 | ||
2936 | |||
2937 | #define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8 | ||
2938 | #define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000 | ||
2939 | |||
2940 | #define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0 | ||
2941 | #define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000 | ||
2942 | |||
2943 | #define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8 | ||
2944 | #define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000 | ||
2945 | |||
2946 | #define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8 | ||
2947 | #define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000 | ||
2948 | |||
2949 | #define QIB_7322_PSRcvDataCount_0_OFFS 0x12218 | ||
2950 | #define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000 | ||
2951 | |||
2952 | #define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220 | ||
2953 | #define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000 | ||
2954 | |||
2955 | #define QIB_7322_PSXmitDataCount_0_OFFS 0x12228 | ||
2956 | #define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000 | ||
2957 | |||
2958 | #define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230 | ||
2959 | #define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000 | ||
2960 | |||
2961 | #define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238 | ||
2962 | #define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000 | ||
2963 | |||
2964 | #define QIB_7322_LBIntCnt_1_OFFS 0x13000 | ||
2965 | #define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000 | ||
2966 | |||
2967 | #define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008 | ||
2968 | #define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000 | ||
2969 | |||
2970 | #define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010 | ||
2971 | #define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000 | ||
2972 | |||
2973 | #define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018 | ||
2974 | #define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000 | ||
2975 | |||
2976 | #define QIB_7322_TxDataPktCnt_1_OFFS 0x13020 | ||
2977 | #define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000 | ||
2978 | |||
2979 | #define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028 | ||
2980 | #define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000 | ||
2981 | |||
2982 | #define QIB_7322_TxDwordCnt_1_OFFS 0x13030 | ||
2983 | #define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000 | ||
2984 | |||
2985 | #define QIB_7322_TxLenErrCnt_1_OFFS 0x13038 | ||
2986 | #define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000 | ||
2987 | |||
2988 | #define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040 | ||
2989 | #define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000 | ||
2990 | |||
2991 | #define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048 | ||
2992 | #define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000 | ||
2993 | |||
2994 | #define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050 | ||
2995 | #define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000 | ||
2996 | |||
2997 | #define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058 | ||
2998 | #define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000 | ||
2999 | |||
3000 | #define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060 | ||
3001 | #define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000 | ||
3002 | |||
3003 | #define QIB_7322_RxDataPktCnt_1_OFFS 0x13068 | ||
3004 | #define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000 | ||
3005 | |||
3006 | #define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070 | ||
3007 | #define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000 | ||
3008 | |||
3009 | #define QIB_7322_RxDwordCnt_1_OFFS 0x13078 | ||
3010 | #define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000 | ||
3011 | |||
3012 | #define QIB_7322_RxLenErrCnt_1_OFFS 0x13080 | ||
3013 | #define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000 | ||
3014 | |||
3015 | #define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088 | ||
3016 | #define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000 | ||
3017 | |||
3018 | #define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090 | ||
3019 | #define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000 | ||
3020 | |||
3021 | #define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098 | ||
3022 | #define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000 | ||
3023 | |||
3024 | #define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0 | ||
3025 | #define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000 | ||
3026 | |||
3027 | #define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8 | ||
3028 | #define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000 | ||
3029 | |||
3030 | #define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0 | ||
3031 | #define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000 | ||
3032 | |||
3033 | #define QIB_7322_RxEBPCnt_1_OFFS 0x130B8 | ||
3034 | #define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000 | ||
3035 | |||
3036 | #define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0 | ||
3037 | #define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000 | ||
3038 | |||
3039 | #define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8 | ||
3040 | #define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000 | ||
3041 | |||
3042 | #define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0 | ||
3043 | #define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000 | ||
3044 | |||
3045 | #define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0 | ||
3046 | #define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000 | ||
3047 | |||
3048 | #define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180 | ||
3049 | #define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000 | ||
3050 | |||
3051 | #define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188 | ||
3052 | #define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000 | ||
3053 | |||
3054 | #define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190 | ||
3055 | #define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000 | ||
3056 | |||
3057 | #define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198 | ||
3058 | #define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000 | ||
3059 | |||
3060 | #define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8 | ||
3061 | #define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000 | ||
3062 | |||
3063 | #define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0 | ||
3064 | #define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000 | ||
3065 | |||
3066 | #define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8 | ||
3067 | #define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000 | ||
3068 | |||
3069 | #define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0 | ||
3070 | #define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000 | ||
3071 | |||
3072 | #define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8 | ||
3073 | #define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000 | ||
3074 | |||
3075 | #define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0 | ||
3076 | #define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000 | ||
3077 | |||
3078 | #define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8 | ||
3079 | #define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000 | ||
3080 | |||
3081 | #define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8 | ||
3082 | #define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000 | ||
3083 | |||
3084 | #define QIB_7322_PSRcvDataCount_1_OFFS 0x13218 | ||
3085 | #define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000 | ||
3086 | |||
3087 | #define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220 | ||
3088 | #define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000 | ||
3089 | |||
3090 | #define QIB_7322_PSXmitDataCount_1_OFFS 0x13228 | ||
3091 | #define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000 | ||
3092 | |||
3093 | #define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230 | ||
3094 | #define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000 | ||
3095 | |||
3096 | #define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238 | ||
3097 | #define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000 | ||
3098 | |||
3099 | #define QIB_7322_RcvEgrArray_OFFS 0x14000 | ||
3100 | #define QIB_7322_RcvEgrArray_DEF 0x0000000000000000 | ||
3101 | #define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25 | ||
3102 | #define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27 | ||
3103 | #define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7 | ||
3104 | #define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0 | ||
3105 | #define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24 | ||
3106 | #define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF | ||
3107 | |||
3108 | #define QIB_7322_RcvTIDArray0_OFFS 0x50000 | ||
3109 | #define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000 | ||
3110 | #define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25 | ||
3111 | #define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27 | ||
3112 | #define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7 | ||
3113 | #define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0 | ||
3114 | #define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24 | ||
3115 | #define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF | ||
3116 | |||
3117 | #define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000 | ||
3118 | #define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000 | ||
3119 | |||
3120 | #define QIB_7322_RcvHdrTail0_OFFS 0x200000 | ||
3121 | #define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000 | ||
3122 | |||
3123 | #define QIB_7322_RcvHdrHead0_OFFS 0x200008 | ||
3124 | #define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000 | ||
3125 | #define QIB_7322_RcvHdrHead0_counter_LSB 0x20 | ||
3126 | #define QIB_7322_RcvHdrHead0_counter_MSB 0x2F | ||
3127 | #define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF | ||
3128 | #define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0 | ||
3129 | #define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F | ||
3130 | #define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF | ||
3131 | |||
3132 | #define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010 | ||
3133 | #define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000 | ||
3134 | |||
3135 | #define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018 | ||
3136 | #define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000 | ||
3137 | |||
3138 | #define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000 | ||
3139 | #define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000 | ||
3140 | #define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C | ||
3141 | #define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C | ||
3142 | #define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1 | ||
3143 | #define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B | ||
3144 | #define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B | ||
3145 | #define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1 | ||
3146 | #define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16 | ||
3147 | #define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16 | ||
3148 | #define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1 | ||
3149 | #define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15 | ||
3150 | #define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15 | ||
3151 | #define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1 | ||
3152 | #define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14 | ||
3153 | #define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14 | ||
3154 | #define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1 | ||
3155 | #define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13 | ||
3156 | #define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13 | ||
3157 | #define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1 | ||
3158 | #define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB | ||
3159 | #define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12 | ||
3160 | #define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF | ||
3161 | #define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0 | ||
3162 | #define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA | ||
3163 | #define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF | ||
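The generated *_LSB/*_MSB/*_RMASK triplets above follow one convention: _LSB is the bit position of the field (and therefore the shift count) and _RMASK is the right-justified mask. A minimal sketch of how such a field is typically extracted; the QIB_FIELD_GET helper and function name are illustrative, not part of the patch:

#include <stdint.h>

/* illustrative helper: shift a 64-bit register value down to the field's
 * LSB and apply the right-justified mask */
#define QIB_FIELD_GET(regval, field) \
	(((uint64_t)(regval) >> field##_LSB) & field##_RMASK)

/* example: pull the 16-bit counter out of a RcvHdrHead0 value */
static inline uint64_t rcvhdrhead0_counter(uint64_t regval)
{
	return QIB_FIELD_GET(regval, QIB_7322_RcvHdrHead0_counter);
}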
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h new file mode 100644 index 000000000000..b3955ed8f794 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_common.h | |||
@@ -0,0 +1,758 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef _QIB_COMMON_H | ||
36 | #define _QIB_COMMON_H | ||
37 | |||
38 | /* | ||
39 | * This file contains defines, structures, etc. that are used | ||
40 | * to communicate between kernel and user code. | ||
41 | */ | ||
42 | |||
43 | /* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */ | ||
44 | #define QIB_SRC_OUI_1 0x00 | ||
45 | #define QIB_SRC_OUI_2 0x11 | ||
46 | #define QIB_SRC_OUI_3 0x75 | ||
47 | |||
48 | /* version of protocol header (known to chip also). In the long run, | ||
49 | * we should be able to generate and accept a range of version numbers; | ||
50 | * for now we only accept one, and it's compiled in. | ||
51 | */ | ||
52 | #define IPS_PROTO_VERSION 2 | ||
53 | |||
54 | /* | ||
55 | * These are compile time constants that you may want to enable or disable | ||
56 | * if you are trying to debug problems with code or performance. | ||
57 | * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in | ||
58 | * fastpath code | ||
59 | * QIB_TRACE_REGWRITES define as 1 if you want register writes to be | ||
60 | * traced in fastpath code | ||
61 | * _QIB_TRACING define as 0 if you want to remove all tracing in a | ||
62 | * compilation unit | ||
63 | */ | ||
64 | |||
65 | /* | ||
66 | * The value in the BTH QP field that QLogic_IB uses to differentiate | ||
67 | * an qlogic_ib protocol IB packet vs standard IB transport | ||
68 | * This it needs to be even (0x656b78), because the LSB is sometimes | ||
69 | * used for the MSB of context. The change may cause a problem | ||
70 | * interoperating with older software. | ||
71 | */ | ||
72 | #define QIB_KD_QP 0x656b78 | ||
73 | |||
74 | /* | ||
75 | * These are the status bits readable (in ascii form, 64bit value) | ||
76 | * from the "status" sysfs file. For binary compatibility, values | ||
77 | * must remain as is; removed states can be reused for different | ||
78 | * purposes. | ||
79 | */ | ||
80 | #define QIB_STATUS_INITTED 0x1 /* basic initialization done */ | ||
81 | /* Chip has been found and initted */ | ||
82 | #define QIB_STATUS_CHIP_PRESENT 0x20 | ||
83 | /* IB link is at ACTIVE, usable for data traffic */ | ||
84 | #define QIB_STATUS_IB_READY 0x40 | ||
85 | /* link is configured, LID, MTU, etc. have been set */ | ||
86 | #define QIB_STATUS_IB_CONF 0x80 | ||
87 | /* A Fatal hardware error has occurred. */ | ||
88 | #define QIB_STATUS_HWERROR 0x200 | ||
89 | |||
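Since these status bits share one 64-bit value (exposed through the "status" file and, as described later, the spi_status mapping), a monitoring tool can test them with plain bitwise AND. A minimal sketch, with an illustrative helper name:

/* illustrative: decide whether the link is usable for data traffic */
static inline int qib_link_usable(__u64 status)
{
	return (status & QIB_STATUS_CHIP_PRESENT) &&
	       (status & QIB_STATUS_IB_CONF) &&
	       (status & QIB_STATUS_IB_READY) &&
	       !(status & QIB_STATUS_HWERROR);
}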
90 | /* | ||
91 | * The list of usermode accessible registers. Also see Reg_* later in file. | ||
92 | */ | ||
93 | enum qib_ureg { | ||
94 | /* (RO) DMA RcvHdr to be used next. */ | ||
95 | ur_rcvhdrtail = 0, | ||
96 | /* (RW) RcvHdr entry to be processed next by host. */ | ||
97 | ur_rcvhdrhead = 1, | ||
98 | /* (RO) Index of next Eager index to use. */ | ||
99 | ur_rcvegrindextail = 2, | ||
100 | /* (RW) Eager TID to be processed next */ | ||
101 | ur_rcvegrindexhead = 3, | ||
102 | /* For internal use only; max register number. */ | ||
103 | _QIB_UregMax | ||
104 | }; | ||
105 | |||
106 | /* bit values for spi_runtime_flags */ | ||
107 | #define QIB_RUNTIME_PCIE 0x0002 | ||
108 | #define QIB_RUNTIME_FORCE_WC_ORDER 0x0004 | ||
109 | #define QIB_RUNTIME_RCVHDR_COPY 0x0008 | ||
110 | #define QIB_RUNTIME_MASTER 0x0010 | ||
111 | #define QIB_RUNTIME_RCHK 0x0020 | ||
112 | #define QIB_RUNTIME_NODMA_RTAIL 0x0080 | ||
113 | #define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100 | ||
114 | #define QIB_RUNTIME_SDMA 0x0200 | ||
115 | #define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400 | ||
116 | #define QIB_RUNTIME_PIO_REGSWAPPED 0x0800 | ||
117 | #define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000 | ||
118 | #define QIB_RUNTIME_CTXT_REDIRECT 0x2000 | ||
119 | #define QIB_RUNTIME_HDRSUPP 0x4000 | ||
120 | |||
121 | /* | ||
122 | * This structure is returned by qib_userinit() immediately after | ||
123 | * open to get implementation-specific info, and info specific to this | ||
124 | * instance. | ||
125 | * | ||
126 | * This struct must have explicit pad fields where type sizes | ||
127 | * may result in different alignments between 32 and 64 bit | ||
128 | * programs, since the 64 bit kernel requires the user code | ||
129 | * to have matching offsets. | ||
130 | */ | ||
131 | struct qib_base_info { | ||
132 | /* version of hardware, for feature checking. */ | ||
133 | __u32 spi_hw_version; | ||
134 | /* version of software, for feature checking. */ | ||
135 | __u32 spi_sw_version; | ||
136 | /* QLogic_IB context assigned, goes into sent packets */ | ||
137 | __u16 spi_ctxt; | ||
138 | __u16 spi_subctxt; | ||
139 | /* | ||
141 | * IB MTU; a packet's IB data must be less than this. | ||
141 | * The MTU is in bytes, and will be a multiple of 4 bytes. | ||
142 | */ | ||
143 | __u32 spi_mtu; | ||
144 | /* | ||
145 | * Size of a PIO buffer. Any given packet's total size must be less | ||
146 | * than this (in words). Included is the starting control word, so | ||
147 | * if 513 is returned, then total pkt size is 512 words or less. | ||
148 | */ | ||
149 | __u32 spi_piosize; | ||
150 | /* size of the TID cache in qlogic_ib, in entries */ | ||
151 | __u32 spi_tidcnt; | ||
152 | /* size of the TID Eager list in qlogic_ib, in entries */ | ||
153 | __u32 spi_tidegrcnt; | ||
154 | /* size of a single receive header queue entry in words. */ | ||
155 | __u32 spi_rcvhdrent_size; | ||
156 | /* | ||
157 | * Count of receive header queue entries allocated. | ||
158 | * This may be less than the spu_rcvhdrcnt passed in! | ||
159 | */ | ||
160 | __u32 spi_rcvhdr_cnt; | ||
161 | |||
162 | /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */ | ||
163 | __u32 spi_runtime_flags; | ||
164 | |||
165 | /* address where hardware receive header queue is mapped */ | ||
166 | __u64 spi_rcvhdr_base; | ||
167 | |||
168 | /* user program. */ | ||
169 | |||
170 | /* base address of eager TID receive buffers used by hardware. */ | ||
171 | __u64 spi_rcv_egrbufs; | ||
172 | |||
173 | /* Allocated by initialization code, not by protocol. */ | ||
174 | |||
175 | /* | ||
176 | * Size of each TID buffer in host memory, starting at | ||
177 | * spi_rcv_egrbufs. The buffers are virtually contiguous. | ||
178 | */ | ||
179 | __u32 spi_rcv_egrbufsize; | ||
180 | /* | ||
181 | * The special QP (queue pair) value that identifies a qlogic_ib | ||
182 | * protocol packet from standard IB packets. More, probably much | ||
183 | * more, to be added. | ||
184 | */ | ||
185 | __u32 spi_qpair; | ||
186 | |||
187 | /* | ||
188 | * User register base for init code, not to be used directly by | ||
189 | * protocol or applications. Always points to chip registers, | ||
190 | * for normal or shared context. | ||
191 | */ | ||
192 | __u64 spi_uregbase; | ||
193 | /* | ||
194 | * Maximum buffer size in bytes that can be used in a single TID | ||
195 | * entry (assuming the buffer is aligned to this boundary). This is | ||
196 | * the minimum of what the hardware and software support. Guaranteed | ||
197 | * to be a power of 2. | ||
198 | */ | ||
199 | __u32 spi_tid_maxsize; | ||
200 | /* | ||
201 | * alignment of each pio send buffer (byte count | ||
202 | * to add to spi_piobufbase to get to second buffer) | ||
203 | */ | ||
204 | __u32 spi_pioalign; | ||
205 | /* | ||
206 | * The index of the first pio buffer available to this process; | ||
207 | * needed to do lookup in spi_pioavailaddr; not added to | ||
208 | * spi_piobufbase. | ||
209 | */ | ||
210 | __u32 spi_pioindex; | ||
211 | /* number of buffers mapped for this process */ | ||
212 | __u32 spi_piocnt; | ||
213 | |||
214 | /* | ||
215 | * Base address of writeonly pio buffers for this process. | ||
216 | * Each buffer has spi_piosize words, and is aligned on spi_pioalign | ||
217 | * boundaries. spi_piocnt buffers are mapped from this address | ||
218 | */ | ||
219 | __u64 spi_piobufbase; | ||
220 | |||
221 | /* | ||
222 | * Base address of readonly memory copy of the pioavail registers. | ||
223 | * There are 2 bits for each buffer. | ||
224 | */ | ||
225 | __u64 spi_pioavailaddr; | ||
226 | |||
227 | /* | ||
228 | * Address where driver updates a copy of the interface and driver | ||
229 | * status (QIB_STATUS_*) as a 64 bit value. It's followed by a | ||
230 | * link status qword (formerly combined with driver status), then a | ||
231 | * string indicating hardware error, if there was one. | ||
232 | */ | ||
233 | __u64 spi_status; | ||
234 | |||
235 | /* number of chip ctxts available to user processes */ | ||
236 | __u32 spi_nctxts; | ||
237 | __u16 spi_unit; /* unit number of chip we are using */ | ||
238 | __u16 spi_port; /* IB port number we are using */ | ||
239 | /* num bufs in each contiguous set */ | ||
240 | __u32 spi_rcv_egrperchunk; | ||
241 | /* size in bytes of each contiguous set */ | ||
242 | __u32 spi_rcv_egrchunksize; | ||
243 | /* total size of mmap to cover full rcvegrbuffers */ | ||
244 | __u32 spi_rcv_egrbuftotlen; | ||
245 | __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */ | ||
246 | /* address of readonly memory copy of the rcvhdrq tail register. */ | ||
247 | __u64 spi_rcvhdr_tailaddr; | ||
248 | |||
249 | /* | ||
250 | * shared memory pages for subctxts if ctxt is shared; these cover | ||
251 | * all the processes in the group sharing a single context. | ||
252 | * All have enough space for the num_subcontexts value for this job. | ||
253 | */ | ||
254 | __u64 spi_subctxt_uregbase; | ||
255 | __u64 spi_subctxt_rcvegrbuf; | ||
256 | __u64 spi_subctxt_rcvhdr_base; | ||
257 | |||
258 | /* shared memory page for send buffer disarm status */ | ||
259 | __u64 spi_sendbuf_status; | ||
260 | } __attribute__ ((aligned(8))); | ||
261 | |||
262 | /* | ||
263 | * This version number is given to the driver by the user code during | ||
264 | * initialization in the spu_userversion field of qib_user_info, so | ||
265 | * the driver can check for compatibility with user code. | ||
266 | * | ||
267 | * The major version changes when data structures | ||
268 | * change in an incompatible way. The driver must be the same or higher | ||
269 | * for initialization to succeed. In some cases, a higher version | ||
270 | * driver will not interoperate with older software, and initialization | ||
271 | * will return an error. | ||
272 | */ | ||
273 | #define QIB_USER_SWMAJOR 1 | ||
274 | |||
275 | /* | ||
276 | * Minor version differences are always compatible | ||
277 | * within a major version; however, if user software is newer | ||
278 | * than driver software, some new features and/or structure fields | ||
279 | * may not be implemented; the user code must deal with this if it | ||
280 | * cares, or it must abort after initialization reports the difference. | ||
281 | */ | ||
282 | #define QIB_USER_SWMINOR 10 | ||
283 | |||
284 | #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR) | ||
285 | |||
286 | #ifndef QIB_KERN_TYPE | ||
287 | #define QIB_KERN_TYPE 0 | ||
288 | #define QIB_IDSTR "QLogic kernel.org driver" | ||
289 | #endif | ||
290 | |||
291 | /* | ||
292 | * Similarly, this is the kernel version going back to the user. It's | ||
293 | * slightly different, in that we want to tell if the driver was built as | ||
294 | * part of a QLogic release, or is the driver from openfabrics.org, | ||
295 | * kernel.org, or a standard distribution, for support reasons. | ||
296 | * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied. | ||
297 | * | ||
298 | * It's returned by the driver to the user code during initialization in the | ||
299 | * spi_sw_version field of qib_base_info, so the user code can in turn | ||
300 | * check for compatibility with the kernel. | ||
301 | */ | ||
302 | #define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION) | ||
303 | |||
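A minimal sketch of the user-side check implied by the comments above: the major versions must match, while a minor-version difference only limits which optional features are available. The helper name and the decision to treat only a major mismatch as fatal are illustrative assumptions, not part of the patch:

/* illustrative compatibility check against the spi_sw_version that the
 * driver returns in qib_base_info */
static inline int qib_sw_compatible(__u32 spi_sw_version)
{
	/* bit 31 is the QIB_KERN_TYPE flag; bits 30:16 hold the major */
	__u32 drv_major = (spi_sw_version >> 16) & 0x7fff;

	/* a minor-version difference within the same major is tolerated;
	 * only a major mismatch means incompatible structure layouts */
	return drv_major == QIB_USER_SWMAJOR;
}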
304 | /* | ||
305 | * This structure is passed to qib_userinit() to tell the driver where | ||
306 | * user code buffers are, sizes, etc. The offsets and sizes of the | ||
307 | * fields must remain unchanged, for binary compatibility. It can | ||
308 | * be extended if userversion is changed, so user code can tell, if needed. | ||
309 | */ | ||
310 | struct qib_user_info { | ||
311 | /* | ||
312 | * version of user software, to detect compatibility issues. | ||
313 | * Should be set to QIB_USER_SWVERSION. | ||
314 | */ | ||
315 | __u32 spu_userversion; | ||
316 | |||
317 | __u32 _spu_unused2; | ||
318 | |||
319 | /* size of struct base_info to write to */ | ||
320 | __u32 spu_base_info_size; | ||
321 | |||
322 | __u32 _spu_unused3; | ||
323 | |||
324 | /* | ||
325 | * If two or more processes wish to share a context, each process | ||
326 | * must set the spu_subctxt_cnt and spu_subctxt_id to the same | ||
327 | * values. The only restriction on the spu_subctxt_id is that | ||
328 | * it be unique for a given node. | ||
329 | */ | ||
330 | __u16 spu_subctxt_cnt; | ||
331 | __u16 spu_subctxt_id; | ||
332 | |||
333 | __u32 spu_port; /* IB port requested by user if > 0 */ | ||
334 | |||
335 | /* | ||
336 | * address of struct base_info to write to | ||
337 | */ | ||
338 | __u64 spu_base_info; | ||
339 | |||
340 | } __attribute__ ((aligned(8))); | ||
341 | |||
342 | /* User commands. */ | ||
343 | |||
344 | /* 16 available, was: old set up userspace (for old user code) */ | ||
345 | #define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */ | ||
346 | #define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */ | ||
347 | #define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */ | ||
348 | #define QIB_CMD_TID_FREE 20 /* free expected TID entries */ | ||
349 | #define QIB_CMD_SET_PART_KEY 21 /* add partition key */ | ||
350 | /* 22 available, was: return info on slave processes (for old user code) */ | ||
351 | #define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */ | ||
352 | #define QIB_CMD_USER_INIT 24 /* set up userspace */ | ||
353 | #define QIB_CMD_UNUSED_1 25 | ||
354 | #define QIB_CMD_UNUSED_2 26 | ||
355 | #define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */ | ||
356 | #define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */ | ||
357 | #define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */ | ||
358 | /* 30 is unused */ | ||
359 | #define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */ | ||
360 | #define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */ | ||
361 | /* 33 available, was a testing feature */ | ||
362 | #define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */ | ||
363 | #define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */ | ||
364 | #define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned | ||
365 | * processes: qib_cpus_list */ | ||
366 | |||
367 | /* | ||
368 | * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for | ||
369 | * compatibility with libraries from previous release. The ACK_EVENT | ||
370 | * will take appropriate driver action (if any, just DISARM for now), | ||
371 | * then clear the bits passed in as part of the mask. These bits are | ||
372 | * in the first 64bit word at spi_sendbuf_status, and are passed to | ||
373 | * the driver in the event_mask union as well. | ||
374 | */ | ||
375 | #define _QIB_EVENT_DISARM_BUFS_BIT 0 | ||
376 | #define _QIB_EVENT_LINKDOWN_BIT 1 | ||
377 | #define _QIB_EVENT_LID_CHANGE_BIT 2 | ||
378 | #define _QIB_EVENT_LMC_CHANGE_BIT 3 | ||
379 | #define _QIB_EVENT_SL2VL_CHANGE_BIT 4 | ||
380 | #define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT | ||
381 | |||
382 | #define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT) | ||
383 | #define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT) | ||
384 | #define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT) | ||
385 | #define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT) | ||
386 | #define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT) | ||
387 | |||
388 | |||
389 | /* | ||
390 | * Poll types | ||
391 | */ | ||
392 | #define QIB_POLL_TYPE_ANYRCV 0x0 | ||
393 | #define QIB_POLL_TYPE_URGENT 0x1 | ||
394 | |||
395 | struct qib_ctxt_info { | ||
396 | __u16 num_active; /* number of active units */ | ||
397 | __u16 unit; /* unit (chip) assigned to caller */ | ||
398 | __u16 port; /* IB port assigned to caller (1-based) */ | ||
399 | __u16 ctxt; /* ctxt on unit assigned to caller */ | ||
400 | __u16 subctxt; /* subctxt on unit assigned to caller */ | ||
401 | __u16 num_ctxts; /* number of ctxts available on unit */ | ||
402 | __u16 num_subctxts; /* number of subctxts opened on ctxt */ | ||
403 | __u16 rec_cpu; /* cpu # for affinity (ffff if none) */ | ||
404 | }; | ||
405 | |||
406 | struct qib_tid_info { | ||
407 | __u32 tidcnt; | ||
408 | /* make structure same size in 32 and 64 bit */ | ||
409 | __u32 tid__unused; | ||
410 | /* virtual address of first page in transfer */ | ||
411 | __u64 tidvaddr; | ||
412 | /* pointer (same size 32/64 bit) to __u16 tid array */ | ||
413 | __u64 tidlist; | ||
414 | |||
415 | /* | ||
416 | * pointer (same size 32/64 bit) to bitmap of TIDs used | ||
417 | * for this call; checked for being large enough at open | ||
418 | */ | ||
419 | __u64 tidmap; | ||
420 | }; | ||
421 | |||
422 | struct qib_cmd { | ||
423 | __u32 type; /* command type */ | ||
424 | union { | ||
425 | struct qib_tid_info tid_info; | ||
426 | struct qib_user_info user_info; | ||
427 | |||
428 | /* | ||
429 | * address in userspace where we should put the sdma | ||
430 | * inflight counter | ||
431 | */ | ||
432 | __u64 sdma_inflight; | ||
433 | /* | ||
434 | * address in userspace where we should put the sdma | ||
435 | * completion counter | ||
436 | */ | ||
437 | __u64 sdma_complete; | ||
438 | /* address in userspace of struct qib_ctxt_info to | ||
439 | write result to */ | ||
440 | __u64 ctxt_info; | ||
441 | /* enable/disable receipt of packets */ | ||
442 | __u32 recv_ctrl; | ||
443 | /* enable/disable armlaunch errors (non-zero to enable) */ | ||
444 | __u32 armlaunch_ctrl; | ||
445 | /* partition key to set */ | ||
446 | __u16 part_key; | ||
447 | /* user address of __u32 bitmask of active slaves */ | ||
448 | __u64 slave_mask_addr; | ||
449 | /* type of polling we want */ | ||
450 | __u16 poll_type; | ||
451 | /* back pressure enable bit for one particular context */ | ||
452 | __u8 ctxt_bp; | ||
453 | /* qib_user_event_ack(), _QIB_EVENT_* bits */ | ||
454 | __u64 event_mask; | ||
455 | } cmd; | ||
456 | }; | ||
457 | |||
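A minimal sketch of issuing one of the commands above, assuming a struct qib_cmd is written to the driver's character-device file descriptor; the helper name, error handling, and write()-based transport are assumptions for illustration:

#include <unistd.h>

/* illustrative: ask the driver which unit/ctxt/subctxt were assigned */
static int qib_query_ctxt(int fd, struct qib_ctxt_info *info)
{
	struct qib_cmd cmd = { .type = QIB_CMD_CTXT_INFO };

	/* the driver copies the result back to this user address */
	cmd.cmd.ctxt_info = (__u64)(unsigned long)info;

	return write(fd, &cmd, sizeof(cmd)) == sizeof(cmd) ? 0 : -1;
}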
458 | struct qib_iovec { | ||
459 | /* Pointer to data, but same size 32 and 64 bit */ | ||
460 | __u64 iov_base; | ||
461 | |||
462 | /* | ||
463 | * Length of data; don't need 64 bits, but want | ||
464 | * qib_sendpkt to remain same size as before 32 bit changes, so... | ||
465 | */ | ||
466 | __u64 iov_len; | ||
467 | }; | ||
468 | |||
469 | /* | ||
470 | * Describes a single packet for send. Each packet can have one or more | ||
471 | * buffers, but the total length (exclusive of IB headers) must be less | ||
472 | * than the MTU, and if using the PIO method, entire packet length, | ||
473 | * including IB headers, must be less than the qib_piosize value (words). | ||
474 | * Use of this necessitates including sys/uio.h | ||
475 | */ | ||
476 | struct __qib_sendpkt { | ||
477 | __u32 sps_flags; /* flags for packet (TBD) */ | ||
478 | __u32 sps_cnt; /* number of entries to use in sps_iov */ | ||
479 | /* array of iov's describing packet. TEMPORARY */ | ||
480 | struct qib_iovec sps_iov[4]; | ||
481 | }; | ||
482 | |||
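A minimal sketch of filling the structure above for a single-buffer packet; the helper and the caller-side length check are illustrative only:

/* illustrative: describe one contiguous buffer as a single packet */
static inline void qib_fill_sendpkt(struct __qib_sendpkt *pkt,
				    void *buf, __u64 len)
{
	pkt->sps_flags = 0;		/* no flags defined yet (TBD) */
	pkt->sps_cnt = 1;		/* one entry of sps_iov in use */
	pkt->sps_iov[0].iov_base = (__u64)(unsigned long)buf;
	pkt->sps_iov[0].iov_len = len;	/* must stay within the MTU */
}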
483 | /* | ||
484 | * Diagnostics can send a packet by "writing" the following | ||
485 | * structs to the diag data special file. | ||
486 | * This allows a custom | ||
487 | * pbc (+ static rate) qword, so that special modes and deliberate | ||
488 | * changes to CRCs can be used. The elements were also re-ordered | ||
489 | * for better alignment and to avoid padding issues. | ||
490 | */ | ||
491 | #define _DIAG_XPKT_VERS 3 | ||
492 | struct qib_diag_xpkt { | ||
493 | __u16 version; | ||
494 | __u16 unit; | ||
495 | __u16 port; | ||
496 | __u16 len; | ||
497 | __u64 data; | ||
498 | __u64 pbc_wd; | ||
499 | }; | ||
500 | |||
501 | /* | ||
502 | * Data layout in I2C flash (for GUID, etc.) | ||
503 | * All fields are little-endian binary unless otherwise stated | ||
504 | */ | ||
505 | #define QIB_FLASH_VERSION 2 | ||
506 | struct qib_flash { | ||
507 | /* flash layout version (QIB_FLASH_VERSION) */ | ||
508 | __u8 if_fversion; | ||
509 | /* checksum protecting if_length bytes */ | ||
510 | __u8 if_csum; | ||
511 | /* | ||
512 | * valid length (in use, protected by if_csum), including | ||
513 | * if_fversion and if_csum themselves | ||
514 | */ | ||
515 | __u8 if_length; | ||
516 | /* the GUID, in network order */ | ||
517 | __u8 if_guid[8]; | ||
518 | /* number of GUIDs to use, starting from if_guid */ | ||
519 | __u8 if_numguid; | ||
520 | /* the (last 10 characters of) board serial number, in ASCII */ | ||
521 | char if_serial[12]; | ||
522 | /* board mfg date (YYYYMMDD ASCII) */ | ||
523 | char if_mfgdate[8]; | ||
524 | /* last board rework/test date (YYYYMMDD ASCII) */ | ||
525 | char if_testdate[8]; | ||
526 | /* logging of error counts, TBD */ | ||
527 | __u8 if_errcntp[4]; | ||
528 | /* powered on hours, updated at driver unload */ | ||
529 | __u8 if_powerhour[2]; | ||
530 | /* ASCII free-form comment field */ | ||
531 | char if_comment[32]; | ||
532 | /* Backwards compatible prefix for longer QLogic Serial Numbers */ | ||
533 | char if_sprefix[4]; | ||
534 | /* 82 bytes used, min flash size is 128 bytes */ | ||
535 | __u8 if_future[46]; | ||
536 | }; | ||
537 | |||
538 | /* | ||
539 | * These are the counters implemented in the chip, and are listed in order. | ||
540 | * The InterCaps naming is taken straight from the chip spec. | ||
541 | */ | ||
542 | struct qlogic_ib_counters { | ||
543 | __u64 LBIntCnt; | ||
544 | __u64 LBFlowStallCnt; | ||
545 | __u64 TxSDmaDescCnt; /* was Reserved1 */ | ||
546 | __u64 TxUnsupVLErrCnt; | ||
547 | __u64 TxDataPktCnt; | ||
548 | __u64 TxFlowPktCnt; | ||
549 | __u64 TxDwordCnt; | ||
550 | __u64 TxLenErrCnt; | ||
551 | __u64 TxMaxMinLenErrCnt; | ||
552 | __u64 TxUnderrunCnt; | ||
553 | __u64 TxFlowStallCnt; | ||
554 | __u64 TxDroppedPktCnt; | ||
555 | __u64 RxDroppedPktCnt; | ||
556 | __u64 RxDataPktCnt; | ||
557 | __u64 RxFlowPktCnt; | ||
558 | __u64 RxDwordCnt; | ||
559 | __u64 RxLenErrCnt; | ||
560 | __u64 RxMaxMinLenErrCnt; | ||
561 | __u64 RxICRCErrCnt; | ||
562 | __u64 RxVCRCErrCnt; | ||
563 | __u64 RxFlowCtrlErrCnt; | ||
564 | __u64 RxBadFormatCnt; | ||
565 | __u64 RxLinkProblemCnt; | ||
566 | __u64 RxEBPCnt; | ||
567 | __u64 RxLPCRCErrCnt; | ||
568 | __u64 RxBufOvflCnt; | ||
569 | __u64 RxTIDFullErrCnt; | ||
570 | __u64 RxTIDValidErrCnt; | ||
571 | __u64 RxPKeyMismatchCnt; | ||
572 | __u64 RxP0HdrEgrOvflCnt; | ||
573 | __u64 RxP1HdrEgrOvflCnt; | ||
574 | __u64 RxP2HdrEgrOvflCnt; | ||
575 | __u64 RxP3HdrEgrOvflCnt; | ||
576 | __u64 RxP4HdrEgrOvflCnt; | ||
577 | __u64 RxP5HdrEgrOvflCnt; | ||
578 | __u64 RxP6HdrEgrOvflCnt; | ||
579 | __u64 RxP7HdrEgrOvflCnt; | ||
580 | __u64 RxP8HdrEgrOvflCnt; | ||
581 | __u64 RxP9HdrEgrOvflCnt; | ||
582 | __u64 RxP10HdrEgrOvflCnt; | ||
583 | __u64 RxP11HdrEgrOvflCnt; | ||
584 | __u64 RxP12HdrEgrOvflCnt; | ||
585 | __u64 RxP13HdrEgrOvflCnt; | ||
586 | __u64 RxP14HdrEgrOvflCnt; | ||
587 | __u64 RxP15HdrEgrOvflCnt; | ||
588 | __u64 RxP16HdrEgrOvflCnt; | ||
589 | __u64 IBStatusChangeCnt; | ||
590 | __u64 IBLinkErrRecoveryCnt; | ||
591 | __u64 IBLinkDownedCnt; | ||
592 | __u64 IBSymbolErrCnt; | ||
593 | __u64 RxVL15DroppedPktCnt; | ||
594 | __u64 RxOtherLocalPhyErrCnt; | ||
595 | __u64 PcieRetryBufDiagQwordCnt; | ||
596 | __u64 ExcessBufferOvflCnt; | ||
597 | __u64 LocalLinkIntegrityErrCnt; | ||
598 | __u64 RxVlErrCnt; | ||
599 | __u64 RxDlidFltrCnt; | ||
600 | }; | ||
601 | |||
602 | /* | ||
603 | * The next set of defines is for packet headers, and chip register | ||
604 | * and memory bits that are visible to and/or used by user-mode software. | ||
605 | */ | ||
606 | |||
607 | /* RcvHdrFlags bits */ | ||
608 | #define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF | ||
609 | #define QLOGIC_IB_RHF_LENGTH_SHIFT 0 | ||
610 | #define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7 | ||
611 | #define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11 | ||
612 | #define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF | ||
613 | #define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16 | ||
614 | #define QLOGIC_IB_RHF_SEQ_MASK 0xF | ||
615 | #define QLOGIC_IB_RHF_SEQ_SHIFT 0 | ||
616 | #define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF | ||
617 | #define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4 | ||
618 | #define QLOGIC_IB_RHF_H_ICRCERR 0x80000000 | ||
619 | #define QLOGIC_IB_RHF_H_VCRCERR 0x40000000 | ||
620 | #define QLOGIC_IB_RHF_H_PARITYERR 0x20000000 | ||
621 | #define QLOGIC_IB_RHF_H_LENERR 0x10000000 | ||
622 | #define QLOGIC_IB_RHF_H_MTUERR 0x08000000 | ||
623 | #define QLOGIC_IB_RHF_H_IHDRERR 0x04000000 | ||
624 | #define QLOGIC_IB_RHF_H_TIDERR 0x02000000 | ||
625 | #define QLOGIC_IB_RHF_H_MKERR 0x01000000 | ||
626 | #define QLOGIC_IB_RHF_H_IBERR 0x00800000 | ||
627 | #define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000 | ||
628 | #define QLOGIC_IB_RHF_L_USE_EGR 0x80000000 | ||
629 | #define QLOGIC_IB_RHF_L_SWA 0x00008000 | ||
630 | #define QLOGIC_IB_RHF_L_SWB 0x00004000 | ||
631 | |||
632 | /* qlogic_ib header fields */ | ||
633 | #define QLOGIC_IB_I_VERS_MASK 0xF | ||
634 | #define QLOGIC_IB_I_VERS_SHIFT 28 | ||
635 | #define QLOGIC_IB_I_CTXT_MASK 0xF | ||
636 | #define QLOGIC_IB_I_CTXT_SHIFT 24 | ||
637 | #define QLOGIC_IB_I_TID_MASK 0x7FF | ||
638 | #define QLOGIC_IB_I_TID_SHIFT 13 | ||
639 | #define QLOGIC_IB_I_OFFSET_MASK 0x1FFF | ||
640 | #define QLOGIC_IB_I_OFFSET_SHIFT 0 | ||
641 | |||
642 | /* K_PktFlags bits */ | ||
643 | #define QLOGIC_IB_KPF_INTR 0x1 | ||
644 | #define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3 | ||
645 | #define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1 | ||
646 | |||
647 | #define QLOGIC_IB_MAX_SUBCTXT 4 | ||
648 | |||
649 | /* SendPIO per-buffer control */ | ||
650 | #define QLOGIC_IB_SP_TEST 0x40 | ||
651 | #define QLOGIC_IB_SP_TESTEBP 0x20 | ||
652 | #define QLOGIC_IB_SP_TRIGGER_SHIFT 15 | ||
653 | |||
654 | /* SendPIOAvail bits */ | ||
655 | #define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1 | ||
656 | #define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0 | ||
657 | |||
658 | /* qlogic_ib header format */ | ||
659 | struct qib_header { | ||
660 | /* | ||
661 | * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset - | ||
662 | * 14 bits before ECO change ~28 Dec 03. After that, Vers 4, | ||
663 | * Context 4, TID 11, offset 13. | ||
664 | */ | ||
665 | __le32 ver_ctxt_tid_offset; | ||
666 | __le16 chksum; | ||
667 | __le16 pkt_flags; | ||
668 | }; | ||
669 | |||
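A hedged illustration (not part of this patch): the ver_ctxt_tid_offset word can be assembled from the QLOGIC_IB_I_* masks and shifts defined above (post-ECO widths 4/4/11/13). The helper name is an assumption, shown only to make the bit layout concrete; it assumes the same byte-order helpers used by the accessor functions below.

static inline __le32 make_ver_ctxt_tid_offset(__u32 ver, __u32 ctxt,
					      __u32 tid, __u32 offset)
{
	return __cpu_to_le32(
		((ver    & QLOGIC_IB_I_VERS_MASK)   << QLOGIC_IB_I_VERS_SHIFT)   |
		((ctxt   & QLOGIC_IB_I_CTXT_MASK)   << QLOGIC_IB_I_CTXT_SHIFT)   |
		((tid    & QLOGIC_IB_I_TID_MASK)    << QLOGIC_IB_I_TID_SHIFT)    |
		((offset & QLOGIC_IB_I_OFFSET_MASK) << QLOGIC_IB_I_OFFSET_SHIFT));
}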
670 | /* | ||
671 | * qlogic_ib user message header format. | ||
672 | * This structure contains the first 4 fields common to all protocols | ||
673 | * that employ qlogic_ib. | ||
674 | */ | ||
675 | struct qib_message_header { | ||
676 | __be16 lrh[4]; | ||
677 | __be32 bth[3]; | ||
678 | /* fields below this point are in host byte order */ | ||
679 | struct qib_header iph; | ||
680 | __u8 sub_opcode; | ||
681 | }; | ||
682 | |||
683 | /* IB - LRH header consts */ | ||
684 | #define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */ | ||
685 | #define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */ | ||
686 | |||
687 | /* misc. */ | ||
688 | #define SIZE_OF_CRC 1 | ||
689 | |||
690 | #define QIB_DEFAULT_P_KEY 0xFFFF | ||
691 | #define QIB_PERMISSIVE_LID 0xFFFF | ||
692 | #define QIB_AETH_CREDIT_SHIFT 24 | ||
693 | #define QIB_AETH_CREDIT_MASK 0x1F | ||
694 | #define QIB_AETH_CREDIT_INVAL 0x1F | ||
695 | #define QIB_PSN_MASK 0xFFFFFF | ||
696 | #define QIB_MSN_MASK 0xFFFFFF | ||
697 | #define QIB_QPN_MASK 0xFFFFFF | ||
698 | #define QIB_MULTICAST_LID_BASE 0xC000 | ||
699 | #define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK | ||
700 | #define QIB_MULTICAST_QPN 0xFFFFFF | ||
701 | |||
702 | /* Receive Header Queue: receive type (from qlogic_ib) */ | ||
703 | #define RCVHQ_RCV_TYPE_EXPECTED 0 | ||
704 | #define RCVHQ_RCV_TYPE_EAGER 1 | ||
705 | #define RCVHQ_RCV_TYPE_NON_KD 2 | ||
706 | #define RCVHQ_RCV_TYPE_ERROR 3 | ||
707 | |||
708 | #define QIB_HEADER_QUEUE_WORDS 9 | ||
709 | |||
710 | /* functions for extracting fields from rcvhdrq entries for the driver. | ||
711 | */ | ||
712 | static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf) | ||
713 | { | ||
714 | return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK; | ||
715 | } | ||
716 | |||
717 | static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf) | ||
718 | { | ||
719 | return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) & | ||
720 | QLOGIC_IB_RHF_RCVTYPE_MASK; | ||
721 | } | ||
722 | |||
723 | static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf) | ||
724 | { | ||
725 | return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) & | ||
726 | QLOGIC_IB_RHF_LENGTH_MASK) << 2; | ||
727 | } | ||
728 | |||
729 | static inline __u32 qib_hdrget_index(const __le32 *rbuf) | ||
730 | { | ||
731 | return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) & | ||
732 | QLOGIC_IB_RHF_EGRINDEX_MASK; | ||
733 | } | ||
734 | |||
735 | static inline __u32 qib_hdrget_seq(const __le32 *rbuf) | ||
736 | { | ||
737 | return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) & | ||
738 | QLOGIC_IB_RHF_SEQ_MASK; | ||
739 | } | ||
740 | |||
741 | static inline __u32 qib_hdrget_offset(const __le32 *rbuf) | ||
742 | { | ||
743 | return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) & | ||
744 | QLOGIC_IB_RHF_HDRQ_OFFSET_MASK; | ||
745 | } | ||
746 | |||
747 | static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf) | ||
748 | { | ||
749 | return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR; | ||
750 | } | ||
751 | |||
752 | static inline __u32 qib_hdrget_qib_ver(__le32 hdrword) | ||
753 | { | ||
754 | return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) & | ||
755 | QLOGIC_IB_I_VERS_MASK; | ||
756 | } | ||
757 | |||
758 | #endif /* _QIB_COMMON_H */ | ||
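A hedged sketch (not part of this patch) of how a driver receive loop might combine the accessors above for one rcvhdrq entry; the argument and return conventions are assumptions for illustration.

static inline int classify_rcvhdrq_entry(const __le32 *rhf, __u32 *egr_index)
{
	if (qib_hdrget_err_flags(rhf))
		return -1;                          /* hardware flagged an error */
	if (qib_hdrget_use_egr_buf(rhf))
		*egr_index = qib_hdrget_index(rhf); /* payload sits in an eager buffer */
	return qib_hdrget_rcv_type(rhf);            /* one of RCVHQ_RCV_TYPE_* */
}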
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c new file mode 100644 index 000000000000..a86cbf880f98 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_cq.c | |||
@@ -0,0 +1,484 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/err.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | |||
38 | #include "qib_verbs.h" | ||
39 | |||
40 | /** | ||
41 | * qib_cq_enter - add a new entry to the completion queue | ||
42 | * @cq: completion queue | ||
43 | * @entry: work completion entry to add | ||
44 | * @solicited: true if @entry is a solicited entry | ||
45 | * | ||
46 | * This may be called with qp->s_lock held. | ||
47 | */ | ||
48 | void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) | ||
49 | { | ||
50 | struct qib_cq_wc *wc; | ||
51 | unsigned long flags; | ||
52 | u32 head; | ||
53 | u32 next; | ||
54 | |||
55 | spin_lock_irqsave(&cq->lock, flags); | ||
56 | |||
57 | /* | ||
58 | * Note that the head pointer might be writable by user processes. | ||
59 | * Take care to verify it is a sane value. | ||
60 | */ | ||
61 | wc = cq->queue; | ||
62 | head = wc->head; | ||
63 | if (head >= (unsigned) cq->ibcq.cqe) { | ||
64 | head = cq->ibcq.cqe; | ||
65 | next = 0; | ||
66 | } else | ||
67 | next = head + 1; | ||
68 | if (unlikely(next == wc->tail)) { | ||
69 | spin_unlock_irqrestore(&cq->lock, flags); | ||
70 | if (cq->ibcq.event_handler) { | ||
71 | struct ib_event ev; | ||
72 | |||
73 | ev.device = cq->ibcq.device; | ||
74 | ev.element.cq = &cq->ibcq; | ||
75 | ev.event = IB_EVENT_CQ_ERR; | ||
76 | cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); | ||
77 | } | ||
78 | return; | ||
79 | } | ||
80 | if (cq->ip) { | ||
81 | wc->uqueue[head].wr_id = entry->wr_id; | ||
82 | wc->uqueue[head].status = entry->status; | ||
83 | wc->uqueue[head].opcode = entry->opcode; | ||
84 | wc->uqueue[head].vendor_err = entry->vendor_err; | ||
85 | wc->uqueue[head].byte_len = entry->byte_len; | ||
86 | wc->uqueue[head].ex.imm_data = | ||
87 | (__u32 __force)entry->ex.imm_data; | ||
88 | wc->uqueue[head].qp_num = entry->qp->qp_num; | ||
89 | wc->uqueue[head].src_qp = entry->src_qp; | ||
90 | wc->uqueue[head].wc_flags = entry->wc_flags; | ||
91 | wc->uqueue[head].pkey_index = entry->pkey_index; | ||
92 | wc->uqueue[head].slid = entry->slid; | ||
93 | wc->uqueue[head].sl = entry->sl; | ||
94 | wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; | ||
95 | wc->uqueue[head].port_num = entry->port_num; | ||
96 | /* Make sure entry is written before the head index. */ | ||
97 | smp_wmb(); | ||
98 | } else | ||
99 | wc->kqueue[head] = *entry; | ||
100 | wc->head = next; | ||
101 | |||
102 | if (cq->notify == IB_CQ_NEXT_COMP || | ||
103 | (cq->notify == IB_CQ_SOLICITED && solicited)) { | ||
104 | cq->notify = IB_CQ_NONE; | ||
105 | cq->triggered++; | ||
106 | /* | ||
107 | * This will cause send_complete() to be called in | ||
108 | * another thread. | ||
109 | */ | ||
110 | queue_work(qib_cq_wq, &cq->comptask); | ||
111 | } | ||
112 | |||
113 | spin_unlock_irqrestore(&cq->lock, flags); | ||
114 | } | ||
115 | |||
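As a hedged sketch (not part of this patch), the user-space consumer of the mmapped queue written above might look roughly like this. The head/tail/uqueue fields match the usage in qib_cq_enter(), but the function name, the barrier choice, and reusing the kernel struct name are assumptions about the verbs provider.

static int user_cq_poll_one(struct qib_cq_wc *wc, uint32_t cqe,
			    struct ib_uverbs_wc *out)
{
	uint32_t tail = wc->tail;

	if (tail > cqe)                 /* sanitize, as the kernel side does */
		tail = cqe;
	if (tail == wc->head)           /* ring is empty */
		return 0;
	__sync_synchronize();           /* pairs with smp_wmb() in qib_cq_enter() */
	*out = wc->uqueue[tail];
	wc->tail = (tail >= cqe) ? 0 : tail + 1;
	return 1;
}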
116 | /** | ||
117 | * qib_poll_cq - poll for work completion entries | ||
118 | * @ibcq: the completion queue to poll | ||
119 | * @num_entries: the maximum number of entries to return | ||
120 | * @entry: pointer to array where work completions are placed | ||
121 | * | ||
122 | * Returns the number of completion entries polled. | ||
123 | * | ||
124 | * This may be called from interrupt context. Also called by ib_poll_cq() | ||
125 | * in the generic verbs code. | ||
126 | */ | ||
127 | int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | ||
128 | { | ||
129 | struct qib_cq *cq = to_icq(ibcq); | ||
130 | struct qib_cq_wc *wc; | ||
131 | unsigned long flags; | ||
132 | int npolled; | ||
133 | u32 tail; | ||
134 | |||
135 | /* The kernel can only poll a kernel completion queue */ | ||
136 | if (cq->ip) { | ||
137 | npolled = -EINVAL; | ||
138 | goto bail; | ||
139 | } | ||
140 | |||
141 | spin_lock_irqsave(&cq->lock, flags); | ||
142 | |||
143 | wc = cq->queue; | ||
144 | tail = wc->tail; | ||
145 | if (tail > (u32) cq->ibcq.cqe) | ||
146 | tail = (u32) cq->ibcq.cqe; | ||
147 | for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { | ||
148 | if (tail == wc->head) | ||
149 | break; | ||
150 | /* The kernel doesn't need a RMB since it has the lock. */ | ||
151 | *entry = wc->kqueue[tail]; | ||
152 | if (tail >= cq->ibcq.cqe) | ||
153 | tail = 0; | ||
154 | else | ||
155 | tail++; | ||
156 | } | ||
157 | wc->tail = tail; | ||
158 | |||
159 | spin_unlock_irqrestore(&cq->lock, flags); | ||
160 | |||
161 | bail: | ||
162 | return npolled; | ||
163 | } | ||
164 | |||
165 | static void send_complete(struct work_struct *work) | ||
166 | { | ||
167 | struct qib_cq *cq = container_of(work, struct qib_cq, comptask); | ||
168 | |||
169 | /* | ||
170 | * The completion handler will most likely rearm the notification | ||
171 | * and poll for all pending entries. If a new completion entry | ||
172 | * is added while we are in this routine, queue_work() | ||
173 | * won't call us again until we return, so we check triggered to | ||
174 | * see if we need to call the handler again. | ||
175 | */ | ||
176 | for (;;) { | ||
177 | u8 triggered = cq->triggered; | ||
178 | |||
179 | /* | ||
180 | * IPoIB connected mode assumes the callback is from a | ||
181 | * soft IRQ. We simulate this by blocking "bottom halves". | ||
182 | * See the implementation for ipoib_cm_handle_tx_wc(), | ||
183 | * netif_tx_lock_bh() and netif_tx_lock(). | ||
184 | */ | ||
185 | local_bh_disable(); | ||
186 | cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); | ||
187 | local_bh_enable(); | ||
188 | |||
189 | if (cq->triggered == triggered) | ||
190 | return; | ||
191 | } | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * qib_create_cq - create a completion queue | ||
196 | * @ibdev: the device this completion queue is attached to | ||
197 | * @entries: the minimum size of the completion queue | ||
198 | * @context: unused by the QLogic_IB driver | ||
199 | * @udata: user data for libibverbs.so | ||
200 | * | ||
201 | * Returns a pointer to the completion queue or negative errno values | ||
202 | * for failure. | ||
203 | * | ||
204 | * Called by ib_create_cq() in the generic verbs code. | ||
205 | */ | ||
206 | struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, | ||
207 | int comp_vector, struct ib_ucontext *context, | ||
208 | struct ib_udata *udata) | ||
209 | { | ||
210 | struct qib_ibdev *dev = to_idev(ibdev); | ||
211 | struct qib_cq *cq; | ||
212 | struct qib_cq_wc *wc; | ||
213 | struct ib_cq *ret; | ||
214 | u32 sz; | ||
215 | |||
216 | if (entries < 1 || entries > ib_qib_max_cqes) { | ||
217 | ret = ERR_PTR(-EINVAL); | ||
218 | goto done; | ||
219 | } | ||
220 | |||
221 | /* Allocate the completion queue structure. */ | ||
222 | cq = kmalloc(sizeof(*cq), GFP_KERNEL); | ||
223 | if (!cq) { | ||
224 | ret = ERR_PTR(-ENOMEM); | ||
225 | goto done; | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * Allocate the completion queue entries and head/tail pointers. | ||
230 | * This is allocated separately so that it can be resized and | ||
231 | * also mapped into user space. | ||
232 | * We need to use vmalloc() in order to support mmap and large | ||
233 | * numbers of entries. | ||
234 | */ | ||
235 | sz = sizeof(*wc); | ||
236 | if (udata && udata->outlen >= sizeof(__u64)) | ||
237 | sz += sizeof(struct ib_uverbs_wc) * (entries + 1); | ||
238 | else | ||
239 | sz += sizeof(struct ib_wc) * (entries + 1); | ||
240 | wc = vmalloc_user(sz); | ||
241 | if (!wc) { | ||
242 | ret = ERR_PTR(-ENOMEM); | ||
243 | goto bail_cq; | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * Return the address of the WC as the offset to mmap. | ||
248 | * See qib_mmap() for details. | ||
249 | */ | ||
250 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
251 | int err; | ||
252 | |||
253 | cq->ip = qib_create_mmap_info(dev, sz, context, wc); | ||
254 | if (!cq->ip) { | ||
255 | ret = ERR_PTR(-ENOMEM); | ||
256 | goto bail_wc; | ||
257 | } | ||
258 | |||
259 | err = ib_copy_to_udata(udata, &cq->ip->offset, | ||
260 | sizeof(cq->ip->offset)); | ||
261 | if (err) { | ||
262 | ret = ERR_PTR(err); | ||
263 | goto bail_ip; | ||
264 | } | ||
265 | } else | ||
266 | cq->ip = NULL; | ||
267 | |||
268 | spin_lock(&dev->n_cqs_lock); | ||
269 | if (dev->n_cqs_allocated == ib_qib_max_cqs) { | ||
270 | spin_unlock(&dev->n_cqs_lock); | ||
271 | ret = ERR_PTR(-ENOMEM); | ||
272 | goto bail_ip; | ||
273 | } | ||
274 | |||
275 | dev->n_cqs_allocated++; | ||
276 | spin_unlock(&dev->n_cqs_lock); | ||
277 | |||
278 | if (cq->ip) { | ||
279 | spin_lock_irq(&dev->pending_lock); | ||
280 | list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps); | ||
281 | spin_unlock_irq(&dev->pending_lock); | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. | ||
286 | * The number of entries should be >= the number requested or return | ||
287 | * an error. | ||
288 | */ | ||
289 | cq->ibcq.cqe = entries; | ||
290 | cq->notify = IB_CQ_NONE; | ||
291 | cq->triggered = 0; | ||
292 | spin_lock_init(&cq->lock); | ||
293 | INIT_WORK(&cq->comptask, send_complete); | ||
294 | wc->head = 0; | ||
295 | wc->tail = 0; | ||
296 | cq->queue = wc; | ||
297 | |||
298 | ret = &cq->ibcq; | ||
299 | |||
300 | goto done; | ||
301 | |||
302 | bail_ip: | ||
303 | kfree(cq->ip); | ||
304 | bail_wc: | ||
305 | vfree(wc); | ||
306 | bail_cq: | ||
307 | kfree(cq); | ||
308 | done: | ||
309 | return ret; | ||
310 | } | ||
311 | |||
312 | /** | ||
313 | * qib_destroy_cq - destroy a completion queue | ||
314 | * @ibcq: the completion queue to destroy. | ||
315 | * | ||
316 | * Returns 0 for success. | ||
317 | * | ||
318 | * Called by ib_destroy_cq() in the generic verbs code. | ||
319 | */ | ||
320 | int qib_destroy_cq(struct ib_cq *ibcq) | ||
321 | { | ||
322 | struct qib_ibdev *dev = to_idev(ibcq->device); | ||
323 | struct qib_cq *cq = to_icq(ibcq); | ||
324 | |||
325 | flush_work(&cq->comptask); | ||
326 | spin_lock(&dev->n_cqs_lock); | ||
327 | dev->n_cqs_allocated--; | ||
328 | spin_unlock(&dev->n_cqs_lock); | ||
329 | if (cq->ip) | ||
330 | kref_put(&cq->ip->ref, qib_release_mmap_info); | ||
331 | else | ||
332 | vfree(cq->queue); | ||
333 | kfree(cq); | ||
334 | |||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | /** | ||
339 | * qib_req_notify_cq - change the notification type for a completion queue | ||
340 | * @ibcq: the completion queue | ||
341 | * @notify_flags: the type of notification to request | ||
342 | * | ||
343 | * Returns 0 for success. | ||
344 | * | ||
345 | * This may be called from interrupt context. Also called by | ||
346 | * ib_req_notify_cq() in the generic verbs code. | ||
347 | */ | ||
348 | int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) | ||
349 | { | ||
350 | struct qib_cq *cq = to_icq(ibcq); | ||
351 | unsigned long flags; | ||
352 | int ret = 0; | ||
353 | |||
354 | spin_lock_irqsave(&cq->lock, flags); | ||
355 | /* | ||
356 | * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow | ||
357 | * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). | ||
358 | */ | ||
359 | if (cq->notify != IB_CQ_NEXT_COMP) | ||
360 | cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; | ||
361 | |||
362 | if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && | ||
363 | cq->queue->head != cq->queue->tail) | ||
364 | ret = 1; | ||
365 | |||
366 | spin_unlock_irqrestore(&cq->lock, flags); | ||
367 | |||
368 | return ret; | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * qib_resize_cq - change the size of the CQ | ||
373 | * @ibcq: the completion queue | ||
374 | * | ||
375 | * Returns 0 for success. | ||
376 | */ | ||
377 | int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) | ||
378 | { | ||
379 | struct qib_cq *cq = to_icq(ibcq); | ||
380 | struct qib_cq_wc *old_wc; | ||
381 | struct qib_cq_wc *wc; | ||
382 | u32 head, tail, n; | ||
383 | int ret; | ||
384 | u32 sz; | ||
385 | |||
386 | if (cqe < 1 || cqe > ib_qib_max_cqes) { | ||
387 | ret = -EINVAL; | ||
388 | goto bail; | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * Need to use vmalloc() if we want to support large #s of entries. | ||
393 | */ | ||
394 | sz = sizeof(*wc); | ||
395 | if (udata && udata->outlen >= sizeof(__u64)) | ||
396 | sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); | ||
397 | else | ||
398 | sz += sizeof(struct ib_wc) * (cqe + 1); | ||
399 | wc = vmalloc_user(sz); | ||
400 | if (!wc) { | ||
401 | ret = -ENOMEM; | ||
402 | goto bail; | ||
403 | } | ||
404 | |||
405 | /* Check that we can write the offset to mmap. */ | ||
406 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
407 | __u64 offset = 0; | ||
408 | |||
409 | ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); | ||
410 | if (ret) | ||
411 | goto bail_free; | ||
412 | } | ||
413 | |||
414 | spin_lock_irq(&cq->lock); | ||
415 | /* | ||
416 | * Make sure head and tail are sane since they | ||
417 | * might be user writable. | ||
418 | */ | ||
419 | old_wc = cq->queue; | ||
420 | head = old_wc->head; | ||
421 | if (head > (u32) cq->ibcq.cqe) | ||
422 | head = (u32) cq->ibcq.cqe; | ||
423 | tail = old_wc->tail; | ||
424 | if (tail > (u32) cq->ibcq.cqe) | ||
425 | tail = (u32) cq->ibcq.cqe; | ||
426 | if (head < tail) | ||
427 | n = cq->ibcq.cqe + 1 + head - tail; | ||
428 | else | ||
429 | n = head - tail; | ||
430 | if (unlikely((u32)cqe < n)) { | ||
431 | ret = -EINVAL; | ||
432 | goto bail_unlock; | ||
433 | } | ||
434 | for (n = 0; tail != head; n++) { | ||
435 | if (cq->ip) | ||
436 | wc->uqueue[n] = old_wc->uqueue[tail]; | ||
437 | else | ||
438 | wc->kqueue[n] = old_wc->kqueue[tail]; | ||
439 | if (tail == (u32) cq->ibcq.cqe) | ||
440 | tail = 0; | ||
441 | else | ||
442 | tail++; | ||
443 | } | ||
444 | cq->ibcq.cqe = cqe; | ||
445 | wc->head = n; | ||
446 | wc->tail = 0; | ||
447 | cq->queue = wc; | ||
448 | spin_unlock_irq(&cq->lock); | ||
449 | |||
450 | vfree(old_wc); | ||
451 | |||
452 | if (cq->ip) { | ||
453 | struct qib_ibdev *dev = to_idev(ibcq->device); | ||
454 | struct qib_mmap_info *ip = cq->ip; | ||
455 | |||
456 | qib_update_mmap_info(dev, ip, sz, wc); | ||
457 | |||
458 | /* | ||
459 | * Return the offset to mmap. | ||
460 | * See qib_mmap() for details. | ||
461 | */ | ||
462 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
463 | ret = ib_copy_to_udata(udata, &ip->offset, | ||
464 | sizeof(ip->offset)); | ||
465 | if (ret) | ||
466 | goto bail; | ||
467 | } | ||
468 | |||
469 | spin_lock_irq(&dev->pending_lock); | ||
470 | if (list_empty(&ip->pending_mmaps)) | ||
471 | list_add(&ip->pending_mmaps, &dev->pending_mmaps); | ||
472 | spin_unlock_irq(&dev->pending_lock); | ||
473 | } | ||
474 | |||
475 | ret = 0; | ||
476 | goto bail; | ||
477 | |||
478 | bail_unlock: | ||
479 | spin_unlock_irq(&cq->lock); | ||
480 | bail_free: | ||
481 | vfree(wc); | ||
482 | bail: | ||
483 | return ret; | ||
484 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c new file mode 100644 index 000000000000..ca98dd523752 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_diag.c | |||
@@ -0,0 +1,894 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | /* | ||
36 | * This file contains support for diagnostic functions. It is accessed by | ||
37 | * opening the qib_diag device, normally minor number 129. Diagnostic use | ||
38 | * of the QLogic_IB chip may render the chip or board unusable until the | ||
39 | * driver is unloaded, or in some cases, until the system is rebooted. | ||
40 | * | ||
41 | * Accesses to the chip through this interface are not similar to going | ||
42 | * through the /sys/bus/pci resource mmap interface. | ||
43 | */ | ||
44 | |||
45 | #include <linux/io.h> | ||
46 | #include <linux/pci.h> | ||
47 | #include <linux/poll.h> | ||
48 | #include <linux/vmalloc.h> | ||
49 | #include <linux/fs.h> | ||
50 | #include <linux/uaccess.h> | ||
51 | |||
52 | #include "qib.h" | ||
53 | #include "qib_common.h" | ||
54 | |||
55 | /* | ||
56 | * Each client that opens the diag device must read then write | ||
57 | * offset 0, to prevent lossage from random cat or od. diag_state | ||
58 | * sequences this "handshake". | ||
59 | */ | ||
60 | enum diag_state { UNUSED = 0, OPENED, INIT, READY }; | ||
61 | |||
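A hedged user-space sketch (not part of this patch) of that handshake: an 8-byte read at offset 0 followed by an 8-byte write of the same value, matching the state checks in qib_diag_read()/qib_diag_write() below. The device node path is an assumption.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int qib_diag_handshake(const char *node)   /* e.g. "/dev/ipath_diag0" */
{
	uint64_t reg0;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	/* while state < READY, only an 8-byte access at offset 0 is accepted */
	if (pread(fd, &reg0, sizeof(reg0), 0) != sizeof(reg0) ||    /* OPENED -> INIT */
	    pwrite(fd, &reg0, sizeof(reg0), 0) != sizeof(reg0)) {   /* INIT -> READY */
		close(fd);
		return -1;
	}
	return fd;      /* arbitrary chip-space reads/writes are now allowed */
}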
62 | /* State for an individual client. PID so children cannot abuse handshake */ | ||
63 | static struct qib_diag_client { | ||
64 | struct qib_diag_client *next; | ||
65 | struct qib_devdata *dd; | ||
66 | pid_t pid; | ||
67 | enum diag_state state; | ||
68 | } *client_pool; | ||
69 | |||
70 | /* | ||
71 | * Get a client struct. Recycled if possible, else kmalloc. | ||
72 | * Must be called with qib_mutex held | ||
73 | */ | ||
74 | static struct qib_diag_client *get_client(struct qib_devdata *dd) | ||
75 | { | ||
76 | struct qib_diag_client *dc; | ||
77 | |||
78 | dc = client_pool; | ||
79 | if (dc) | ||
80 | /* got one from the pool; remove it and use it */ | ||
81 | client_pool = dc->next; | ||
82 | else | ||
83 | /* None in pool, alloc and init */ | ||
84 | dc = kmalloc(sizeof *dc, GFP_KERNEL); | ||
85 | |||
86 | if (dc) { | ||
87 | dc->next = NULL; | ||
88 | dc->dd = dd; | ||
89 | dc->pid = current->pid; | ||
90 | dc->state = OPENED; | ||
91 | } | ||
92 | return dc; | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * Return to pool. Must be called with qib_mutex held | ||
97 | */ | ||
98 | static void return_client(struct qib_diag_client *dc) | ||
99 | { | ||
100 | struct qib_devdata *dd = dc->dd; | ||
101 | struct qib_diag_client *tdc, *rdc; | ||
102 | |||
103 | rdc = NULL; | ||
104 | if (dc == dd->diag_client) { | ||
105 | dd->diag_client = dc->next; | ||
106 | rdc = dc; | ||
107 | } else { | ||
108 | tdc = dc->dd->diag_client; | ||
109 | while (tdc) { | ||
110 | if (dc == tdc->next) { | ||
111 | tdc->next = dc->next; | ||
112 | rdc = dc; | ||
113 | break; | ||
114 | } | ||
115 | tdc = tdc->next; | ||
116 | } | ||
117 | } | ||
118 | if (rdc) { | ||
119 | rdc->state = UNUSED; | ||
120 | rdc->dd = NULL; | ||
121 | rdc->pid = 0; | ||
122 | rdc->next = client_pool; | ||
123 | client_pool = rdc; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | static int qib_diag_open(struct inode *in, struct file *fp); | ||
128 | static int qib_diag_release(struct inode *in, struct file *fp); | ||
129 | static ssize_t qib_diag_read(struct file *fp, char __user *data, | ||
130 | size_t count, loff_t *off); | ||
131 | static ssize_t qib_diag_write(struct file *fp, const char __user *data, | ||
132 | size_t count, loff_t *off); | ||
133 | |||
134 | static const struct file_operations diag_file_ops = { | ||
135 | .owner = THIS_MODULE, | ||
136 | .write = qib_diag_write, | ||
137 | .read = qib_diag_read, | ||
138 | .open = qib_diag_open, | ||
139 | .release = qib_diag_release | ||
140 | }; | ||
141 | |||
142 | static atomic_t diagpkt_count = ATOMIC_INIT(0); | ||
143 | static struct cdev *diagpkt_cdev; | ||
144 | static struct device *diagpkt_device; | ||
145 | |||
146 | static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data, | ||
147 | size_t count, loff_t *off); | ||
148 | |||
149 | static const struct file_operations diagpkt_file_ops = { | ||
150 | .owner = THIS_MODULE, | ||
151 | .write = qib_diagpkt_write, | ||
152 | }; | ||
153 | |||
154 | int qib_diag_add(struct qib_devdata *dd) | ||
155 | { | ||
156 | char name[16]; | ||
157 | int ret = 0; | ||
158 | |||
159 | if (atomic_inc_return(&diagpkt_count) == 1) { | ||
160 | ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt", | ||
161 | &diagpkt_file_ops, &diagpkt_cdev, | ||
162 | &diagpkt_device); | ||
163 | if (ret) | ||
164 | goto done; | ||
165 | } | ||
166 | |||
167 | snprintf(name, sizeof(name), "ipath_diag%d", dd->unit); | ||
168 | ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name, | ||
169 | &diag_file_ops, &dd->diag_cdev, | ||
170 | &dd->diag_device); | ||
171 | done: | ||
172 | return ret; | ||
173 | } | ||
174 | |||
175 | static void qib_unregister_observers(struct qib_devdata *dd); | ||
176 | |||
177 | void qib_diag_remove(struct qib_devdata *dd) | ||
178 | { | ||
179 | struct qib_diag_client *dc; | ||
180 | |||
181 | if (atomic_dec_and_test(&diagpkt_count)) | ||
182 | qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device); | ||
183 | |||
184 | qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device); | ||
185 | |||
186 | /* | ||
187 | * Return all diag_clients of this device. There should be none, | ||
188 | * as we are "guaranteed" that no clients are still open | ||
189 | */ | ||
190 | while (dd->diag_client) | ||
191 | return_client(dd->diag_client); | ||
192 | |||
193 | /* Now clean up all unused client structs */ | ||
194 | while (client_pool) { | ||
195 | dc = client_pool; | ||
196 | client_pool = dc->next; | ||
197 | kfree(dc); | ||
198 | } | ||
199 | /* Clean up observer list */ | ||
200 | qib_unregister_observers(dd); | ||
201 | } | ||
202 | |||
203 | /* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem * | ||
204 | * | ||
205 | * @dd: the qlogic_ib device | ||
206 | * @offs: the offset in chip-space | ||
207 | * @cntp: Pointer to max (byte) count for transfer starting at offset | ||
208 | * This returns a u32 __iomem * so it can be used for both 64 and 32-bit | ||
209 | * mapping. It is needed because with the use of PAT for control of | ||
210 | * write-combining, the logically contiguous address-space of the chip | ||
211 | * may be split into virtually non-contiguous spaces, with different | ||
212 | * attributes, which are then mapped to contiguous physical space | ||
213 | * based from the first BAR. | ||
214 | * | ||
215 | * The code below makes the same assumptions as were made in | ||
216 | * init_chip_wc_pat() (qib_init.c), copied here: | ||
217 | * Assumes chip address space looks like: | ||
218 | * - kregs + sregs + cregs + uregs (in any order) | ||
219 | * - piobufs (2K and 4K bufs in either order) | ||
220 | * or: | ||
221 | * - kregs + sregs + cregs (in any order) | ||
222 | * - piobufs (2K and 4K bufs in either order) | ||
223 | * - uregs | ||
224 | * | ||
225 | * If cntp is non-NULL, returns how many bytes from offset can be accessed | ||
226 | * Returns 0 if the offset is not mapped. | ||
227 | */ | ||
228 | static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset, | ||
229 | u32 *cntp) | ||
230 | { | ||
231 | u32 kreglen; | ||
232 | u32 snd_bottom, snd_lim = 0; | ||
233 | u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase; | ||
234 | u32 __iomem *map = NULL; | ||
235 | u32 cnt = 0; | ||
236 | |||
237 | /* First, simplest case, offset is within the first map. */ | ||
238 | kreglen = (dd->kregend - dd->kregbase) * sizeof(u64); | ||
239 | if (offset < kreglen) { | ||
240 | map = krb32 + (offset / sizeof(u32)); | ||
241 | cnt = kreglen - offset; | ||
242 | goto mapped; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Next check for user regs, the next most common case, | ||
247 | * and a cheap check because if they are not in the first map | ||
248 | * they are last in chip. | ||
249 | */ | ||
250 | if (dd->userbase) { | ||
251 | /* If user regs mapped, they are after send, so set limit. */ | ||
252 | u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; | ||
253 | snd_lim = dd->uregbase; | ||
254 | krb32 = (u32 __iomem *)dd->userbase; | ||
255 | if (offset >= dd->uregbase && offset < ulim) { | ||
256 | map = krb32 + (offset - dd->uregbase) / sizeof(u32); | ||
257 | cnt = ulim - offset; | ||
258 | goto mapped; | ||
259 | } | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * Lastly, check for offset within Send Buffers. | ||
264 | * This is gnarly because struct devdata is deliberately vague | ||
265 | * about things like 7322 VL15 buffers, and we are not in | ||
266 | * chip-specific code here, so should not make many assumptions. | ||
267 | * The one we _do_ make is that the only chip that has more sndbufs | ||
268 | * than we admit is the 7322, and it has userregs above that, so | ||
269 | * we know the snd_lim. | ||
270 | */ | ||
271 | /* Assume 2K buffers are first. */ | ||
272 | snd_bottom = dd->pio2k_bufbase; | ||
273 | if (snd_lim == 0) { | ||
274 | u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign); | ||
275 | snd_lim = snd_bottom + tot2k; | ||
276 | } | ||
277 | /* If 4k buffers exist, account for them by bumping | ||
278 | * appropriate limit. | ||
279 | */ | ||
280 | if (dd->piobcnt4k) { | ||
281 | u32 tot4k = dd->piobcnt4k * dd->align4k; | ||
282 | u32 offs4k = dd->piobufbase >> 32; | ||
283 | if (snd_bottom > offs4k) | ||
284 | snd_bottom = offs4k; | ||
285 | else { | ||
286 | /* 4k above 2k. Bump snd_lim, if needed*/ | ||
287 | if (!dd->userbase) | ||
288 | snd_lim = offs4k + tot4k; | ||
289 | } | ||
290 | } | ||
291 | /* | ||
292 | * Judgement call: can we ignore the space between SendBuffs and | ||
293 | * UserRegs, where we would like to see vl15 buffs, but not more? | ||
294 | */ | ||
295 | if (offset >= snd_bottom && offset < snd_lim) { | ||
296 | offset -= snd_bottom; | ||
297 | map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32)); | ||
298 | cnt = snd_lim - offset; | ||
299 | } | ||
300 | |||
301 | mapped: | ||
302 | if (cntp) | ||
303 | *cntp = cnt; | ||
304 | return map; | ||
305 | } | ||
306 | |||
307 | /* | ||
308 | * qib_read_umem64 - read a 64-bit quantity from the chip into user space | ||
309 | * @dd: the qlogic_ib device | ||
310 | * @uaddr: the location to store the data in user memory | ||
311 | * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore) | ||
312 | * @count: number of bytes to copy (multiple of 32 bits) | ||
313 | * | ||
314 | * This function also localizes all chip memory accesses. | ||
315 | * The copy should be written such that we read full cacheline packets | ||
316 | * from the chip. This is usually used for a single qword | ||
317 | * | ||
318 | * NOTE: This assumes the chip address is 64-bit aligned. | ||
319 | */ | ||
320 | static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr, | ||
321 | u32 regoffs, size_t count) | ||
322 | { | ||
323 | const u64 __iomem *reg_addr; | ||
324 | const u64 __iomem *reg_end; | ||
325 | u32 limit; | ||
326 | int ret; | ||
327 | |||
328 | reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit); | ||
329 | if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) { | ||
330 | ret = -EINVAL; | ||
331 | goto bail; | ||
332 | } | ||
333 | if (count >= limit) | ||
334 | count = limit; | ||
335 | reg_end = reg_addr + (count / sizeof(u64)); | ||
336 | |||
337 | /* not very efficient, but it works for now */ | ||
338 | while (reg_addr < reg_end) { | ||
339 | u64 data = readq(reg_addr); | ||
340 | |||
341 | if (copy_to_user(uaddr, &data, sizeof(u64))) { | ||
342 | ret = -EFAULT; | ||
343 | goto bail; | ||
344 | } | ||
345 | reg_addr++; | ||
346 | uaddr += sizeof(u64); | ||
347 | } | ||
348 | ret = 0; | ||
349 | bail: | ||
350 | return ret; | ||
351 | } | ||
352 | |||
353 | /* | ||
354 | * qib_write_umem64 - write a 64-bit quantity to the chip from user space | ||
355 | * @dd: the qlogic_ib device | ||
356 | * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore) | ||
357 | * @uaddr: the source of the data in user memory | ||
358 | * @count: the number of bytes to copy (multiple of 32 bits) | ||
359 | * | ||
360 | * This is usually used for a single qword | ||
361 | * NOTE: This assumes the chip address is 64-bit aligned. | ||
362 | */ | ||
363 | |||
364 | static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs, | ||
365 | const void __user *uaddr, size_t count) | ||
366 | { | ||
367 | u64 __iomem *reg_addr; | ||
368 | const u64 __iomem *reg_end; | ||
369 | u32 limit; | ||
370 | int ret; | ||
371 | |||
372 | reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit); | ||
373 | if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) { | ||
374 | ret = -EINVAL; | ||
375 | goto bail; | ||
376 | } | ||
377 | if (count >= limit) | ||
378 | count = limit; | ||
379 | reg_end = reg_addr + (count / sizeof(u64)); | ||
380 | |||
381 | /* not very efficient, but it works for now */ | ||
382 | while (reg_addr < reg_end) { | ||
383 | u64 data; | ||
384 | if (copy_from_user(&data, uaddr, sizeof(data))) { | ||
385 | ret = -EFAULT; | ||
386 | goto bail; | ||
387 | } | ||
388 | writeq(data, reg_addr); | ||
389 | |||
390 | reg_addr++; | ||
391 | uaddr += sizeof(u64); | ||
392 | } | ||
393 | ret = 0; | ||
394 | bail: | ||
395 | return ret; | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * qib_read_umem32 - read a 32-bit quantity from the chip into user space | ||
400 | * @dd: the qlogic_ib device | ||
401 | * @uaddr: the location to store the data in user memory | ||
402 | * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore) | ||
403 | * @count: number of bytes to copy | ||
404 | * | ||
405 | * read 32 bit values, not 64 bit; for memories that only | ||
406 | * support 32 bit reads; usually a single dword. | ||
407 | */ | ||
408 | static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr, | ||
409 | u32 regoffs, size_t count) | ||
410 | { | ||
411 | const u32 __iomem *reg_addr; | ||
412 | const u32 __iomem *reg_end; | ||
413 | u32 limit; | ||
414 | int ret; | ||
415 | |||
416 | reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit); | ||
417 | if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) { | ||
418 | ret = -EINVAL; | ||
419 | goto bail; | ||
420 | } | ||
421 | if (count >= limit) | ||
422 | count = limit; | ||
423 | reg_end = reg_addr + (count / sizeof(u32)); | ||
424 | |||
425 | /* not very efficient, but it works for now */ | ||
426 | while (reg_addr < reg_end) { | ||
427 | u32 data = readl(reg_addr); | ||
428 | |||
429 | if (copy_to_user(uaddr, &data, sizeof(data))) { | ||
430 | ret = -EFAULT; | ||
431 | goto bail; | ||
432 | } | ||
433 | |||
434 | reg_addr++; | ||
435 | uaddr += sizeof(u32); | ||
436 | |||
437 | } | ||
438 | ret = 0; | ||
439 | bail: | ||
440 | return ret; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * qib_write_umem32 - write a 32-bit quantity to the chip from user space | ||
445 | * @dd: the qlogic_ib device | ||
446 | * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore) | ||
447 | * @uaddr: the source of the data in user memory | ||
448 | * @count: number of bytes to copy | ||
449 | * | ||
450 | * write 32 bit values, not 64 bit; for memories that only | ||
451 | * support 32 bit write; usually a single dword. | ||
452 | */ | ||
453 | |||
454 | static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs, | ||
455 | const void __user *uaddr, size_t count) | ||
456 | { | ||
457 | u32 __iomem *reg_addr; | ||
458 | const u32 __iomem *reg_end; | ||
459 | u32 limit; | ||
460 | int ret; | ||
461 | |||
462 | reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit); | ||
463 | if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) { | ||
464 | ret = -EINVAL; | ||
465 | goto bail; | ||
466 | } | ||
467 | if (count >= limit) | ||
468 | count = limit; | ||
469 | reg_end = reg_addr + (count / sizeof(u32)); | ||
470 | |||
471 | while (reg_addr < reg_end) { | ||
472 | u32 data; | ||
473 | |||
474 | if (copy_from_user(&data, uaddr, sizeof(data))) { | ||
475 | ret = -EFAULT; | ||
476 | goto bail; | ||
477 | } | ||
478 | writel(data, reg_addr); | ||
479 | |||
480 | reg_addr++; | ||
481 | uaddr += sizeof(u32); | ||
482 | } | ||
483 | ret = 0; | ||
484 | bail: | ||
485 | return ret; | ||
486 | } | ||
487 | |||
488 | static int qib_diag_open(struct inode *in, struct file *fp) | ||
489 | { | ||
490 | int unit = iminor(in) - QIB_DIAG_MINOR_BASE; | ||
491 | struct qib_devdata *dd; | ||
492 | struct qib_diag_client *dc; | ||
493 | int ret; | ||
494 | |||
495 | mutex_lock(&qib_mutex); | ||
496 | |||
497 | dd = qib_lookup(unit); | ||
498 | |||
499 | if (dd == NULL || !(dd->flags & QIB_PRESENT) || | ||
500 | !dd->kregbase) { | ||
501 | ret = -ENODEV; | ||
502 | goto bail; | ||
503 | } | ||
504 | |||
505 | dc = get_client(dd); | ||
506 | if (!dc) { | ||
507 | ret = -ENOMEM; | ||
508 | goto bail; | ||
509 | } | ||
510 | dc->next = dd->diag_client; | ||
511 | dd->diag_client = dc; | ||
512 | fp->private_data = dc; | ||
513 | ret = 0; | ||
514 | bail: | ||
515 | mutex_unlock(&qib_mutex); | ||
516 | |||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | /** | ||
521 | * qib_diagpkt_write - write an IB packet | ||
522 | * @fp: the diag data device file pointer | ||
523 | * @data: qib_diag_pkt structure saying where to get the packet | ||
524 | * @count: size of data to write | ||
525 | * @off: unused by this code | ||
526 | */ | ||
527 | static ssize_t qib_diagpkt_write(struct file *fp, | ||
528 | const char __user *data, | ||
529 | size_t count, loff_t *off) | ||
530 | { | ||
531 | u32 __iomem *piobuf; | ||
532 | u32 plen, clen, pbufn; | ||
533 | struct qib_diag_xpkt dp; | ||
534 | u32 *tmpbuf = NULL; | ||
535 | struct qib_devdata *dd; | ||
536 | struct qib_pportdata *ppd; | ||
537 | ssize_t ret = 0; | ||
538 | |||
539 | if (count != sizeof(dp)) { | ||
540 | ret = -EINVAL; | ||
541 | goto bail; | ||
542 | } | ||
543 | if (copy_from_user(&dp, data, sizeof(dp))) { | ||
544 | ret = -EFAULT; | ||
545 | goto bail; | ||
546 | } | ||
547 | |||
548 | dd = qib_lookup(dp.unit); | ||
549 | if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) { | ||
550 | ret = -ENODEV; | ||
551 | goto bail; | ||
552 | } | ||
553 | if (!(dd->flags & QIB_INITTED)) { | ||
554 | /* no hardware, freeze, etc. */ | ||
555 | ret = -ENODEV; | ||
556 | goto bail; | ||
557 | } | ||
558 | |||
559 | if (dp.version != _DIAG_XPKT_VERS) { | ||
560 | qib_dev_err(dd, "Invalid version %u for diagpkt_write\n", | ||
561 | dp.version); | ||
562 | ret = -EINVAL; | ||
563 | goto bail; | ||
564 | } | ||
565 | /* send count must be an exact number of dwords */ | ||
566 | if (dp.len & 3) { | ||
567 | ret = -EINVAL; | ||
568 | goto bail; | ||
569 | } | ||
570 | if (!dp.port || dp.port > dd->num_pports) { | ||
571 | ret = -EINVAL; | ||
572 | goto bail; | ||
573 | } | ||
574 | ppd = &dd->pport[dp.port - 1]; | ||
575 | |||
576 | /* need total length before first word written */ | ||
577 | /* +1 word is for the qword padding */ | ||
578 | plen = sizeof(u32) + dp.len; | ||
579 | clen = dp.len >> 2; | ||
580 | |||
581 | if ((plen + 4) > ppd->ibmaxlen) { | ||
582 | ret = -EINVAL; | ||
583 | goto bail; /* before writing pbc */ | ||
584 | } | ||
585 | tmpbuf = vmalloc(plen); | ||
586 | if (!tmpbuf) { | ||
587 | qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, " | ||
588 | "failing\n"); | ||
589 | ret = -ENOMEM; | ||
590 | goto bail; | ||
591 | } | ||
592 | |||
593 | if (copy_from_user(tmpbuf, | ||
594 | (const void __user *) (unsigned long) dp.data, | ||
595 | dp.len)) { | ||
596 | ret = -EFAULT; | ||
597 | goto bail; | ||
598 | } | ||
599 | |||
600 | plen >>= 2; /* in dwords */ | ||
601 | |||
602 | if (dp.pbc_wd == 0) | ||
603 | dp.pbc_wd = plen; | ||
604 | |||
605 | piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn); | ||
606 | if (!piobuf) { | ||
607 | ret = -EBUSY; | ||
608 | goto bail; | ||
609 | } | ||
610 | /* disarm it just to be extra sure */ | ||
611 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn)); | ||
612 | |||
613 | /* disable header check on pbufn for this packet */ | ||
614 | dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL); | ||
615 | |||
616 | writeq(dp.pbc_wd, piobuf); | ||
617 | /* | ||
618 | * Copy all but the trigger word, then flush, so it's written | ||
619 | * to chip before trigger word, then write trigger word, then | ||
620 | * flush again, so packet is sent. | ||
621 | */ | ||
622 | if (dd->flags & QIB_PIO_FLUSH_WC) { | ||
623 | qib_flush_wc(); | ||
624 | qib_pio_copy(piobuf + 2, tmpbuf, clen - 1); | ||
625 | qib_flush_wc(); | ||
626 | __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1); | ||
627 | } else | ||
628 | qib_pio_copy(piobuf + 2, tmpbuf, clen); | ||
629 | |||
630 | if (dd->flags & QIB_USE_SPCL_TRIG) { | ||
631 | u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; | ||
632 | |||
633 | qib_flush_wc(); | ||
634 | __raw_writel(0xaebecede, piobuf + spcl_off); | ||
635 | } | ||
636 | |||
637 | /* | ||
638 | * Ensure buffer is written to the chip, then re-enable | ||
639 | * header checks (if supported by chip). The txchk | ||
640 | * code will ensure seen by chip before returning. | ||
641 | */ | ||
642 | qib_flush_wc(); | ||
643 | qib_sendbuf_done(dd, pbufn); | ||
644 | dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL); | ||
645 | |||
646 | ret = sizeof(dp); | ||
647 | |||
648 | bail: | ||
649 | vfree(tmpbuf); | ||
650 | return ret; | ||
651 | } | ||
652 | |||
653 | static int qib_diag_release(struct inode *in, struct file *fp) | ||
654 | { | ||
655 | mutex_lock(&qib_mutex); | ||
656 | return_client(fp->private_data); | ||
657 | fp->private_data = NULL; | ||
658 | mutex_unlock(&qib_mutex); | ||
659 | return 0; | ||
660 | } | ||
661 | |||
662 | /* | ||
663 | * Chip-specific code calls to register its interest in | ||
664 | * a specific range. | ||
665 | */ | ||
666 | struct diag_observer_list_elt { | ||
667 | struct diag_observer_list_elt *next; | ||
668 | const struct diag_observer *op; | ||
669 | }; | ||
670 | |||
671 | int qib_register_observer(struct qib_devdata *dd, | ||
672 | const struct diag_observer *op) | ||
673 | { | ||
674 | struct diag_observer_list_elt *olp; | ||
675 | int ret = -EINVAL; | ||
676 | |||
677 | if (!dd || !op) | ||
678 | goto bail; | ||
679 | ret = -ENOMEM; | ||
680 | olp = vmalloc(sizeof *olp); | ||
681 | if (!olp) { | ||
682 | printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n"); | ||
683 | goto bail; | ||
684 | } | ||
685 | if (olp) { | ||
686 | unsigned long flags; | ||
687 | |||
688 | spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); | ||
689 | olp->op = op; | ||
690 | olp->next = dd->diag_observer_list; | ||
691 | dd->diag_observer_list = olp; | ||
692 | spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); | ||
693 | ret = 0; | ||
694 | } | ||
695 | bail: | ||
696 | return ret; | ||
697 | } | ||
698 | |||
699 | /* Remove all registered observers when device is closed */ | ||
700 | static void qib_unregister_observers(struct qib_devdata *dd) | ||
701 | { | ||
702 | struct diag_observer_list_elt *olp; | ||
703 | unsigned long flags; | ||
704 | |||
705 | spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); | ||
706 | olp = dd->diag_observer_list; | ||
707 | while (olp) { | ||
708 | /* Pop one observer, let go of lock */ | ||
709 | dd->diag_observer_list = olp->next; | ||
710 | spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); | ||
711 | vfree(olp); | ||
712 | /* try again. */ | ||
713 | spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); | ||
714 | olp = dd->diag_observer_list; | ||
715 | } | ||
716 | spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); | ||
717 | } | ||
718 | |||
719 | /* | ||
720 | * Find the observer, if any, for the specified address. Initial implementation | ||
721 | * is simple stack of observers. This must be called with diag transaction | ||
722 | * lock held. | ||
723 | */ | ||
724 | static const struct diag_observer *diag_get_observer(struct qib_devdata *dd, | ||
725 | u32 addr) | ||
726 | { | ||
727 | struct diag_observer_list_elt *olp; | ||
728 | const struct diag_observer *op = NULL; | ||
729 | |||
730 | olp = dd->diag_observer_list; | ||
731 | while (olp) { | ||
732 | op = olp->op; | ||
733 | if (addr >= op->bottom && addr <= op->top) | ||
734 | break; | ||
735 | olp = olp->next; | ||
736 | } | ||
737 | if (!olp) | ||
738 | op = NULL; | ||
739 | |||
740 | return op; | ||
741 | } | ||
742 | |||
743 | static ssize_t qib_diag_read(struct file *fp, char __user *data, | ||
744 | size_t count, loff_t *off) | ||
745 | { | ||
746 | struct qib_diag_client *dc = fp->private_data; | ||
747 | struct qib_devdata *dd = dc->dd; | ||
748 | void __iomem *kreg_base; | ||
749 | ssize_t ret; | ||
750 | |||
751 | if (dc->pid != current->pid) { | ||
752 | ret = -EPERM; | ||
753 | goto bail; | ||
754 | } | ||
755 | |||
756 | kreg_base = dd->kregbase; | ||
757 | |||
758 | if (count == 0) | ||
759 | ret = 0; | ||
760 | else if ((count % 4) || (*off % 4)) | ||
761 | /* address or length is not 32-bit aligned, hence invalid */ | ||
762 | ret = -EINVAL; | ||
763 | else if (dc->state < READY && (*off || count != 8)) | ||
764 | ret = -EINVAL; /* prevent cat /dev/qib_diag* */ | ||
765 | else { | ||
766 | unsigned long flags; | ||
767 | u64 data64 = 0; | ||
768 | int use_32; | ||
769 | const struct diag_observer *op; | ||
770 | |||
771 | use_32 = (count % 8) || (*off % 8); | ||
772 | ret = -1; | ||
773 | spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); | ||
774 | /* | ||
775 | * Check for observer on this address range. | ||
776 | * We only support a single 32 or 64-bit read | ||
777 | * via observer, currently. | ||
778 | */ | ||
779 | op = diag_get_observer(dd, *off); | ||
780 | if (op) { | ||
781 | u32 offset = *off; | ||
782 | ret = op->hook(dd, op, offset, &data64, 0, use_32); | ||
783 | } | ||
784 | /* | ||
785 | * We need to release lock before any copy_to_user(), | ||
786 | * whether implicit in qib_read_umem* or explicit below. | ||
787 | */ | ||
788 | spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); | ||
789 | if (!op) { | ||
790 | if (use_32) | ||
791 | /* | ||
792 | * Address or length is not 64-bit aligned; | ||
793 | * do 32-bit rd | ||
794 | */ | ||
795 | ret = qib_read_umem32(dd, data, (u32) *off, | ||
796 | count); | ||
797 | else | ||
798 | ret = qib_read_umem64(dd, data, (u32) *off, | ||
799 | count); | ||
800 | } else if (ret == count) { | ||
801 | /* Below finishes case where observer existed */ | ||
802 | ret = copy_to_user(data, &data64, use_32 ? | ||
803 | sizeof(u32) : sizeof(u64)); | ||
804 | if (ret) | ||
805 | ret = -EFAULT; | ||
806 | } | ||
807 | } | ||
808 | |||
809 | if (ret >= 0) { | ||
810 | *off += count; | ||
811 | ret = count; | ||
812 | if (dc->state == OPENED) | ||
813 | dc->state = INIT; | ||
814 | } | ||
815 | bail: | ||
816 | return ret; | ||
817 | } | ||
818 | |||
819 | static ssize_t qib_diag_write(struct file *fp, const char __user *data, | ||
820 | size_t count, loff_t *off) | ||
821 | { | ||
822 | struct qib_diag_client *dc = fp->private_data; | ||
823 | struct qib_devdata *dd = dc->dd; | ||
824 | void __iomem *kreg_base; | ||
825 | ssize_t ret; | ||
826 | |||
827 | if (dc->pid != current->pid) { | ||
828 | ret = -EPERM; | ||
829 | goto bail; | ||
830 | } | ||
831 | |||
832 | kreg_base = dd->kregbase; | ||
833 | |||
834 | if (count == 0) | ||
835 | ret = 0; | ||
836 | else if ((count % 4) || (*off % 4)) | ||
837 | /* address or length is not 32-bit aligned, hence invalid */ | ||
838 | ret = -EINVAL; | ||
839 | else if (dc->state < READY && | ||
840 | ((*off || count != 8) || dc->state != INIT)) | ||
841 | /* No writes except second-step of init seq */ | ||
842 | ret = -EINVAL; /* before any other write allowed */ | ||
843 | else { | ||
844 | unsigned long flags; | ||
845 | const struct diag_observer *op = NULL; | ||
846 | int use_32 = (count % 8) || (*off % 8); | ||
847 | |||
848 | /* | ||
849 | * Check for observer on this address range. | ||
850 | * We only support a single 32 or 64-bit write | ||
851 | * via observer, currently. This helps, because | ||
852 | * we would otherwise have to jump through hoops | ||
853 | * to make "diag transaction" meaningful when we | ||
854 | * cannot do a copy_from_user while holding the lock. | ||
855 | */ | ||
856 | if (count == 4 || count == 8) { | ||
857 | u64 data64; | ||
858 | u32 offset = *off; | ||
859 | ret = copy_from_user(&data64, data, count); | ||
860 | if (ret) { | ||
861 | ret = -EFAULT; | ||
862 | goto bail; | ||
863 | } | ||
864 | spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); | ||
865 | op = diag_get_observer(dd, *off); | ||
866 | if (op) | ||
867 | ret = op->hook(dd, op, offset, &data64, ~0Ull, | ||
868 | use_32); | ||
869 | spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); | ||
870 | } | ||
871 | |||
872 | if (!op) { | ||
873 | if (use_32) | ||
874 | /* | ||
875 | * Address or length is not 64-bit aligned; | ||
876 | * do 32-bit write | ||
877 | */ | ||
878 | ret = qib_write_umem32(dd, (u32) *off, data, | ||
879 | count); | ||
880 | else | ||
881 | ret = qib_write_umem64(dd, (u32) *off, data, | ||
882 | count); | ||
883 | } | ||
884 | } | ||
885 | |||
886 | if (ret >= 0) { | ||
887 | *off += count; | ||
888 | ret = count; | ||
889 | if (dc->state == INIT) | ||
890 | dc->state = READY; /* all read/write OK now */ | ||
891 | } | ||
892 | bail: | ||
893 | return ret; | ||
894 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c new file mode 100644 index 000000000000..2920bb39a65b --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_dma.c | |||
@@ -0,0 +1,182 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/scatterlist.h> | ||
34 | |||
35 | #include "qib_verbs.h" | ||
36 | |||
37 | #define BAD_DMA_ADDRESS ((u64) 0) | ||
38 | |||
39 | /* | ||
40 | * The following functions implement driver specific replacements | ||
41 | * for the ib_dma_*() functions. | ||
42 | * | ||
43 | * These functions return kernel virtual addresses instead of | ||
44 | * device bus addresses since the driver uses the CPU to copy | ||
45 | * data instead of using hardware DMA. | ||
46 | */ | ||
47 | |||
48 | static int qib_mapping_error(struct ib_device *dev, u64 dma_addr) | ||
49 | { | ||
50 | return dma_addr == BAD_DMA_ADDRESS; | ||
51 | } | ||
52 | |||
53 | static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr, | ||
54 | size_t size, enum dma_data_direction direction) | ||
55 | { | ||
56 | BUG_ON(!valid_dma_direction(direction)); | ||
57 | return (u64) cpu_addr; | ||
58 | } | ||
59 | |||
60 | static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, | ||
61 | enum dma_data_direction direction) | ||
62 | { | ||
63 | BUG_ON(!valid_dma_direction(direction)); | ||
64 | } | ||
65 | |||
66 | static u64 qib_dma_map_page(struct ib_device *dev, struct page *page, | ||
67 | unsigned long offset, size_t size, | ||
68 | enum dma_data_direction direction) | ||
69 | { | ||
70 | u64 addr; | ||
71 | |||
72 | BUG_ON(!valid_dma_direction(direction)); | ||
73 | |||
74 | if (offset + size > PAGE_SIZE) { | ||
75 | addr = BAD_DMA_ADDRESS; | ||
76 | goto done; | ||
77 | } | ||
78 | |||
79 | addr = (u64) page_address(page); | ||
80 | if (addr) | ||
81 | addr += offset; | ||
82 | /* TODO: handle highmem pages */ | ||
83 | |||
84 | done: | ||
85 | return addr; | ||
86 | } | ||
87 | |||
88 | static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, | ||
89 | enum dma_data_direction direction) | ||
90 | { | ||
91 | BUG_ON(!valid_dma_direction(direction)); | ||
92 | } | ||
93 | |||
94 | static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl, | ||
95 | int nents, enum dma_data_direction direction) | ||
96 | { | ||
97 | struct scatterlist *sg; | ||
98 | u64 addr; | ||
99 | int i; | ||
100 | int ret = nents; | ||
101 | |||
102 | BUG_ON(!valid_dma_direction(direction)); | ||
103 | |||
104 | for_each_sg(sgl, sg, nents, i) { | ||
105 | addr = (u64) page_address(sg_page(sg)); | ||
106 | /* TODO: handle highmem pages */ | ||
107 | if (!addr) { | ||
108 | ret = 0; | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | return ret; | ||
113 | } | ||
114 | |||
115 | static void qib_unmap_sg(struct ib_device *dev, | ||
116 | struct scatterlist *sg, int nents, | ||
117 | enum dma_data_direction direction) | ||
118 | { | ||
119 | BUG_ON(!valid_dma_direction(direction)); | ||
120 | } | ||
121 | |||
122 | static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) | ||
123 | { | ||
124 | u64 addr = (u64) page_address(sg_page(sg)); | ||
125 | |||
126 | if (addr) | ||
127 | addr += sg->offset; | ||
128 | return addr; | ||
129 | } | ||
130 | |||
131 | static unsigned int qib_sg_dma_len(struct ib_device *dev, | ||
132 | struct scatterlist *sg) | ||
133 | { | ||
134 | return sg->length; | ||
135 | } | ||
136 | |||
137 | static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, | ||
138 | size_t size, enum dma_data_direction dir) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | static void qib_sync_single_for_device(struct ib_device *dev, u64 addr, | ||
143 | size_t size, | ||
144 | enum dma_data_direction dir) | ||
145 | { | ||
146 | } | ||
147 | |||
148 | static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size, | ||
149 | u64 *dma_handle, gfp_t flag) | ||
150 | { | ||
151 | struct page *p; | ||
152 | void *addr = NULL; | ||
153 | |||
154 | p = alloc_pages(flag, get_order(size)); | ||
155 | if (p) | ||
156 | addr = page_address(p); | ||
157 | if (dma_handle) | ||
158 | *dma_handle = (u64) addr; | ||
159 | return addr; | ||
160 | } | ||
161 | |||
162 | static void qib_dma_free_coherent(struct ib_device *dev, size_t size, | ||
163 | void *cpu_addr, u64 dma_handle) | ||
164 | { | ||
165 | free_pages((unsigned long) cpu_addr, get_order(size)); | ||
166 | } | ||
167 | |||
168 | struct ib_dma_mapping_ops qib_dma_mapping_ops = { | ||
169 | .mapping_error = qib_mapping_error, | ||
170 | .map_single = qib_dma_map_single, | ||
171 | .unmap_single = qib_dma_unmap_single, | ||
172 | .map_page = qib_dma_map_page, | ||
173 | .unmap_page = qib_dma_unmap_page, | ||
174 | .map_sg = qib_map_sg, | ||
175 | .unmap_sg = qib_unmap_sg, | ||
176 | .dma_address = qib_sg_dma_address, | ||
177 | .dma_len = qib_sg_dma_len, | ||
178 | .sync_single_for_cpu = qib_sync_single_for_cpu, | ||
179 | .sync_single_for_device = qib_sync_single_for_device, | ||
180 | .alloc_coherent = qib_dma_alloc_coherent, | ||
181 | .free_coherent = qib_dma_free_coherent | ||
182 | }; | ||
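
The ops table above stands in for real DMA mapping because qib moves all data with the CPU (programmed I/O), so the "DMA address" handed back to the verbs core is simply the kernel virtual address. A minimal standalone sketch of that identity-mapping idea; the names map_single() and BAD_ADDRESS are made up for illustration and are not the kernel API:

#include <stdint.h>
#include <stdio.h>

#define BAD_ADDRESS ((uint64_t)0)

/* "Map" a CPU buffer by simply reusing its virtual address. */
static uint64_t map_single(void *cpu_addr, size_t size)
{
	(void)size;		/* nothing to program; no IOMMU is involved */
	return (uint64_t)(uintptr_t)cpu_addr;
}

int main(void)
{
	char buf[64];
	uint64_t handle = map_single(buf, sizeof(buf));

	/* mapping_error is then just a comparison against the sentinel. */
	printf("mapped %p -> %#llx, error=%d\n", (void *)buf,
	       (unsigned long long)handle, handle == BAD_ADDRESS);
	return 0;
}
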
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c new file mode 100644 index 000000000000..f15ce076ac49 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_driver.c | |||
@@ -0,0 +1,665 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <linux/netdevice.h> | ||
39 | #include <linux/vmalloc.h> | ||
40 | |||
41 | #include "qib.h" | ||
42 | |||
43 | /* | ||
44 | * The size has to be longer than this string, so we can append | ||
45 | * board/chip information to it in the init code. | ||
46 | */ | ||
47 | const char ib_qib_version[] = QIB_IDSTR "\n"; | ||
48 | |||
49 | DEFINE_SPINLOCK(qib_devs_lock); | ||
50 | LIST_HEAD(qib_dev_list); | ||
51 | DEFINE_MUTEX(qib_mutex); /* general driver use */ | ||
52 | |||
53 | unsigned qib_ibmtu; | ||
54 | module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO); | ||
55 | MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)"); | ||
56 | |||
57 | unsigned qib_compat_ddr_negotiate = 1; | ||
58 | module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint, | ||
59 | S_IWUSR | S_IRUGO); | ||
60 | MODULE_PARM_DESC(compat_ddr_negotiate, | ||
61 | "Attempt pre-IBTA 1.2 DDR speed negotiation"); | ||
62 | |||
63 | MODULE_LICENSE("Dual BSD/GPL"); | ||
64 | MODULE_AUTHOR("QLogic <support@qlogic.com>"); | ||
65 | MODULE_DESCRIPTION("QLogic IB driver"); | ||
66 | |||
67 | /* | ||
68 | * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our | ||
69 | * PIO send buffers. This is well beyond anything currently | ||
70 | * defined in the InfiniBand spec. | ||
71 | */ | ||
72 | #define QIB_PIO_MAXIBHDR 128 | ||
73 | |||
74 | struct qlogic_ib_stats qib_stats; | ||
75 | |||
76 | const char *qib_get_unit_name(int unit) | ||
77 | { | ||
78 | static char iname[16]; | ||
79 | |||
80 | snprintf(iname, sizeof iname, "infinipath%u", unit); | ||
81 | return iname; | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * Return count of units with at least one port ACTIVE. | ||
86 | */ | ||
87 | int qib_count_active_units(void) | ||
88 | { | ||
89 | struct qib_devdata *dd; | ||
90 | struct qib_pportdata *ppd; | ||
91 | unsigned long flags; | ||
92 | int pidx, nunits_active = 0; | ||
93 | |||
94 | spin_lock_irqsave(&qib_devs_lock, flags); | ||
95 | list_for_each_entry(dd, &qib_dev_list, list) { | ||
96 | if (!(dd->flags & QIB_PRESENT) || !dd->kregbase) | ||
97 | continue; | ||
98 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
99 | ppd = dd->pport + pidx; | ||
100 | if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT | | ||
101 | QIBL_LINKARMED | QIBL_LINKACTIVE))) { | ||
102 | nunits_active++; | ||
103 | break; | ||
104 | } | ||
105 | } | ||
106 | } | ||
107 | spin_unlock_irqrestore(&qib_devs_lock, flags); | ||
108 | return nunits_active; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Return count of all units, optionally return in arguments | ||
113 | * the number of usable (present) units, and the number of | ||
114 | * ports that are up. | ||
115 | */ | ||
116 | int qib_count_units(int *npresentp, int *nupp) | ||
117 | { | ||
118 | int nunits = 0, npresent = 0, nup = 0; | ||
119 | struct qib_devdata *dd; | ||
120 | unsigned long flags; | ||
121 | int pidx; | ||
122 | struct qib_pportdata *ppd; | ||
123 | |||
124 | spin_lock_irqsave(&qib_devs_lock, flags); | ||
125 | |||
126 | list_for_each_entry(dd, &qib_dev_list, list) { | ||
127 | nunits++; | ||
128 | if ((dd->flags & QIB_PRESENT) && dd->kregbase) | ||
129 | npresent++; | ||
130 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
131 | ppd = dd->pport + pidx; | ||
132 | if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT | | ||
133 | QIBL_LINKARMED | QIBL_LINKACTIVE))) | ||
134 | nup++; | ||
135 | } | ||
136 | } | ||
137 | |||
138 | spin_unlock_irqrestore(&qib_devs_lock, flags); | ||
139 | |||
140 | if (npresentp) | ||
141 | *npresentp = npresent; | ||
142 | if (nupp) | ||
143 | *nupp = nup; | ||
144 | |||
145 | return nunits; | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * qib_wait_linkstate - wait for an IB link state change to occur | ||
150 | * @dd: the qlogic_ib device | ||
151 | * @state: the state to wait for | ||
152 | * @msecs: the number of milliseconds to wait | ||
153 | * | ||
154 | * Wait up to msecs milliseconds for an IB link state change to occur. | ||
155 | * For now, take the easy polling route. Currently used only by | ||
156 | * qib_set_linkstate. Returns 0 if state reached, otherwise | ||
157 | * -ETIMEDOUT. @state can have multiple state bits set, to match any of | ||
158 | * several transitions. | ||
159 | */ | ||
160 | int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs) | ||
161 | { | ||
162 | int ret; | ||
163 | unsigned long flags; | ||
164 | |||
165 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
166 | if (ppd->state_wanted) { | ||
167 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
168 | ret = -EBUSY; | ||
169 | goto bail; | ||
170 | } | ||
171 | ppd->state_wanted = state; | ||
172 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
173 | wait_event_interruptible_timeout(ppd->state_wait, | ||
174 | (ppd->lflags & state), | ||
175 | msecs_to_jiffies(msecs)); | ||
176 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
177 | ppd->state_wanted = 0; | ||
178 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
179 | |||
180 | if (!(ppd->lflags & state)) | ||
181 | ret = -ETIMEDOUT; | ||
182 | else | ||
183 | ret = 0; | ||
184 | bail: | ||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate) | ||
189 | { | ||
190 | u32 lstate; | ||
191 | int ret; | ||
192 | struct qib_devdata *dd = ppd->dd; | ||
193 | unsigned long flags; | ||
194 | |||
195 | switch (newstate) { | ||
196 | case QIB_IB_LINKDOWN_ONLY: | ||
197 | dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, | ||
198 | IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP); | ||
199 | /* don't wait */ | ||
200 | ret = 0; | ||
201 | goto bail; | ||
202 | |||
203 | case QIB_IB_LINKDOWN: | ||
204 | dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, | ||
205 | IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL); | ||
206 | /* don't wait */ | ||
207 | ret = 0; | ||
208 | goto bail; | ||
209 | |||
210 | case QIB_IB_LINKDOWN_SLEEP: | ||
211 | dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, | ||
212 | IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP); | ||
213 | /* don't wait */ | ||
214 | ret = 0; | ||
215 | goto bail; | ||
216 | |||
217 | case QIB_IB_LINKDOWN_DISABLE: | ||
218 | dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, | ||
219 | IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE); | ||
220 | /* don't wait */ | ||
221 | ret = 0; | ||
222 | goto bail; | ||
223 | |||
224 | case QIB_IB_LINKARM: | ||
225 | if (ppd->lflags & QIBL_LINKARMED) { | ||
226 | ret = 0; | ||
227 | goto bail; | ||
228 | } | ||
229 | if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) { | ||
230 | ret = -EINVAL; | ||
231 | goto bail; | ||
232 | } | ||
233 | /* | ||
234 | * Since the port can be ACTIVE when we ask for ARMED, | ||
235 | * clear QIBL_LINKV so we can wait for a transition. | ||
236 | * If the link isn't ARMED, then something else happened | ||
237 | * and there is no point waiting for ARMED. | ||
238 | */ | ||
239 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
240 | ppd->lflags &= ~QIBL_LINKV; | ||
241 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
242 | dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, | ||
243 | IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP); | ||
244 | lstate = QIBL_LINKV; | ||
245 | break; | ||
246 | |||
247 | case QIB_IB_LINKACTIVE: | ||
248 | if (ppd->lflags & QIBL_LINKACTIVE) { | ||
249 | ret = 0; | ||
250 | goto bail; | ||
251 | } | ||
252 | if (!(ppd->lflags & QIBL_LINKARMED)) { | ||
253 | ret = -EINVAL; | ||
254 | goto bail; | ||
255 | } | ||
256 | dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, | ||
257 | IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP); | ||
258 | lstate = QIBL_LINKACTIVE; | ||
259 | break; | ||
260 | |||
261 | default: | ||
262 | ret = -EINVAL; | ||
263 | goto bail; | ||
264 | } | ||
265 | ret = qib_wait_linkstate(ppd, lstate, 10); | ||
266 | |||
267 | bail: | ||
268 | return ret; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * Get address of eager buffer from its index (allocated in chunks, not | ||
273 | * contiguous). | ||
274 | */ | ||
275 | static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) | ||
276 | { | ||
277 | const u32 chunk = etail / rcd->rcvegrbufs_perchunk; | ||
278 | const u32 idx = etail % rcd->rcvegrbufs_perchunk; | ||
279 | |||
280 | return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize; | ||
281 | } | ||
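
qib_get_egrbuf() splits an eager index into a chunk number and a slot within that chunk, because eager buffers are allocated as an array of fixed-size chunks rather than one contiguous region. A standalone sketch of the arithmetic, with made-up sizes purely for illustration:

#include <stdio.h>

int main(void)
{
	const unsigned perchunk = 32;	/* rcvegrbufs_perchunk (example value) */
	const unsigned bufsize = 4096;	/* bytes per eager buffer (example) */
	unsigned etail = 70;		/* eager index reported by the chip */

	unsigned chunk = etail / perchunk;	/* which chunk array entry */
	unsigned idx = etail % perchunk;	/* buffer slot within the chunk */
	unsigned byte_offset = idx * bufsize;	/* offset into that chunk */

	printf("etail %u -> chunk %u, slot %u, byte offset %u\n",
	       etail, chunk, idx, byte_offset);
	return 0;
}
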
282 | |||
283 | /* | ||
284 | * Returns 1 if error was a CRC, else 0. | ||
285 | * Needed for some chips' synthesized error counters. | ||
286 | */ | ||
287 | static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt, | ||
288 | u32 eflags, u32 l, u32 etail, __le32 *rhf_addr, | ||
289 | struct qib_message_header *hdr) | ||
290 | { | ||
291 | u32 ret = 0; | ||
292 | |||
293 | if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR)) | ||
294 | ret = 1; | ||
295 | return ret; | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * qib_kreceive - receive a packet | ||
300 | * @rcd: the qlogic_ib context | ||
301 | * @llic: gets count of good packets needed to clear lli, | ||
302 | * (used with chips that need to track crcs for lli) | ||
303 | * | ||
304 | * called from interrupt handler for errors or receive interrupt | ||
305 | * Returns number of CRC error packets, needed by some chips for | ||
306 | * local link integrity tracking. crcs are adjusted down by following | ||
307 | * good packets, if any, and count of good packets is also tracked. | ||
308 | */ | ||
309 | u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) | ||
310 | { | ||
311 | struct qib_devdata *dd = rcd->dd; | ||
312 | struct qib_pportdata *ppd = rcd->ppd; | ||
313 | __le32 *rhf_addr; | ||
314 | void *ebuf; | ||
315 | const u32 rsize = dd->rcvhdrentsize; /* words */ | ||
316 | const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */ | ||
317 | u32 etail = -1, l, hdrqtail; | ||
318 | struct qib_message_header *hdr; | ||
319 | u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0; | ||
320 | int last; | ||
321 | u64 lval; | ||
322 | struct qib_qp *qp, *nqp; | ||
323 | |||
324 | l = rcd->head; | ||
325 | rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; | ||
326 | if (dd->flags & QIB_NODMA_RTAIL) { | ||
327 | u32 seq = qib_hdrget_seq(rhf_addr); | ||
328 | if (seq != rcd->seq_cnt) | ||
329 | goto bail; | ||
330 | hdrqtail = 0; | ||
331 | } else { | ||
332 | hdrqtail = qib_get_rcvhdrtail(rcd); | ||
333 | if (l == hdrqtail) | ||
334 | goto bail; | ||
335 | smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ | ||
336 | } | ||
337 | |||
338 | for (last = 0, i = 1; !last; i += !last) { | ||
339 | hdr = dd->f_get_msgheader(dd, rhf_addr); | ||
340 | eflags = qib_hdrget_err_flags(rhf_addr); | ||
341 | etype = qib_hdrget_rcv_type(rhf_addr); | ||
342 | /* total length */ | ||
343 | tlen = qib_hdrget_length_in_bytes(rhf_addr); | ||
344 | ebuf = NULL; | ||
345 | if ((dd->flags & QIB_NODMA_RTAIL) ? | ||
346 | qib_hdrget_use_egr_buf(rhf_addr) : | ||
347 | (etype != RCVHQ_RCV_TYPE_EXPECTED)) { | ||
348 | etail = qib_hdrget_index(rhf_addr); | ||
349 | updegr = 1; | ||
350 | if (tlen > sizeof(*hdr) || | ||
351 | etype >= RCVHQ_RCV_TYPE_NON_KD) | ||
352 | ebuf = qib_get_egrbuf(rcd, etail); | ||
353 | } | ||
354 | if (!eflags) { | ||
355 | u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2; | ||
356 | |||
357 | if (lrh_len != tlen) { | ||
358 | qib_stats.sps_lenerrs++; | ||
359 | goto move_along; | ||
360 | } | ||
361 | } | ||
362 | if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags && | ||
363 | ebuf == NULL && | ||
364 | tlen > (dd->rcvhdrentsize - 2 + 1 - | ||
365 | qib_hdrget_offset(rhf_addr)) << 2) { | ||
366 | goto move_along; | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * Both tiderr and qibhdrerr are set for all plain IB | ||
371 | * packets; only qibhdrerr should be set. | ||
372 | */ | ||
373 | if (unlikely(eflags)) | ||
374 | crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l, | ||
375 | etail, rhf_addr, hdr); | ||
376 | else if (etype == RCVHQ_RCV_TYPE_NON_KD) { | ||
377 | qib_ib_rcv(rcd, hdr, ebuf, tlen); | ||
378 | if (crcs) | ||
379 | crcs--; | ||
380 | else if (llic && *llic) | ||
381 | --*llic; | ||
382 | } | ||
383 | move_along: | ||
384 | l += rsize; | ||
385 | if (l >= maxcnt) | ||
386 | l = 0; | ||
387 | rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; | ||
388 | if (dd->flags & QIB_NODMA_RTAIL) { | ||
389 | u32 seq = qib_hdrget_seq(rhf_addr); | ||
390 | |||
391 | if (++rcd->seq_cnt > 13) | ||
392 | rcd->seq_cnt = 1; | ||
393 | if (seq != rcd->seq_cnt) | ||
394 | last = 1; | ||
395 | } else if (l == hdrqtail) | ||
396 | last = 1; | ||
397 | /* | ||
398 | * Update head regs etc., every 16 packets, if not last pkt, | ||
399 | * to help prevent rcvhdrq overflows, when many packets | ||
400 | * are processed and queue is nearly full. | ||
401 | * Don't request an interrupt for intermediate updates. | ||
402 | */ | ||
403 | lval = l; | ||
404 | if (!last && !(i & 0xf)) { | ||
405 | dd->f_update_usrhead(rcd, lval, updegr, etail); | ||
406 | updegr = 0; | ||
407 | } | ||
408 | } | ||
409 | |||
410 | rcd->head = l; | ||
411 | rcd->pkt_count += i; | ||
412 | |||
413 | /* | ||
414 | * Iterate over all QPs waiting to respond. | ||
415 | * The list won't change since the IRQ is only run on one CPU. | ||
416 | */ | ||
417 | list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { | ||
418 | list_del_init(&qp->rspwait); | ||
419 | if (qp->r_flags & QIB_R_RSP_NAK) { | ||
420 | qp->r_flags &= ~QIB_R_RSP_NAK; | ||
421 | qib_send_rc_ack(qp); | ||
422 | } | ||
423 | if (qp->r_flags & QIB_R_RSP_SEND) { | ||
424 | unsigned long flags; | ||
425 | |||
426 | qp->r_flags &= ~QIB_R_RSP_SEND; | ||
427 | spin_lock_irqsave(&qp->s_lock, flags); | ||
428 | if (ib_qib_state_ops[qp->state] & | ||
429 | QIB_PROCESS_OR_FLUSH_SEND) | ||
430 | qib_schedule_send(qp); | ||
431 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
432 | } | ||
433 | if (atomic_dec_and_test(&qp->refcount)) | ||
434 | wake_up(&qp->wait); | ||
435 | } | ||
436 | |||
437 | bail: | ||
438 | /* Report number of packets consumed */ | ||
439 | if (npkts) | ||
440 | *npkts = i; | ||
441 | |||
442 | /* | ||
443 | * Always write head at end, and setup rcv interrupt, even | ||
444 | * if no packets were processed. | ||
445 | */ | ||
446 | lval = (u64)rcd->head | dd->rhdrhead_intr_off; | ||
447 | dd->f_update_usrhead(rcd, lval, updegr, etail); | ||
448 | return crcs; | ||
449 | } | ||
450 | |||
451 | /** | ||
452 | * qib_set_mtu - set the MTU | ||
453 | * @ppd: the perport data | ||
454 | * @arg: the new MTU | ||
455 | * | ||
456 | * We can handle "any" incoming size; the issue here is whether we | ||
457 | * need to restrict our outgoing size. For now, we don't do any | ||
458 | * sanity checking on this, and we don't deal with what happens to | ||
459 | * programs that are already running when the size changes. | ||
460 | * NOTE: changing the MTU will usually cause the IBC to go back to | ||
461 | * link INIT state... | ||
462 | */ | ||
463 | int qib_set_mtu(struct qib_pportdata *ppd, u16 arg) | ||
464 | { | ||
465 | u32 piosize; | ||
466 | int ret, chk; | ||
467 | |||
468 | if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 && | ||
469 | arg != 4096) { | ||
470 | ret = -EINVAL; | ||
471 | goto bail; | ||
472 | } | ||
473 | chk = ib_mtu_enum_to_int(qib_ibmtu); | ||
474 | if (chk > 0 && arg > chk) { | ||
475 | ret = -EINVAL; | ||
476 | goto bail; | ||
477 | } | ||
478 | |||
479 | piosize = ppd->ibmaxlen; | ||
480 | ppd->ibmtu = arg; | ||
481 | |||
482 | if (arg >= (piosize - QIB_PIO_MAXIBHDR)) { | ||
483 | /* Only if it's not the initial value (or reset to it) */ | ||
484 | if (piosize != ppd->init_ibmaxlen) { | ||
485 | if (arg > piosize && arg <= ppd->init_ibmaxlen) | ||
486 | piosize = ppd->init_ibmaxlen - 2 * sizeof(u32); | ||
487 | ppd->ibmaxlen = piosize; | ||
488 | } | ||
489 | } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) { | ||
490 | piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32); | ||
491 | ppd->ibmaxlen = piosize; | ||
492 | } | ||
493 | |||
494 | ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0); | ||
495 | |||
496 | ret = 0; | ||
497 | |||
498 | bail: | ||
499 | return ret; | ||
500 | } | ||
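
The ibmtu module parameter is an IB MTU enum (1=256 ... 5=4096), so qib_set_mtu() accepts only the legal IB MTU sizes and rejects anything above the enum cap; a parameter whose enum lookup is not positive imposes no cap. A standalone sketch of that validation; mtu_enum_to_int() below merely mirrors ib_mtu_enum_to_int() for illustration:

#include <stdio.h>

/* Mirrors the IB MTU enum mapping 1..5 -> 256..4096; anything else -> -1. */
static int mtu_enum_to_int(unsigned e)
{
	static const int tbl[] = { -1, 256, 512, 1024, 2048, 4096 };

	return e < 6 ? tbl[e] : -1;
}

static int mtu_ok(unsigned arg, unsigned ibmtu_param)
{
	int cap = mtu_enum_to_int(ibmtu_param);

	if (arg != 256 && arg != 512 && arg != 1024 &&
	    arg != 2048 && arg != 4096)
		return 0;			/* not a legal IB MTU */
	if (cap > 0 && (int)arg > cap)
		return 0;			/* above the module-parameter cap */
	return 1;
}

int main(void)
{
	printf("4096 with ibmtu=4 (2048 cap): %s\n",
	       mtu_ok(4096, 4) ? "accepted" : "rejected");
	printf("2048 with ibmtu=4 (2048 cap): %s\n",
	       mtu_ok(2048, 4) ? "accepted" : "rejected");
	return 0;
}
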
501 | |||
502 | int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc) | ||
503 | { | ||
504 | struct qib_devdata *dd = ppd->dd; | ||
505 | ppd->lid = lid; | ||
506 | ppd->lmc = lmc; | ||
507 | |||
508 | dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC, | ||
509 | lid | (~((1U << lmc) - 1)) << 16); | ||
510 | |||
511 | qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n", | ||
512 | dd->unit, ppd->port, lid); | ||
513 | |||
514 | return 0; | ||
515 | } | ||
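
The QIB_IB_CFG_LIDLMC value written in qib_set_lid() packs the LID into the low 16 bits and the complement of the low lmc bits, i.e. the LID match mask, into the upper bits, which is how the LMC path bits are excluded from LID matching. A standalone sketch of the same packing with example values:

#include <stdio.h>

int main(void)
{
	unsigned lid = 0x23;
	unsigned lmc = 2;	/* low 2 bits of the DLID select the path */

	/* Same packing as the QIB_IB_CFG_LIDLMC value written above. */
	unsigned mask = ~((1U << lmc) - 1) & 0xFFFF;
	unsigned cfg = lid | (mask << 16);

	printf("lid=0x%x lmc=%u -> mask=0x%04x cfg=0x%08x\n",
	       lid, lmc, mask, cfg);
	return 0;
}
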
516 | |||
517 | /* | ||
518 | * The following deals with the "obviously simple" task of overriding the | ||
519 | * state of the LEDs, which normally indicate link physical and logical | ||
520 | * status. The complications arise from different hardware mappings, from | ||
521 | * the board-dependent routine being called from interrupts, and from the | ||
522 | * requirement to _flash_ them. | ||
523 | */ | ||
524 | #define LED_OVER_FREQ_SHIFT 8 | ||
525 | #define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT) | ||
526 | /* Below is "non-zero" to force override, but both actual LEDs are off */ | ||
527 | #define LED_OVER_BOTH_OFF (8) | ||
528 | |||
529 | static void qib_run_led_override(unsigned long opaque) | ||
530 | { | ||
531 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | ||
532 | struct qib_devdata *dd = ppd->dd; | ||
533 | int timeoff; | ||
534 | int ph_idx; | ||
535 | |||
536 | if (!(dd->flags & QIB_INITTED)) | ||
537 | return; | ||
538 | |||
539 | ph_idx = ppd->led_override_phase++ & 1; | ||
540 | ppd->led_override = ppd->led_override_vals[ph_idx]; | ||
541 | timeoff = ppd->led_override_timeoff; | ||
542 | |||
543 | dd->f_setextled(ppd, 1); | ||
544 | /* | ||
545 | * don't re-fire the timer if user asked for it to be off; we let | ||
546 | * it fire one more time after they turn it off, to keep things simple. | ||
547 | */ | ||
548 | if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) | ||
549 | mod_timer(&ppd->led_override_timer, jiffies + timeoff); | ||
550 | } | ||
551 | |||
552 | void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val) | ||
553 | { | ||
554 | struct qib_devdata *dd = ppd->dd; | ||
555 | int timeoff, freq; | ||
556 | |||
557 | if (!(dd->flags & QIB_INITTED)) | ||
558 | return; | ||
559 | |||
560 | /* First check if we are blinking. If not, use 1HZ polling */ | ||
561 | timeoff = HZ; | ||
562 | freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT; | ||
563 | |||
564 | if (freq) { | ||
565 | /* For blink, set each phase from one nybble of val */ | ||
566 | ppd->led_override_vals[0] = val & 0xF; | ||
567 | ppd->led_override_vals[1] = (val >> 4) & 0xF; | ||
568 | timeoff = (HZ << 4)/freq; | ||
569 | } else { | ||
570 | /* Non-blink set both phases the same. */ | ||
571 | ppd->led_override_vals[0] = val & 0xF; | ||
572 | ppd->led_override_vals[1] = val & 0xF; | ||
573 | } | ||
574 | ppd->led_override_timeoff = timeoff; | ||
575 | |||
576 | /* | ||
577 | * If the timer has not already been started, do so. Use a "quick" | ||
578 | * timeout so the function will be called soon, to look at our request. | ||
579 | */ | ||
580 | if (atomic_inc_return(&ppd->led_override_timer_active) == 1) { | ||
581 | /* Need to start timer */ | ||
582 | init_timer(&ppd->led_override_timer); | ||
583 | ppd->led_override_timer.function = qib_run_led_override; | ||
584 | ppd->led_override_timer.data = (unsigned long) ppd; | ||
585 | ppd->led_override_timer.expires = jiffies + 1; | ||
586 | add_timer(&ppd->led_override_timer); | ||
587 | } else { | ||
588 | if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) | ||
589 | mod_timer(&ppd->led_override_timer, jiffies + 1); | ||
590 | atomic_dec(&ppd->led_override_timer_active); | ||
591 | } | ||
592 | } | ||
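
qib_set_led_override() decodes val as: bits 0-3 the LED state for blink phase 0, bits 4-7 the state for phase 1, and bits 8-15 a blink frequency from which the timer period (HZ << 4) / freq is derived (a zero frequency means a steady state polled at 1 Hz). A standalone decode sketch; HZ is given an arbitrary example value here:

#include <stdio.h>

#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF << LED_OVER_FREQ_SHIFT)
#define HZ 250			/* example tick rate, for illustration only */

int main(void)
{
	/* blink request: phase 0 = 0x5, phase 1 = 0x0, frequency = 4 */
	unsigned val = (4 << LED_OVER_FREQ_SHIFT) | 0x05;

	unsigned freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
	unsigned phase0 = val & 0xF;
	unsigned phase1 = (val >> 4) & 0xF;
	unsigned timeoff = freq ? (HZ << 4) / freq : HZ;

	printf("phase0=0x%x phase1=0x%x freq=%u -> timer period %u jiffies\n",
	       phase0, phase1, freq, timeoff);
	return 0;
}
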
593 | |||
594 | /** | ||
595 | * qib_reset_device - reset the chip if possible | ||
596 | * @unit: the device to reset | ||
597 | * | ||
598 | * Whether or not reset is successful, we attempt to re-initialize the chip | ||
599 | * (that is, much like a driver unload/reload). We clear the INITTED flag | ||
600 | * so that the various entry points will fail until we reinitialize. For | ||
601 | * now, we only allow this if no user contexts are open that use chip resources. | ||
602 | */ | ||
603 | int qib_reset_device(int unit) | ||
604 | { | ||
605 | int ret, i; | ||
606 | struct qib_devdata *dd = qib_lookup(unit); | ||
607 | struct qib_pportdata *ppd; | ||
608 | unsigned long flags; | ||
609 | int pidx; | ||
610 | |||
611 | if (!dd) { | ||
612 | ret = -ENODEV; | ||
613 | goto bail; | ||
614 | } | ||
615 | |||
616 | qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit); | ||
617 | |||
618 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) { | ||
619 | qib_devinfo(dd->pcidev, "Invalid unit number %u or " | ||
620 | "not initialized or not present\n", unit); | ||
621 | ret = -ENXIO; | ||
622 | goto bail; | ||
623 | } | ||
624 | |||
625 | spin_lock_irqsave(&dd->uctxt_lock, flags); | ||
626 | if (dd->rcd) | ||
627 | for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) { | ||
628 | if (!dd->rcd[i] || !dd->rcd[i]->cnt) | ||
629 | continue; | ||
630 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
631 | ret = -EBUSY; | ||
632 | goto bail; | ||
633 | } | ||
634 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
635 | |||
636 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
637 | ppd = dd->pport + pidx; | ||
638 | if (atomic_read(&ppd->led_override_timer_active)) { | ||
639 | /* Need to stop LED timer, _then_ shut off LEDs */ | ||
640 | del_timer_sync(&ppd->led_override_timer); | ||
641 | atomic_set(&ppd->led_override_timer_active, 0); | ||
642 | } | ||
643 | |||
644 | /* Shut off LEDs after we are sure timer is not running */ | ||
645 | ppd->led_override = LED_OVER_BOTH_OFF; | ||
646 | dd->f_setextled(ppd, 0); | ||
647 | if (dd->flags & QIB_HAS_SEND_DMA) | ||
648 | qib_teardown_sdma(ppd); | ||
649 | } | ||
650 | |||
651 | ret = dd->f_reset(dd); | ||
652 | if (ret == 1) | ||
653 | ret = qib_init(dd, 1); | ||
654 | else | ||
655 | ret = -EAGAIN; | ||
656 | if (ret) | ||
657 | qib_dev_err(dd, "Reinitialize unit %u after " | ||
658 | "reset failed with %d\n", unit, ret); | ||
659 | else | ||
660 | qib_devinfo(dd->pcidev, "Reinitialized unit %u after " | ||
661 | "resetting\n", unit); | ||
662 | |||
663 | bail: | ||
664 | return ret; | ||
665 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c new file mode 100644 index 000000000000..92d9cfe98a68 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_eeprom.c | |||
@@ -0,0 +1,451 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/delay.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | |||
38 | #include "qib.h" | ||
39 | |||
40 | /* | ||
41 | * Functions specific to the serial EEPROM on cards handled by ib_qib. | ||
42 | * The actual serial interface code is in qib_twsi.c. This file is a client. | ||
43 | */ | ||
44 | |||
45 | /** | ||
46 | * qib_eeprom_read - receives bytes from the eeprom via I2C | ||
47 | * @dd: the qlogic_ib device | ||
48 | * @eeprom_offset: address to read from | ||
49 | * @buffer: where to store result | ||
50 | * @len: number of bytes to receive | ||
51 | */ | ||
52 | int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset, | ||
53 | void *buff, int len) | ||
54 | { | ||
55 | int ret; | ||
56 | |||
57 | ret = mutex_lock_interruptible(&dd->eep_lock); | ||
58 | if (!ret) { | ||
59 | ret = qib_twsi_reset(dd); | ||
60 | if (ret) | ||
61 | qib_dev_err(dd, "EEPROM Reset for read failed\n"); | ||
62 | else | ||
63 | ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, | ||
64 | eeprom_offset, buff, len); | ||
65 | mutex_unlock(&dd->eep_lock); | ||
66 | } | ||
67 | |||
68 | return ret; | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Actually update the eeprom, first doing write enable if | ||
73 | * needed, then restoring write enable state. | ||
74 | * Must be called with eep_lock held | ||
75 | */ | ||
76 | static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset, | ||
77 | const void *buf, int len) | ||
78 | { | ||
79 | int ret, pwen; | ||
80 | |||
81 | pwen = dd->f_eeprom_wen(dd, 1); | ||
82 | ret = qib_twsi_reset(dd); | ||
83 | if (ret) | ||
84 | qib_dev_err(dd, "EEPROM Reset for write failed\n"); | ||
85 | else | ||
86 | ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev, | ||
87 | offset, buf, len); | ||
88 | dd->f_eeprom_wen(dd, pwen); | ||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | /** | ||
93 | * qib_eeprom_write - writes data to the eeprom via I2C | ||
94 | * @dd: the qlogic_ib device | ||
95 | * @eeprom_offset: where to place data | ||
96 | * @buffer: data to write | ||
97 | * @len: number of bytes to write | ||
98 | */ | ||
99 | int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset, | ||
100 | const void *buff, int len) | ||
101 | { | ||
102 | int ret; | ||
103 | |||
104 | ret = mutex_lock_interruptible(&dd->eep_lock); | ||
105 | if (!ret) { | ||
106 | ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len); | ||
107 | mutex_unlock(&dd->eep_lock); | ||
108 | } | ||
109 | |||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | static u8 flash_csum(struct qib_flash *ifp, int adjust) | ||
114 | { | ||
115 | u8 *ip = (u8 *) ifp; | ||
116 | u8 csum = 0, len; | ||
117 | |||
118 | /* | ||
119 | * Limit length checksummed to max length of actual data. | ||
120 | * Checksum of erased eeprom will still be bad, but we avoid | ||
121 | * reading past the end of the buffer we were passed. | ||
122 | */ | ||
123 | len = ifp->if_length; | ||
124 | if (len > sizeof(struct qib_flash)) | ||
125 | len = sizeof(struct qib_flash); | ||
126 | while (len--) | ||
127 | csum += *ip++; | ||
128 | csum -= ifp->if_csum; | ||
129 | csum = ~csum; | ||
130 | if (adjust) | ||
131 | ifp->if_csum = csum; | ||
132 | |||
133 | return csum; | ||
134 | } | ||
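
flash_csum() sums every byte of the flash struct (capped at the struct's real size), backs the stored if_csum byte out of the sum, and then stores or compares the one's complement of the result. The same scheme over a plain byte buffer, as a standalone sketch with a made-up layout:

#include <stdint.h>
#include <stdio.h>

/* Checksum buf[0..len), excluding the byte that holds the checksum itself. */
static uint8_t csum8(const uint8_t *buf, size_t len, size_t csum_off)
{
	uint8_t csum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		csum += buf[i];
	csum -= buf[csum_off];		/* back out the stored checksum byte */
	return (uint8_t)~csum;
}

int main(void)
{
	uint8_t flash[8] = { 1, 2, 3, 4, 5, 6, 7, 0 };
	const size_t csum_off = 7;

	flash[csum_off] = csum8(flash, sizeof(flash), csum_off);  /* "adjust" */
	printf("stored 0x%02x, verify %s\n", flash[csum_off],
	       csum8(flash, sizeof(flash), csum_off) == flash[csum_off] ?
	       "ok" : "bad");
	return 0;
}
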
135 | |||
136 | /** | ||
137 | * qib_get_eeprom_info- get the GUID et al. from the TSWI EEPROM device | ||
138 | * @dd: the qlogic_ib device | ||
139 | * | ||
140 | * We have the capability to use the nguid field, and get | ||
141 | * the guid from the first chip's flash, to use for all of them. | ||
142 | */ | ||
143 | void qib_get_eeprom_info(struct qib_devdata *dd) | ||
144 | { | ||
145 | void *buf; | ||
146 | struct qib_flash *ifp; | ||
147 | __be64 guid; | ||
148 | int len, eep_stat; | ||
149 | u8 csum, *bguid; | ||
150 | int t = dd->unit; | ||
151 | struct qib_devdata *dd0 = qib_lookup(0); | ||
152 | |||
153 | if (t && dd0->nguid > 1 && t <= dd0->nguid) { | ||
154 | u8 oguid; | ||
155 | dd->base_guid = dd0->base_guid; | ||
156 | bguid = (u8 *) &dd->base_guid; | ||
157 | |||
158 | oguid = bguid[7]; | ||
159 | bguid[7] += t; | ||
160 | if (oguid > bguid[7]) { | ||
161 | if (bguid[6] == 0xff) { | ||
162 | if (bguid[5] == 0xff) { | ||
163 | qib_dev_err(dd, "Can't set %s GUID" | ||
164 | " from base, wraps to" | ||
165 | " OUI!\n", | ||
166 | qib_get_unit_name(t)); | ||
167 | dd->base_guid = 0; | ||
168 | goto bail; | ||
169 | } | ||
170 | bguid[5]++; | ||
171 | } | ||
172 | bguid[6]++; | ||
173 | } | ||
174 | dd->nguid = 1; | ||
175 | goto bail; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Read full flash, not just currently used part, since it may have | ||
180 | * been written with a newer definition. | ||
181 | */ | ||
182 | len = sizeof(struct qib_flash); | ||
183 | buf = vmalloc(len); | ||
184 | if (!buf) { | ||
185 | qib_dev_err(dd, "Couldn't allocate memory to read %u " | ||
186 | "bytes from eeprom for GUID\n", len); | ||
187 | goto bail; | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Use "public" eeprom read function, which does locking and | ||
192 | * figures out device. This will migrate to chip-specific. | ||
193 | */ | ||
194 | eep_stat = qib_eeprom_read(dd, 0, buf, len); | ||
195 | |||
196 | if (eep_stat) { | ||
197 | qib_dev_err(dd, "Failed reading GUID from eeprom\n"); | ||
198 | goto done; | ||
199 | } | ||
200 | ifp = (struct qib_flash *)buf; | ||
201 | |||
202 | csum = flash_csum(ifp, 0); | ||
203 | if (csum != ifp->if_csum) { | ||
204 | qib_devinfo(dd->pcidev, "Bad I2C flash checksum: " | ||
205 | "0x%x, not 0x%x\n", csum, ifp->if_csum); | ||
206 | goto done; | ||
207 | } | ||
208 | if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) || | ||
209 | *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) { | ||
210 | qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n", | ||
211 | *(unsigned long long *) ifp->if_guid); | ||
212 | /* don't allow GUID if all 0 or all 1's */ | ||
213 | goto done; | ||
214 | } | ||
215 | |||
216 | /* complain, but allow it */ | ||
217 | if (*(u64 *) ifp->if_guid == 0x100007511000000ULL) | ||
218 | qib_devinfo(dd->pcidev, "Warning, GUID %llx is " | ||
219 | "default, probably not correct!\n", | ||
220 | *(unsigned long long *) ifp->if_guid); | ||
221 | |||
222 | bguid = ifp->if_guid; | ||
223 | if (!bguid[0] && !bguid[1] && !bguid[2]) { | ||
224 | /* | ||
225 | * Original incorrect GUID format in flash; fix in | ||
226 | * core copy, by shifting up 2 octets; don't need to | ||
227 | * change top octet, since both it and shifted are 0. | ||
228 | */ | ||
229 | bguid[1] = bguid[3]; | ||
230 | bguid[2] = bguid[4]; | ||
231 | bguid[3] = 0; | ||
232 | bguid[4] = 0; | ||
233 | guid = *(__be64 *) ifp->if_guid; | ||
234 | } else | ||
235 | guid = *(__be64 *) ifp->if_guid; | ||
236 | dd->base_guid = guid; | ||
237 | dd->nguid = ifp->if_numguid; | ||
238 | /* | ||
239 | * Things are slightly complicated by the desire to transparently | ||
240 | * support both the Pathscale 10-digit serial number and the QLogic | ||
241 | * 13-character version. | ||
242 | */ | ||
243 | if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] && | ||
244 | ((u8 *) ifp->if_sprefix)[0] != 0xFF) { | ||
245 | char *snp = dd->serial; | ||
246 | |||
247 | /* | ||
248 | * This board has a Serial-prefix, which is stored | ||
249 | * elsewhere for backward-compatibility. | ||
250 | */ | ||
251 | memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); | ||
252 | snp[sizeof ifp->if_sprefix] = '\0'; | ||
253 | len = strlen(snp); | ||
254 | snp += len; | ||
255 | len = (sizeof dd->serial) - len; | ||
256 | if (len > sizeof ifp->if_serial) | ||
257 | len = sizeof ifp->if_serial; | ||
258 | memcpy(snp, ifp->if_serial, len); | ||
259 | } else | ||
260 | memcpy(dd->serial, ifp->if_serial, | ||
261 | sizeof ifp->if_serial); | ||
262 | if (!strstr(ifp->if_comment, "Tested successfully")) | ||
263 | qib_dev_err(dd, "Board SN %s did not pass functional " | ||
264 | "test: %s\n", dd->serial, ifp->if_comment); | ||
265 | |||
266 | memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); | ||
267 | /* | ||
268 | * Power-on (actually "active") hours are kept as little-endian value | ||
269 | * in EEPROM, but as seconds in a (possibly as small as 24-bit) | ||
270 | * atomic_t while running. | ||
271 | */ | ||
272 | atomic_set(&dd->active_time, 0); | ||
273 | dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); | ||
274 | |||
275 | done: | ||
276 | vfree(buf); | ||
277 | |||
278 | bail:; | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * qib_update_eeprom_log - copy active-time and error counters to eeprom | ||
283 | * @dd: the qlogic_ib device | ||
284 | * | ||
285 | * Although the time is kept as seconds in the qib_devdata struct, it is | ||
286 | * rounded to hours for re-write, as we have only 16 bits in EEPROM. | ||
287 | * First-cut code reads whole (expected) struct qib_flash, modifies, | ||
288 | * re-writes. Future direction: read/write only what we need, assuming | ||
289 | * that the EEPROM had to have been "good enough" for driver init, and | ||
290 | * if not, we aren't making it worse. | ||
291 | * | ||
292 | */ | ||
293 | int qib_update_eeprom_log(struct qib_devdata *dd) | ||
294 | { | ||
295 | void *buf; | ||
296 | struct qib_flash *ifp; | ||
297 | int len, hi_water; | ||
298 | uint32_t new_time, new_hrs; | ||
299 | u8 csum; | ||
300 | int ret, idx; | ||
301 | unsigned long flags; | ||
302 | |||
303 | /* first, check if we actually need to do anything. */ | ||
304 | ret = 0; | ||
305 | for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { | ||
306 | if (dd->eep_st_new_errs[idx]) { | ||
307 | ret = 1; | ||
308 | break; | ||
309 | } | ||
310 | } | ||
311 | new_time = atomic_read(&dd->active_time); | ||
312 | |||
313 | if (ret == 0 && new_time < 3600) | ||
314 | goto bail; | ||
315 | |||
316 | /* | ||
317 | * The quick-check above determined that there is something worthy | ||
318 | * of logging, so get the current contents and take a more detailed look. | ||
319 | * Read the full flash, not just the currently used part, since it may | ||
320 | * have been written with a newer definition. | ||
321 | */ | ||
322 | len = sizeof(struct qib_flash); | ||
323 | buf = vmalloc(len); | ||
324 | ret = 1; | ||
325 | if (!buf) { | ||
326 | qib_dev_err(dd, "Couldn't allocate memory to read %u " | ||
327 | "bytes from eeprom for logging\n", len); | ||
328 | goto bail; | ||
329 | } | ||
330 | |||
331 | /* Grab semaphore and read current EEPROM. If we get an | ||
332 | * error, let go, but if not, keep it until we finish write. | ||
333 | */ | ||
334 | ret = mutex_lock_interruptible(&dd->eep_lock); | ||
335 | if (ret) { | ||
336 | qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); | ||
337 | goto free_bail; | ||
338 | } | ||
339 | ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); | ||
340 | if (ret) { | ||
341 | mutex_unlock(&dd->eep_lock); | ||
342 | qib_dev_err(dd, "Unable to read EEPROM for logging\n"); | ||
343 | goto free_bail; | ||
344 | } | ||
345 | ifp = (struct qib_flash *)buf; | ||
346 | |||
347 | csum = flash_csum(ifp, 0); | ||
348 | if (csum != ifp->if_csum) { | ||
349 | mutex_unlock(&dd->eep_lock); | ||
350 | qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", | ||
351 | csum, ifp->if_csum); | ||
352 | ret = 1; | ||
353 | goto free_bail; | ||
354 | } | ||
355 | hi_water = 0; | ||
356 | spin_lock_irqsave(&dd->eep_st_lock, flags); | ||
357 | for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { | ||
358 | int new_val = dd->eep_st_new_errs[idx]; | ||
359 | if (new_val) { | ||
360 | /* | ||
361 | * If we have seen any errors, add to EEPROM values | ||
362 | * We need to saturate at 0xFF (255) and we also | ||
363 | * would need to adjust the checksum if we were | ||
364 | * trying to minimize EEPROM traffic | ||
365 | * Note that we add to actual current count in EEPROM, | ||
366 | * in case it was altered while we were running. | ||
367 | */ | ||
368 | new_val += ifp->if_errcntp[idx]; | ||
369 | if (new_val > 0xFF) | ||
370 | new_val = 0xFF; | ||
371 | if (ifp->if_errcntp[idx] != new_val) { | ||
372 | ifp->if_errcntp[idx] = new_val; | ||
373 | hi_water = offsetof(struct qib_flash, | ||
374 | if_errcntp) + idx; | ||
375 | } | ||
376 | /* | ||
377 | * update our shadow (used to minimize EEPROM | ||
378 | * traffic), to match what we are about to write. | ||
379 | */ | ||
380 | dd->eep_st_errs[idx] = new_val; | ||
381 | dd->eep_st_new_errs[idx] = 0; | ||
382 | } | ||
383 | } | ||
384 | /* | ||
385 | * Now update active-time. We would like to round to the nearest hour | ||
386 | * but unless atomic_t are sure to be proper signed ints we cannot, | ||
387 | * because we need to account for what we "transfer" to EEPROM and | ||
388 | * if we log an hour at 31 minutes, then we would need to set | ||
389 | * active_time to -29 to accurately count the _next_ hour. | ||
390 | */ | ||
391 | if (new_time >= 3600) { | ||
392 | new_hrs = new_time / 3600; | ||
393 | atomic_sub((new_hrs * 3600), &dd->active_time); | ||
394 | new_hrs += dd->eep_hrs; | ||
395 | if (new_hrs > 0xFFFF) | ||
396 | new_hrs = 0xFFFF; | ||
397 | dd->eep_hrs = new_hrs; | ||
398 | if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) { | ||
399 | ifp->if_powerhour[0] = new_hrs & 0xFF; | ||
400 | hi_water = offsetof(struct qib_flash, if_powerhour); | ||
401 | } | ||
402 | if ((new_hrs >> 8) != ifp->if_powerhour[1]) { | ||
403 | ifp->if_powerhour[1] = new_hrs >> 8; | ||
404 | hi_water = offsetof(struct qib_flash, if_powerhour) + 1; | ||
405 | } | ||
406 | } | ||
407 | /* | ||
408 | * There is a tiny possibility that we could somehow fail to write | ||
409 | * the EEPROM after updating our shadows, but problems from holding | ||
410 | * the spinlock too long are a much bigger issue. | ||
411 | */ | ||
412 | spin_unlock_irqrestore(&dd->eep_st_lock, flags); | ||
413 | if (hi_water) { | ||
414 | /* we made some change to the data, update cksum and write */ | ||
415 | csum = flash_csum(ifp, 1); | ||
416 | ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1); | ||
417 | } | ||
418 | mutex_unlock(&dd->eep_lock); | ||
419 | if (ret) | ||
420 | qib_dev_err(dd, "Failed updating EEPROM\n"); | ||
421 | |||
422 | free_bail: | ||
423 | vfree(buf); | ||
424 | bail: | ||
425 | return ret; | ||
426 | } | ||
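
qib_update_eeprom_log() keeps run time as seconds in an atomic_t, but the EEPROM only has a 16-bit hour count, so whole hours are transferred, the leftover seconds keep accumulating, and the stored value saturates at 0xFFFF. A standalone sketch of that bookkeeping with example numbers:

#include <stdio.h>

int main(void)
{
	unsigned active_seconds = 7800;	/* run time accumulated since last write */
	unsigned eep_hrs = 0x0010;	/* hours already stored in the EEPROM */

	unsigned hrs = active_seconds / 3600;	/* transfer whole hours only */

	active_seconds -= hrs * 3600;		/* remainder keeps counting */
	hrs += eep_hrs;
	if (hrs > 0xFFFF)
		hrs = 0xFFFF;			/* the EEPROM field is 16 bits */

	printf("EEPROM hours now %u (0x%04x), %u seconds carried forward\n",
	       hrs, hrs, active_seconds);
	return 0;
}
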
427 | |||
428 | /** | ||
429 | * qib_inc_eeprom_err - increment one of the four error counters | ||
430 | * that are logged to EEPROM. | ||
431 | * @dd: the qlogic_ib device | ||
432 | * @eidx: 0..3, the counter to increment | ||
433 | * @incr: how much to add | ||
434 | * | ||
435 | * Each counter is 8-bits, and saturates at 255 (0xFF). They | ||
436 | * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log() | ||
437 | * is called, but it can only be called in a context that allows sleep. | ||
438 | * This function can be called even at interrupt level. | ||
439 | */ | ||
440 | void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr) | ||
441 | { | ||
442 | uint new_val; | ||
443 | unsigned long flags; | ||
444 | |||
445 | spin_lock_irqsave(&dd->eep_st_lock, flags); | ||
446 | new_val = dd->eep_st_new_errs[eidx] + incr; | ||
447 | if (new_val > 255) | ||
448 | new_val = 255; | ||
449 | dd->eep_st_new_errs[eidx] = new_val; | ||
450 | spin_unlock_irqrestore(&dd->eep_st_lock, flags); | ||
451 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c new file mode 100644 index 000000000000..a142a9eb5226 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -0,0 +1,2317 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/pci.h> | ||
36 | #include <linux/poll.h> | ||
37 | #include <linux/cdev.h> | ||
38 | #include <linux/swap.h> | ||
39 | #include <linux/vmalloc.h> | ||
40 | #include <linux/highmem.h> | ||
41 | #include <linux/io.h> | ||
42 | #include <linux/uio.h> | ||
43 | #include <linux/jiffies.h> | ||
44 | #include <asm/pgtable.h> | ||
45 | #include <linux/delay.h> | ||
46 | |||
47 | #include "qib.h" | ||
48 | #include "qib_common.h" | ||
49 | #include "qib_user_sdma.h" | ||
50 | |||
51 | static int qib_open(struct inode *, struct file *); | ||
52 | static int qib_close(struct inode *, struct file *); | ||
53 | static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *); | ||
54 | static ssize_t qib_aio_write(struct kiocb *, const struct iovec *, | ||
55 | unsigned long, loff_t); | ||
56 | static unsigned int qib_poll(struct file *, struct poll_table_struct *); | ||
57 | static int qib_mmapf(struct file *, struct vm_area_struct *); | ||
58 | |||
59 | static const struct file_operations qib_file_ops = { | ||
60 | .owner = THIS_MODULE, | ||
61 | .write = qib_write, | ||
62 | .aio_write = qib_aio_write, | ||
63 | .open = qib_open, | ||
64 | .release = qib_close, | ||
65 | .poll = qib_poll, | ||
66 | .mmap = qib_mmapf | ||
67 | }; | ||
68 | |||
69 | /* | ||
70 | * Convert kernel virtual addresses to physical addresses so they don't | ||
71 | * potentially conflict with the chip addresses used as mmap offsets. | ||
72 | * It doesn't really matter what mmap offset we use as long as we can | ||
73 | * interpret it correctly. | ||
74 | */ | ||
75 | static u64 cvt_kvaddr(void *p) | ||
76 | { | ||
77 | struct page *page; | ||
78 | u64 paddr = 0; | ||
79 | |||
80 | page = vmalloc_to_page(p); | ||
81 | if (page) | ||
82 | paddr = page_to_pfn(page) << PAGE_SHIFT; | ||
83 | |||
84 | return paddr; | ||
85 | } | ||
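
cvt_kvaddr() turns a vmalloc'ed kernel address into a physical address (page frame number shifted up by PAGE_SHIFT) so it can be handed to user space as an mmap offset without colliding with the chip register offsets. A standalone sketch of the pfn-to-physical arithmetic; the pfn value is made up:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on most architectures */

int main(void)
{
	unsigned long pfn = 0x1a2b3;	/* example page frame number */

	/* Same shape as page_to_pfn(page) << PAGE_SHIFT in cvt_kvaddr(). */
	uint64_t paddr = (uint64_t)pfn << PAGE_SHIFT;

	printf("pfn %#lx -> physical address %#llx\n",
	       pfn, (unsigned long long)paddr);
	return 0;
}
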
86 | |||
87 | static int qib_get_base_info(struct file *fp, void __user *ubase, | ||
88 | size_t ubase_size) | ||
89 | { | ||
90 | struct qib_ctxtdata *rcd = ctxt_fp(fp); | ||
91 | int ret = 0; | ||
92 | struct qib_base_info *kinfo = NULL; | ||
93 | struct qib_devdata *dd = rcd->dd; | ||
94 | struct qib_pportdata *ppd = rcd->ppd; | ||
95 | unsigned subctxt_cnt; | ||
96 | int shared, master; | ||
97 | size_t sz; | ||
98 | |||
99 | subctxt_cnt = rcd->subctxt_cnt; | ||
100 | if (!subctxt_cnt) { | ||
101 | shared = 0; | ||
102 | master = 0; | ||
103 | subctxt_cnt = 1; | ||
104 | } else { | ||
105 | shared = 1; | ||
106 | master = !subctxt_fp(fp); | ||
107 | } | ||
108 | |||
109 | sz = sizeof(*kinfo); | ||
110 | /* If context sharing is not requested, allow the old size structure */ | ||
111 | if (!shared) | ||
112 | sz -= 7 * sizeof(u64); | ||
113 | if (ubase_size < sz) { | ||
114 | ret = -EINVAL; | ||
115 | goto bail; | ||
116 | } | ||
117 | |||
118 | kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL); | ||
119 | if (kinfo == NULL) { | ||
120 | ret = -ENOMEM; | ||
121 | goto bail; | ||
122 | } | ||
123 | |||
124 | ret = dd->f_get_base_info(rcd, kinfo); | ||
125 | if (ret < 0) | ||
126 | goto bail; | ||
127 | |||
128 | kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt; | ||
129 | kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize; | ||
130 | kinfo->spi_tidegrcnt = rcd->rcvegrcnt; | ||
131 | kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize; | ||
132 | /* | ||
133 | * have to mmap whole thing | ||
134 | */ | ||
135 | kinfo->spi_rcv_egrbuftotlen = | ||
136 | rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size; | ||
137 | kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk; | ||
138 | kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen / | ||
139 | rcd->rcvegrbuf_chunks; | ||
140 | kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt; | ||
141 | if (master) | ||
142 | kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt; | ||
143 | /* | ||
144 | * for this use, may be cfgctxts summed over all chips that | ||
145 | * are configured and present | ||
146 | */ | ||
147 | kinfo->spi_nctxts = dd->cfgctxts; | ||
148 | /* unit (chip/board) our context is on */ | ||
149 | kinfo->spi_unit = dd->unit; | ||
150 | kinfo->spi_port = ppd->port; | ||
151 | /* for now, only a single page */ | ||
152 | kinfo->spi_tid_maxsize = PAGE_SIZE; | ||
153 | |||
154 | /* | ||
155 | * Doing this per context, and based on the skip value, etc. This has | ||
156 | * to be the actual buffer size, since the protocol code treats it | ||
157 | * as an array. | ||
158 | * | ||
159 | * These have to be set to user addresses in the user code via mmap. | ||
160 | * These values are used on return to user code for the mmap target | ||
161 | * addresses only. For 32 bit, same 44 bit address problem, so use | ||
162 | * the physical address, not virtual. Before 2.6.11, using the | ||
163 | * page_address() macro worked, but in 2.6.11, even that returns the | ||
164 | * full 64 bit address (upper bits all 1's). So far, using the | ||
165 | * physical addresses (or chip offsets, for chip mapping) works, but | ||
166 | * no doubt some future kernel release will change that, and we'll be | ||
167 | * on to yet another method of dealing with this. | ||
168 | * Normally only one of rcvhdr_tailaddr or rhf_offset is useful | ||
169 | * since the chips with non-zero rhf_offset don't normally | ||
170 | * enable tail register updates to host memory, but for testing, | ||
171 | * both can be enabled and used. | ||
172 | */ | ||
173 | kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys; | ||
174 | kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys; | ||
175 | kinfo->spi_rhf_offset = dd->rhf_offset; | ||
176 | kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys; | ||
177 | kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys; | ||
178 | /* setup per-unit (not port) status area for user programs */ | ||
179 | kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + | ||
180 | (char *) ppd->statusp - | ||
181 | (char *) dd->pioavailregs_dma; | ||
182 | kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt; | ||
183 | if (!shared) { | ||
184 | kinfo->spi_piocnt = rcd->piocnt; | ||
185 | kinfo->spi_piobufbase = (u64) rcd->piobufs; | ||
186 | kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask); | ||
187 | } else if (master) { | ||
188 | kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) + | ||
189 | (rcd->piocnt % subctxt_cnt); | ||
190 | /* Master's PIO buffers are after all the slave's */ | ||
191 | kinfo->spi_piobufbase = (u64) rcd->piobufs + | ||
192 | dd->palign * | ||
193 | (rcd->piocnt - kinfo->spi_piocnt); | ||
194 | } else { | ||
195 | unsigned slave = subctxt_fp(fp) - 1; | ||
196 | |||
197 | kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt; | ||
198 | kinfo->spi_piobufbase = (u64) rcd->piobufs + | ||
199 | dd->palign * kinfo->spi_piocnt * slave; | ||
200 | } | ||
201 | |||
202 | if (shared) { | ||
203 | kinfo->spi_sendbuf_status = | ||
204 | cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]); | ||
205 | /* only spi_subctxt_* fields should be set in this block! */ | ||
206 | kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase); | ||
207 | |||
208 | kinfo->spi_subctxt_rcvegrbuf = | ||
209 | cvt_kvaddr(rcd->subctxt_rcvegrbuf); | ||
210 | kinfo->spi_subctxt_rcvhdr_base = | ||
211 | cvt_kvaddr(rcd->subctxt_rcvhdr_base); | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * All user buffers are 2KB buffers. If we ever support | ||
216 | * giving 4KB buffers to user processes, this will need some | ||
217 | * work. Can't use piobufbase directly, because it has | ||
218 | * both 2K and 4K buffer base values. | ||
219 | */ | ||
220 | kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) / | ||
221 | dd->palign; | ||
222 | kinfo->spi_pioalign = dd->palign; | ||
223 | kinfo->spi_qpair = QIB_KD_QP; | ||
224 | /* | ||
225 | * user mode PIO buffers are always 2KB, even when 4KB can | ||
226 | * be received, and sent via the kernel; this is ibmaxlen | ||
227 | * for 2K MTU. | ||
228 | */ | ||
229 | kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32); | ||
230 | kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */ | ||
231 | kinfo->spi_ctxt = rcd->ctxt; | ||
232 | kinfo->spi_subctxt = subctxt_fp(fp); | ||
233 | kinfo->spi_sw_version = QIB_KERN_SWVERSION; | ||
234 | kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */ | ||
235 | kinfo->spi_hw_version = dd->revision; | ||
236 | |||
237 | if (master) | ||
238 | kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER; | ||
239 | |||
240 | sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo); | ||
241 | if (copy_to_user(ubase, kinfo, sz)) | ||
242 | ret = -EFAULT; | ||
243 | bail: | ||
244 | kfree(kinfo); | ||
245 | return ret; | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * qib_tid_update - update a context TID | ||
250 | * @rcd: the context | ||
251 | * @fp: the qib device file | ||
252 | * @ti: the TID information | ||
253 | * | ||
254 | * The new implementation as of Oct 2004 is that the driver assigns | ||
255 | * the tid and returns it to the caller. To reduce search time, we | ||
256 | * keep a cursor for each context, walking the shadow tid array to find | ||
257 | * one that's not in use. | ||
258 | * | ||
259 | * For now, if we can't allocate the full list, we fail, although | ||
260 | * in the long run, we'll allocate as many as we can, and the | ||
261 | * caller will deal with that by trying the remaining pages later. | ||
262 | * That means that when we fail, we have to mark the tids as not in | ||
263 | * use again, in our shadow copy. | ||
264 | * | ||
265 | * It's up to the caller to free the tids when they are done. | ||
266 | * We'll unlock the pages as they free them. | ||
267 | * | ||
268 | * Also, right now we are locking one page at a time, but since | ||
269 | * the intended use of this routine is for a single group of | ||
270 | * virtually contiguous pages, that should change to improve | ||
271 | * performance. | ||
272 | */ | ||
273 | static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, | ||
274 | const struct qib_tid_info *ti) | ||
275 | { | ||
276 | int ret = 0, ntids; | ||
277 | u32 tid, ctxttid, cnt, i, tidcnt, tidoff; | ||
278 | u16 *tidlist; | ||
279 | struct qib_devdata *dd = rcd->dd; | ||
280 | u64 physaddr; | ||
281 | unsigned long vaddr; | ||
282 | u64 __iomem *tidbase; | ||
283 | unsigned long tidmap[8]; | ||
284 | struct page **pagep = NULL; | ||
285 | unsigned subctxt = subctxt_fp(fp); | ||
286 | |||
287 | if (!dd->pageshadow) { | ||
288 | ret = -ENOMEM; | ||
289 | goto done; | ||
290 | } | ||
291 | |||
292 | cnt = ti->tidcnt; | ||
293 | if (!cnt) { | ||
294 | ret = -EFAULT; | ||
295 | goto done; | ||
296 | } | ||
297 | ctxttid = rcd->ctxt * dd->rcvtidcnt; | ||
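| /* | ||
| * For a shared context, split the TIDs among subcontexts: each | ||
| * slave gets an equal share starting at tidcnt * (subctxt - 1); | ||
| * the master takes its share plus any remainder, placed after | ||
| * all of the slaves' ranges. | ||
| */ | ||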
298 | if (!rcd->subctxt_cnt) { | ||
299 | tidcnt = dd->rcvtidcnt; | ||
300 | tid = rcd->tidcursor; | ||
301 | tidoff = 0; | ||
302 | } else if (!subctxt) { | ||
303 | tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) + | ||
304 | (dd->rcvtidcnt % rcd->subctxt_cnt); | ||
305 | tidoff = dd->rcvtidcnt - tidcnt; | ||
306 | ctxttid += tidoff; | ||
307 | tid = tidcursor_fp(fp); | ||
308 | } else { | ||
309 | tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt; | ||
310 | tidoff = tidcnt * (subctxt - 1); | ||
311 | ctxttid += tidoff; | ||
312 | tid = tidcursor_fp(fp); | ||
313 | } | ||
314 | if (cnt > tidcnt) { | ||
315 | /* make sure it all fits in tid_pg_list */ | ||
316 | qib_devinfo(dd->pcidev, "Process tried to allocate %u " | ||
317 | "TIDs, only trying max (%u)\n", cnt, tidcnt); | ||
318 | cnt = tidcnt; | ||
319 | } | ||
320 | pagep = (struct page **) rcd->tid_pg_list; | ||
321 | tidlist = (u16 *) &pagep[dd->rcvtidcnt]; | ||
322 | pagep += tidoff; | ||
323 | tidlist += tidoff; | ||
324 | |||
325 | memset(tidmap, 0, sizeof(tidmap)); | ||
326 | /* before decrement; chip actual # */ | ||
327 | ntids = tidcnt; | ||
328 | tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) + | ||
329 | dd->rcvtidbase + | ||
330 | ctxttid * sizeof(*tidbase)); | ||
331 | |||
332 | /* virtual address of first page in transfer */ | ||
333 | vaddr = ti->tidvaddr; | ||
334 | if (!access_ok(VERIFY_WRITE, (void __user *) vaddr, | ||
335 | cnt * PAGE_SIZE)) { | ||
336 | ret = -EFAULT; | ||
337 | goto done; | ||
338 | } | ||
339 | ret = qib_get_user_pages(vaddr, cnt, pagep); | ||
340 | if (ret) { | ||
341 | /* | ||
342 | * if (ret == -EBUSY) | ||
343 | * We can't continue because the pagep array won't be | ||
344 | * initialized. This should never happen, | ||
345 | * unless perhaps the user has mpin'ed the pages | ||
346 | * themselves. | ||
347 | */ | ||
348 | qib_devinfo(dd->pcidev, | ||
349 | "Failed to lock addr %p, %u pages: " | ||
350 | "errno %d\n", (void *) vaddr, cnt, -ret); | ||
351 | goto done; | ||
352 | } | ||
353 | for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { | ||
354 | for (; ntids--; tid++) { | ||
355 | if (tid == tidcnt) | ||
356 | tid = 0; | ||
357 | if (!dd->pageshadow[ctxttid + tid]) | ||
358 | break; | ||
359 | } | ||
360 | if (ntids < 0) { | ||
361 | /* | ||
362 | * Oops, wrapped all the way through their TIDs, | ||
363 | * and didn't have enough free; see comments at | ||
364 | * start of routine | ||
365 | */ | ||
366 | i--; /* last tidlist[i] not filled in */ | ||
367 | ret = -ENOMEM; | ||
368 | break; | ||
369 | } | ||
370 | tidlist[i] = tid + tidoff; | ||
371 | /* we "know" system pages and TID pages are same size */ | ||
372 | dd->pageshadow[ctxttid + tid] = pagep[i]; | ||
373 | dd->physshadow[ctxttid + tid] = | ||
374 | qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE, | ||
375 | PCI_DMA_FROMDEVICE); | ||
376 | /* | ||
377 | * don't need atomic, or its overhead | ||
378 | */ | ||
379 | __set_bit(tid, tidmap); | ||
380 | physaddr = dd->physshadow[ctxttid + tid]; | ||
381 | /* PERFORMANCE: below should almost certainly be cached */ | ||
382 | dd->f_put_tid(dd, &tidbase[tid], | ||
383 | RCVHQ_RCV_TYPE_EXPECTED, physaddr); | ||
384 | /* | ||
385 | * don't check this tid in qib_ctxtshadow, since we | ||
386 | * just filled it in; start with the next one. | ||
387 | */ | ||
388 | tid++; | ||
389 | } | ||
390 | |||
391 | if (ret) { | ||
392 | u32 limit; | ||
393 | cleanup: | ||
394 | /* jump here if copy out of updated info failed... */ | ||
395 | /* same code that's in qib_tid_free() */ | ||
396 | limit = sizeof(tidmap) * BITS_PER_BYTE; | ||
397 | if (limit > tidcnt) | ||
398 | /* just in case size changes in future */ | ||
399 | limit = tidcnt; | ||
400 | tid = find_first_bit((const unsigned long *)tidmap, limit); | ||
401 | for (; tid < limit; tid++) { | ||
402 | if (!test_bit(tid, tidmap)) | ||
403 | continue; | ||
404 | if (dd->pageshadow[ctxttid + tid]) { | ||
405 | dma_addr_t phys; | ||
406 | |||
407 | phys = dd->physshadow[ctxttid + tid]; | ||
408 | dd->physshadow[ctxttid + tid] = dd->tidinvalid; | ||
409 | /* PERFORMANCE: below should almost certainly | ||
410 | * be cached | ||
411 | */ | ||
412 | dd->f_put_tid(dd, &tidbase[tid], | ||
413 | RCVHQ_RCV_TYPE_EXPECTED, | ||
414 | dd->tidinvalid); | ||
415 | pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, | ||
416 | PCI_DMA_FROMDEVICE); | ||
417 | dd->pageshadow[ctxttid + tid] = NULL; | ||
418 | } | ||
419 | } | ||
420 | qib_release_user_pages(pagep, cnt); | ||
421 | } else { | ||
422 | /* | ||
423 | * Copy the updated array, with qib_tid's filled in, back | ||
424 | * to user. Since we did the copy in already, this "should | ||
425 | * never fail" If it does, we have to clean up... | ||
426 | */ | ||
427 | if (copy_to_user((void __user *) | ||
428 | (unsigned long) ti->tidlist, | ||
429 | tidlist, cnt * sizeof(*tidlist))) { | ||
430 | ret = -EFAULT; | ||
431 | goto cleanup; | ||
432 | } | ||
433 | if (copy_to_user((void __user *) (unsigned long) ti->tidmap, | ||
434 | tidmap, sizeof tidmap)) { | ||
435 | ret = -EFAULT; | ||
436 | goto cleanup; | ||
437 | } | ||
438 | if (tid == tidcnt) | ||
439 | tid = 0; | ||
440 | if (!rcd->subctxt_cnt) | ||
441 | rcd->tidcursor = tid; | ||
442 | else | ||
443 | tidcursor_fp(fp) = tid; | ||
444 | } | ||
445 | |||
446 | done: | ||
447 | return ret; | ||
448 | } | ||
449 | |||
450 | /** | ||
451 | * qib_tid_free - free a context TID | ||
452 | * @rcd: the context | ||
453 | * @subctxt: the subcontext | ||
454 | * @ti: the TID info | ||
455 | * | ||
456 | * right now we are unlocking one page at a time, but since | ||
457 | * the intended use of this routine is for a single group of | ||
458 | * virtually contiguous pages, that should change to improve | ||
459 | * performance. We check that the TID is in range for this context | ||
460 | * but otherwise don't check validity; if the user has an error and | ||
461 | * frees the wrong tid, it's only their own data that can thereby | ||
462 | * be corrupted. We do check that the TID was in use, for sanity. | ||
463 | * We always use our idea of the saved address, not the address that | ||
464 | * they pass in to us. | ||
465 | */ | ||
466 | static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt, | ||
467 | const struct qib_tid_info *ti) | ||
468 | { | ||
469 | int ret = 0; | ||
470 | u32 tid, ctxttid, cnt, limit, tidcnt; | ||
471 | struct qib_devdata *dd = rcd->dd; | ||
472 | u64 __iomem *tidbase; | ||
473 | unsigned long tidmap[8]; | ||
474 | |||
475 | if (!dd->pageshadow) { | ||
476 | ret = -ENOMEM; | ||
477 | goto done; | ||
478 | } | ||
479 | |||
480 | if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, | ||
481 | sizeof tidmap)) { | ||
482 | ret = -EFAULT; | ||
483 | goto done; | ||
484 | } | ||
485 | |||
486 | ctxttid = rcd->ctxt * dd->rcvtidcnt; | ||
487 | if (!rcd->subctxt_cnt) | ||
488 | tidcnt = dd->rcvtidcnt; | ||
489 | else if (!subctxt) { | ||
490 | tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) + | ||
491 | (dd->rcvtidcnt % rcd->subctxt_cnt); | ||
492 | ctxttid += dd->rcvtidcnt - tidcnt; | ||
493 | } else { | ||
494 | tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt; | ||
495 | ctxttid += tidcnt * (subctxt - 1); | ||
496 | } | ||
497 | tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) + | ||
498 | dd->rcvtidbase + | ||
499 | ctxttid * sizeof(*tidbase)); | ||
500 | |||
501 | limit = sizeof(tidmap) * BITS_PER_BYTE; | ||
502 | if (limit > tidcnt) | ||
503 | /* just in case size changes in future */ | ||
504 | limit = tidcnt; | ||
505 | tid = find_first_bit(tidmap, limit); | ||
506 | for (cnt = 0; tid < limit; tid++) { | ||
507 | /* | ||
508 | * small optimization; if we detect a run of 3 or so without | ||
509 | * any set, use find_first_bit again. That's mainly to | ||
510 | * accelerate the case where we wrapped, so we have some at | ||
511 | * the beginning, and some at the end, and a big gap | ||
512 | * in the middle. | ||
513 | */ | ||
514 | if (!test_bit(tid, tidmap)) | ||
515 | continue; | ||
516 | cnt++; | ||
517 | if (dd->pageshadow[ctxttid + tid]) { | ||
518 | struct page *p; | ||
519 | dma_addr_t phys; | ||
520 | |||
521 | p = dd->pageshadow[ctxttid + tid]; | ||
522 | dd->pageshadow[ctxttid + tid] = NULL; | ||
523 | phys = dd->physshadow[ctxttid + tid]; | ||
524 | dd->physshadow[ctxttid + tid] = dd->tidinvalid; | ||
525 | /* PERFORMANCE: below should almost certainly be | ||
526 | * cached | ||
527 | */ | ||
528 | dd->f_put_tid(dd, &tidbase[tid], | ||
529 | RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid); | ||
530 | pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, | ||
531 | PCI_DMA_FROMDEVICE); | ||
532 | qib_release_user_pages(&p, 1); | ||
533 | } | ||
534 | } | ||
535 | done: | ||
536 | return ret; | ||
537 | } | ||
538 | |||
539 | /** | ||
540 | * qib_set_part_key - set a partition key | ||
541 | * @rcd: the context | ||
542 | * @key: the key | ||
543 | * | ||
544 | * We can have up to 4 active at a time (other than the default, which is | ||
545 | * always allowed). This is somewhat tricky, since multiple contexts may set | ||
546 | * the same key, so we reference count them, and clean up at exit. All 4 | ||
547 | * partition keys are packed into a single qlogic_ib register. It's an | ||
548 | * error for a process to set the same pkey multiple times. We provide no | ||
549 | * mechanism to de-allocate a pkey at this time; we may eventually need | ||
550 | * to do that. I've used atomic operations, and no locking, and only make | ||
551 | * a single pass through what's available. This should be more than | ||
552 | * adequate for some time. I'll think about spinlocks or the like if and as | ||
553 | * it's necessary. | ||
554 | */ | ||
555 | static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key) | ||
556 | { | ||
557 | struct qib_pportdata *ppd = rcd->ppd; | ||
558 | int i, any = 0, pidx = -1; | ||
559 | u16 lkey = key & 0x7FFF; | ||
560 | int ret; | ||
561 | |||
562 | if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) { | ||
563 | /* nothing to do; this key always valid */ | ||
564 | ret = 0; | ||
565 | goto bail; | ||
566 | } | ||
567 | |||
568 | if (!lkey) { | ||
569 | ret = -EINVAL; | ||
570 | goto bail; | ||
571 | } | ||
572 | |||
573 | /* | ||
574 | * Set the full membership bit, because it has to be | ||
575 | * set in the register or the packet, and it seems | ||
576 | * cleaner to set in the register than to force all | ||
577 | * callers to set it. | ||
578 | */ | ||
579 | key |= 0x8000; | ||
580 | |||
581 | for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) { | ||
582 | if (!rcd->pkeys[i] && pidx == -1) | ||
583 | pidx = i; | ||
584 | if (rcd->pkeys[i] == key) { | ||
585 | ret = -EEXIST; | ||
586 | goto bail; | ||
587 | } | ||
588 | } | ||
589 | if (pidx == -1) { | ||
590 | ret = -EBUSY; | ||
591 | goto bail; | ||
592 | } | ||
593 | for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { | ||
594 | if (!ppd->pkeys[i]) { | ||
595 | any++; | ||
596 | continue; | ||
597 | } | ||
598 | if (ppd->pkeys[i] == key) { | ||
599 | atomic_t *pkrefs = &ppd->pkeyrefs[i]; | ||
600 | |||
601 | if (atomic_inc_return(pkrefs) > 1) { | ||
602 | rcd->pkeys[pidx] = key; | ||
603 | ret = 0; | ||
604 | goto bail; | ||
605 | } else { | ||
606 | /* | ||
607 | * lost race, decrement count, catch below | ||
608 | */ | ||
609 | atomic_dec(pkrefs); | ||
610 | any++; | ||
611 | } | ||
612 | } | ||
613 | if ((ppd->pkeys[i] & 0x7FFF) == lkey) { | ||
614 | /* | ||
615 | * It makes no sense to have both the limited and | ||
616 | * full membership PKEY set at the same time since | ||
617 | * the unlimited one will disable the limited one. | ||
618 | */ | ||
619 | ret = -EEXIST; | ||
620 | goto bail; | ||
621 | } | ||
622 | } | ||
623 | if (!any) { | ||
624 | ret = -EBUSY; | ||
625 | goto bail; | ||
626 | } | ||
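| /* | ||
| * No existing table entry matched this key; claim the first unused | ||
| * hardware pkey slot and push the updated table to the chip. | ||
| */ | ||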
627 | for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { | ||
628 | if (!ppd->pkeys[i] && | ||
629 | atomic_inc_return(&ppd->pkeyrefs[i]) == 1) { | ||
630 | rcd->pkeys[pidx] = key; | ||
631 | ppd->pkeys[i] = key; | ||
632 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); | ||
633 | ret = 0; | ||
634 | goto bail; | ||
635 | } | ||
636 | } | ||
637 | ret = -EBUSY; | ||
638 | |||
639 | bail: | ||
640 | return ret; | ||
641 | } | ||
642 | |||
643 | /** | ||
644 | * qib_manage_rcvq - manage a context's receive queue | ||
645 | * @rcd: the context | ||
646 | * @subctxt: the subcontext | ||
647 | * @start_stop: action to carry out | ||
648 | * | ||
649 | * start_stop == 0 disables receive on the context, for use in queue | ||
650 | * overflow conditions. start_stop==1 re-enables, to be used to | ||
651 | * re-init the software copy of the head register | ||
652 | */ | ||
653 | static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt, | ||
654 | int start_stop) | ||
655 | { | ||
656 | struct qib_devdata *dd = rcd->dd; | ||
657 | unsigned int rcvctrl_op; | ||
658 | |||
659 | if (subctxt) | ||
660 | goto bail; | ||
661 | /* atomically enable or disable receive for this ctxt. */ | ||
662 | if (start_stop) { | ||
663 | /* | ||
664 | * On enable, force in-memory copy of the tail register to | ||
665 | * 0, so that protocol code doesn't have to worry about | ||
666 | * whether or not the chip has yet updated the in-memory | ||
667 | * copy on return from the system call. The chip | ||
668 | * always resets its tail register back to 0 on a | ||
669 | * transition from disabled to enabled. | ||
670 | */ | ||
671 | if (rcd->rcvhdrtail_kvaddr) | ||
672 | qib_clear_rcvhdrtail(rcd); | ||
673 | rcvctrl_op = QIB_RCVCTRL_CTXT_ENB; | ||
674 | } else | ||
675 | rcvctrl_op = QIB_RCVCTRL_CTXT_DIS; | ||
676 | dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt); | ||
677 | /* always; new head should be equal to new tail; see above */ | ||
678 | bail: | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static void qib_clean_part_key(struct qib_ctxtdata *rcd, | ||
683 | struct qib_devdata *dd) | ||
684 | { | ||
685 | int i, j, pchanged = 0; | ||
686 | u64 oldpkey; | ||
687 | struct qib_pportdata *ppd = rcd->ppd; | ||
688 | |||
689 | /* for debugging only */ | ||
690 | oldpkey = (u64) ppd->pkeys[0] | | ||
691 | ((u64) ppd->pkeys[1] << 16) | | ||
692 | ((u64) ppd->pkeys[2] << 32) | | ||
693 | ((u64) ppd->pkeys[3] << 48); | ||
694 | |||
695 | for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) { | ||
696 | if (!rcd->pkeys[i]) | ||
697 | continue; | ||
698 | for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) { | ||
699 | /* check for match independent of the global bit */ | ||
700 | if ((ppd->pkeys[j] & 0x7fff) != | ||
701 | (rcd->pkeys[i] & 0x7fff)) | ||
702 | continue; | ||
703 | if (atomic_dec_and_test(&ppd->pkeyrefs[j])) { | ||
704 | ppd->pkeys[j] = 0; | ||
705 | pchanged++; | ||
706 | } | ||
707 | break; | ||
708 | } | ||
709 | rcd->pkeys[i] = 0; | ||
710 | } | ||
711 | if (pchanged) | ||
712 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); | ||
713 | } | ||
714 | |||
715 | /* common code for the mappings on dma_alloc_coherent mem */ | ||
716 | static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd, | ||
717 | unsigned len, void *kvaddr, u32 write_ok, char *what) | ||
718 | { | ||
719 | struct qib_devdata *dd = rcd->dd; | ||
720 | unsigned long pfn; | ||
721 | int ret; | ||
722 | |||
723 | if ((vma->vm_end - vma->vm_start) > len) { | ||
724 | qib_devinfo(dd->pcidev, | ||
725 | "FAIL on %s: len %lx > %x\n", what, | ||
726 | vma->vm_end - vma->vm_start, len); | ||
727 | ret = -EFAULT; | ||
728 | goto bail; | ||
729 | } | ||
730 | |||
731 | /* | ||
732 | * shared context user code requires rcvhdrq mapped r/w; others | ||
733 | * are allowed only a read-only mapping. | ||
734 | */ | ||
735 | if (!write_ok) { | ||
736 | if (vma->vm_flags & VM_WRITE) { | ||
737 | qib_devinfo(dd->pcidev, | ||
738 | "%s must be mapped readonly\n", what); | ||
739 | ret = -EPERM; | ||
740 | goto bail; | ||
741 | } | ||
742 | |||
743 | /* don't allow them to later change with mprotect */ | ||
744 | vma->vm_flags &= ~VM_MAYWRITE; | ||
745 | } | ||
746 | |||
747 | pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT; | ||
748 | ret = remap_pfn_range(vma, vma->vm_start, pfn, | ||
749 | len, vma->vm_page_prot); | ||
750 | if (ret) | ||
751 | qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x " | ||
752 | "bytes failed: %d\n", what, rcd->ctxt, | ||
753 | pfn, len, ret); | ||
754 | bail: | ||
755 | return ret; | ||
756 | } | ||
757 | |||
758 | static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd, | ||
759 | u64 ureg) | ||
760 | { | ||
761 | unsigned long phys; | ||
762 | unsigned long sz; | ||
763 | int ret; | ||
764 | |||
765 | /* | ||
766 | * This is real hardware, so use io_remap. This is the mechanism | ||
767 | * for the user process to update the head registers for their ctxt | ||
768 | * in the chip. | ||
769 | */ | ||
770 | sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE; | ||
771 | if ((vma->vm_end - vma->vm_start) > sz) { | ||
772 | qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen " | ||
773 | "%lx > PAGE\n", vma->vm_end - vma->vm_start); | ||
774 | ret = -EFAULT; | ||
775 | } else { | ||
776 | phys = dd->physaddr + ureg; | ||
777 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
778 | |||
779 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | ||
780 | ret = io_remap_pfn_range(vma, vma->vm_start, | ||
781 | phys >> PAGE_SHIFT, | ||
782 | vma->vm_end - vma->vm_start, | ||
783 | vma->vm_page_prot); | ||
784 | } | ||
785 | return ret; | ||
786 | } | ||
787 | |||
788 | static int mmap_piobufs(struct vm_area_struct *vma, | ||
789 | struct qib_devdata *dd, | ||
790 | struct qib_ctxtdata *rcd, | ||
791 | unsigned piobufs, unsigned piocnt) | ||
792 | { | ||
793 | unsigned long phys; | ||
794 | int ret; | ||
795 | |||
796 | /* | ||
797 | * When we map the PIO buffers in the chip, we want to map them as | ||
798 | * writeonly, no read possible; unfortunately, x86 doesn't allow | ||
799 | * for this in hardware, but we still prevent users from asking | ||
800 | * for it. | ||
801 | */ | ||
802 | if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) { | ||
803 | qib_devinfo(dd->pcidev, "FAIL mmap piobufs: " | ||
804 | "reqlen %lx > PAGE\n", | ||
805 | vma->vm_end - vma->vm_start); | ||
806 | ret = -EINVAL; | ||
807 | goto bail; | ||
808 | } | ||
809 | |||
810 | phys = dd->physaddr + piobufs; | ||
811 | |||
812 | #if defined(__powerpc__) | ||
813 | /* There isn't a generic way to specify writethrough mappings */ | ||
814 | pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; | ||
815 | pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; | ||
816 | pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED; | ||
817 | #endif | ||
818 | |||
819 | /* | ||
820 | * don't allow them to later change to readable with mprotect, since | ||
821 | * the buffers are normally not mapped readable to begin with | ||
822 | */ | ||
823 | vma->vm_flags &= ~VM_MAYREAD; | ||
824 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | ||
825 | |||
826 | if (qib_wc_pat) | ||
827 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
828 | |||
829 | ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, | ||
830 | vma->vm_end - vma->vm_start, | ||
831 | vma->vm_page_prot); | ||
832 | bail: | ||
833 | return ret; | ||
834 | } | ||
835 | |||
836 | static int mmap_rcvegrbufs(struct vm_area_struct *vma, | ||
837 | struct qib_ctxtdata *rcd) | ||
838 | { | ||
839 | struct qib_devdata *dd = rcd->dd; | ||
840 | unsigned long start, size; | ||
841 | size_t total_size, i; | ||
842 | unsigned long pfn; | ||
843 | int ret; | ||
844 | |||
845 | size = rcd->rcvegrbuf_size; | ||
846 | total_size = rcd->rcvegrbuf_chunks * size; | ||
847 | if ((vma->vm_end - vma->vm_start) > total_size) { | ||
848 | qib_devinfo(dd->pcidev, "FAIL on egr bufs: " | ||
849 | "reqlen %lx > actual %lx\n", | ||
850 | vma->vm_end - vma->vm_start, | ||
851 | (unsigned long) total_size); | ||
852 | ret = -EINVAL; | ||
853 | goto bail; | ||
854 | } | ||
855 | |||
856 | if (vma->vm_flags & VM_WRITE) { | ||
857 | qib_devinfo(dd->pcidev, "Can't map eager buffers as " | ||
858 | "writable (flags=%lx)\n", vma->vm_flags); | ||
859 | ret = -EPERM; | ||
860 | goto bail; | ||
861 | } | ||
862 | /* don't allow them to later change to writeable with mprotect */ | ||
863 | vma->vm_flags &= ~VM_MAYWRITE; | ||
864 | |||
865 | start = vma->vm_start; | ||
866 | |||
867 | for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) { | ||
868 | pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT; | ||
869 | ret = remap_pfn_range(vma, start, pfn, size, | ||
870 | vma->vm_page_prot); | ||
871 | if (ret < 0) | ||
872 | goto bail; | ||
873 | } | ||
874 | ret = 0; | ||
875 | |||
876 | bail: | ||
877 | return ret; | ||
878 | } | ||
879 | |||
880 | /* | ||
881 | * qib_file_vma_fault - handle a VMA page fault. | ||
882 | */ | ||
883 | static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
884 | { | ||
885 | struct page *page; | ||
886 | |||
887 | page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); | ||
888 | if (!page) | ||
889 | return VM_FAULT_SIGBUS; | ||
890 | |||
891 | get_page(page); | ||
892 | vmf->page = page; | ||
893 | |||
894 | return 0; | ||
895 | } | ||
896 | |||
897 | static struct vm_operations_struct qib_file_vm_ops = { | ||
898 | .fault = qib_file_vma_fault, | ||
899 | }; | ||
900 | |||
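| /* | ||
| * Match a page offset against the vmalloc'ed shared-context regions. | ||
| * Returns 1 if the request was handled here, 0 if the offset did not | ||
| * match any kernel virtual region (so the caller falls back to the | ||
| * physical-address mappings), or a negative errno on error. | ||
| */ | ||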
901 | static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, | ||
902 | struct qib_ctxtdata *rcd, unsigned subctxt) | ||
903 | { | ||
904 | struct qib_devdata *dd = rcd->dd; | ||
905 | unsigned subctxt_cnt; | ||
906 | unsigned long len; | ||
907 | void *addr; | ||
908 | size_t size; | ||
909 | int ret = 0; | ||
910 | |||
911 | subctxt_cnt = rcd->subctxt_cnt; | ||
912 | size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size; | ||
913 | |||
914 | /* | ||
915 | * Each process has all the subctxt uregbase, rcvhdrq, and | ||
916 | * rcvegrbufs mmapped - as an array for all the processes, | ||
917 | * and also separately for this process. | ||
918 | */ | ||
919 | if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) { | ||
920 | addr = rcd->subctxt_uregbase; | ||
921 | size = PAGE_SIZE * subctxt_cnt; | ||
922 | } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) { | ||
923 | addr = rcd->subctxt_rcvhdr_base; | ||
924 | size = rcd->rcvhdrq_size * subctxt_cnt; | ||
925 | } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) { | ||
926 | addr = rcd->subctxt_rcvegrbuf; | ||
927 | size *= subctxt_cnt; | ||
928 | } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase + | ||
929 | PAGE_SIZE * subctxt)) { | ||
930 | addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt; | ||
931 | size = PAGE_SIZE; | ||
932 | } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base + | ||
933 | rcd->rcvhdrq_size * subctxt)) { | ||
934 | addr = rcd->subctxt_rcvhdr_base + | ||
935 | rcd->rcvhdrq_size * subctxt; | ||
936 | size = rcd->rcvhdrq_size; | ||
937 | } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) { | ||
938 | addr = rcd->user_event_mask; | ||
939 | size = PAGE_SIZE; | ||
940 | } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf + | ||
941 | size * subctxt)) { | ||
942 | addr = rcd->subctxt_rcvegrbuf + size * subctxt; | ||
943 | /* rcvegrbufs are read-only on the slave */ | ||
944 | if (vma->vm_flags & VM_WRITE) { | ||
945 | qib_devinfo(dd->pcidev, | ||
946 | "Can't map eager buffers as " | ||
947 | "writable (flags=%lx)\n", vma->vm_flags); | ||
948 | ret = -EPERM; | ||
949 | goto bail; | ||
950 | } | ||
951 | /* | ||
952 | * Don't allow permission to later change to writeable | ||
953 | * with mprotect. | ||
954 | */ | ||
955 | vma->vm_flags &= ~VM_MAYWRITE; | ||
956 | } else | ||
957 | goto bail; | ||
958 | len = vma->vm_end - vma->vm_start; | ||
959 | if (len > size) { | ||
960 | ret = -EINVAL; | ||
961 | goto bail; | ||
962 | } | ||
963 | |||
964 | vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT; | ||
965 | vma->vm_ops = &qib_file_vm_ops; | ||
966 | vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; | ||
967 | ret = 1; | ||
968 | |||
969 | bail: | ||
970 | return ret; | ||
971 | } | ||
972 | |||
973 | /** | ||
974 | * qib_mmapf - mmap various structures into user space | ||
975 | * @fp: the file pointer | ||
976 | * @vma: the VM area | ||
977 | * | ||
978 | * We use this to have a shared buffer between the kernel and the user code | ||
979 | * for the rcvhdr queue, egr buffers, and the per-context user regs and pio | ||
980 | * buffers in the chip. We have the open and close entries so we can bump | ||
981 | * the ref count and keep the driver from being unloaded while still mapped. | ||
982 | */ | ||
983 | static int qib_mmapf(struct file *fp, struct vm_area_struct *vma) | ||
984 | { | ||
985 | struct qib_ctxtdata *rcd; | ||
986 | struct qib_devdata *dd; | ||
987 | u64 pgaddr, ureg; | ||
988 | unsigned piobufs, piocnt; | ||
989 | int ret, match = 1; | ||
990 | |||
991 | rcd = ctxt_fp(fp); | ||
992 | if (!rcd || !(vma->vm_flags & VM_SHARED)) { | ||
993 | ret = -EINVAL; | ||
994 | goto bail; | ||
995 | } | ||
996 | dd = rcd->dd; | ||
997 | |||
998 | /* | ||
999 | * This is the qib_do_user_init() code, mapping the shared buffers | ||
1000 | * and per-context user registers into the user process. The address | ||
1001 | * referred to by vm_pgoff is the file offset passed via mmap(). | ||
1002 | * For shared contexts, this is the kernel vmalloc() address of the | ||
1003 | * pages to share with the master. | ||
1004 | * For non-shared or master ctxts, this is a physical address. | ||
1005 | * We only do one mmap for each space mapped. | ||
1006 | */ | ||
1007 | pgaddr = vma->vm_pgoff << PAGE_SHIFT; | ||
1008 | |||
1009 | /* | ||
1010 | * Check for 0 in case one of the allocations failed, but user | ||
1011 | * called mmap anyway. | ||
1012 | */ | ||
1013 | if (!pgaddr) { | ||
1014 | ret = -EINVAL; | ||
1015 | goto bail; | ||
1016 | } | ||
1017 | |||
1018 | /* | ||
1019 | * Physical addresses must fit in 40 bits for our hardware. | ||
1020 | * Check for kernel virtual addresses first, anything else must | ||
1021 | * match a HW or memory address. | ||
1022 | */ | ||
1023 | ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp)); | ||
1024 | if (ret) { | ||
1025 | if (ret > 0) | ||
1026 | ret = 0; | ||
1027 | goto bail; | ||
1028 | } | ||
1029 | |||
1030 | ureg = dd->uregbase + dd->ureg_align * rcd->ctxt; | ||
1031 | if (!rcd->subctxt_cnt) { | ||
1032 | /* ctxt is not shared */ | ||
1033 | piocnt = rcd->piocnt; | ||
1034 | piobufs = rcd->piobufs; | ||
1035 | } else if (!subctxt_fp(fp)) { | ||
1036 | /* caller is the master */ | ||
1037 | piocnt = (rcd->piocnt / rcd->subctxt_cnt) + | ||
1038 | (rcd->piocnt % rcd->subctxt_cnt); | ||
1039 | piobufs = rcd->piobufs + | ||
1040 | dd->palign * (rcd->piocnt - piocnt); | ||
1041 | } else { | ||
1042 | unsigned slave = subctxt_fp(fp) - 1; | ||
1043 | |||
1044 | /* caller is a slave */ | ||
1045 | piocnt = rcd->piocnt / rcd->subctxt_cnt; | ||
1046 | piobufs = rcd->piobufs + dd->palign * piocnt * slave; | ||
1047 | } | ||
1048 | |||
1049 | if (pgaddr == ureg) | ||
1050 | ret = mmap_ureg(vma, dd, ureg); | ||
1051 | else if (pgaddr == piobufs) | ||
1052 | ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt); | ||
1053 | else if (pgaddr == dd->pioavailregs_phys) | ||
1054 | /* in-memory copy of pioavail registers */ | ||
1055 | ret = qib_mmap_mem(vma, rcd, PAGE_SIZE, | ||
1056 | (void *) dd->pioavailregs_dma, 0, | ||
1057 | "pioavail registers"); | ||
1058 | else if (pgaddr == rcd->rcvegr_phys) | ||
1059 | ret = mmap_rcvegrbufs(vma, rcd); | ||
1060 | else if (pgaddr == (u64) rcd->rcvhdrq_phys) | ||
1061 | /* | ||
1062 | * The rcvhdrq itself; multiple pages, contiguous | ||
1063 | * from an i/o perspective. Shared contexts need | ||
1064 | * to map r/w, so we allow writing. | ||
1065 | */ | ||
1066 | ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size, | ||
1067 | rcd->rcvhdrq, 1, "rcvhdrq"); | ||
1068 | else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys) | ||
1069 | /* in-memory copy of rcvhdrq tail register */ | ||
1070 | ret = qib_mmap_mem(vma, rcd, PAGE_SIZE, | ||
1071 | rcd->rcvhdrtail_kvaddr, 0, | ||
1072 | "rcvhdrq tail"); | ||
1073 | else | ||
1074 | match = 0; | ||
1075 | if (!match) | ||
1076 | ret = -EINVAL; | ||
1077 | |||
1078 | vma->vm_private_data = NULL; | ||
1079 | |||
1080 | if (ret < 0) | ||
1081 | qib_devinfo(dd->pcidev, | ||
1082 | "mmap Failure %d: off %llx len %lx\n", | ||
1083 | -ret, (unsigned long long)pgaddr, | ||
1084 | vma->vm_end - vma->vm_start); | ||
1085 | bail: | ||
1086 | return ret; | ||
1087 | } | ||
1088 | |||
1089 | static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd, | ||
1090 | struct file *fp, | ||
1091 | struct poll_table_struct *pt) | ||
1092 | { | ||
1093 | struct qib_devdata *dd = rcd->dd; | ||
1094 | unsigned pollflag; | ||
1095 | |||
1096 | poll_wait(fp, &rcd->wait, pt); | ||
1097 | |||
1098 | spin_lock_irq(&dd->uctxt_lock); | ||
1099 | if (rcd->urgent != rcd->urgent_poll) { | ||
1100 | pollflag = POLLIN | POLLRDNORM; | ||
1101 | rcd->urgent_poll = rcd->urgent; | ||
1102 | } else { | ||
1103 | pollflag = 0; | ||
1104 | set_bit(QIB_CTXT_WAITING_URG, &rcd->flag); | ||
1105 | } | ||
1106 | spin_unlock_irq(&dd->uctxt_lock); | ||
1107 | |||
1108 | return pollflag; | ||
1109 | } | ||
1110 | |||
1111 | static unsigned int qib_poll_next(struct qib_ctxtdata *rcd, | ||
1112 | struct file *fp, | ||
1113 | struct poll_table_struct *pt) | ||
1114 | { | ||
1115 | struct qib_devdata *dd = rcd->dd; | ||
1116 | unsigned pollflag; | ||
1117 | |||
1118 | poll_wait(fp, &rcd->wait, pt); | ||
1119 | |||
1120 | spin_lock_irq(&dd->uctxt_lock); | ||
1121 | if (dd->f_hdrqempty(rcd)) { | ||
1122 | set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag); | ||
1123 | dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); | ||
1124 | pollflag = 0; | ||
1125 | } else | ||
1126 | pollflag = POLLIN | POLLRDNORM; | ||
1127 | spin_unlock_irq(&dd->uctxt_lock); | ||
1128 | |||
1129 | return pollflag; | ||
1130 | } | ||
1131 | |||
1132 | static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt) | ||
1133 | { | ||
1134 | struct qib_ctxtdata *rcd; | ||
1135 | unsigned pollflag; | ||
1136 | |||
1137 | rcd = ctxt_fp(fp); | ||
1138 | if (!rcd) | ||
1139 | pollflag = POLLERR; | ||
1140 | else if (rcd->poll_type == QIB_POLL_TYPE_URGENT) | ||
1141 | pollflag = qib_poll_urgent(rcd, fp, pt); | ||
1142 | else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV) | ||
1143 | pollflag = qib_poll_next(rcd, fp, pt); | ||
1144 | else /* invalid */ | ||
1145 | pollflag = POLLERR; | ||
1146 | |||
1147 | return pollflag; | ||
1148 | } | ||
1149 | |||
1150 | /* | ||
1151 | * Check that userland and driver are compatible for subcontexts. | ||
1152 | */ | ||
1153 | static int qib_compatible_subctxts(int user_swmajor, int user_swminor) | ||
1154 | { | ||
1155 | /* this code is written long-hand for clarity */ | ||
1156 | if (QIB_USER_SWMAJOR != user_swmajor) { | ||
1157 | /* no promise of compatibility if major mismatch */ | ||
1158 | return 0; | ||
1159 | } | ||
1160 | if (QIB_USER_SWMAJOR == 1) { | ||
1161 | switch (QIB_USER_SWMINOR) { | ||
1162 | case 0: | ||
1163 | case 1: | ||
1164 | case 2: | ||
1165 | /* no subctxt implementation so cannot be compatible */ | ||
1166 | return 0; | ||
1167 | case 3: | ||
1168 | /* 3 is only compatible with itself */ | ||
1169 | return user_swminor == 3; | ||
1170 | default: | ||
1171 | /* >= 4 are compatible (or are expected to be) */ | ||
1172 | return user_swminor >= 4; | ||
1173 | } | ||
1174 | } | ||
1175 | /* make no promises yet for future major versions */ | ||
1176 | return 0; | ||
1177 | } | ||
1178 | |||
1179 | static int init_subctxts(struct qib_devdata *dd, | ||
1180 | struct qib_ctxtdata *rcd, | ||
1181 | const struct qib_user_info *uinfo) | ||
1182 | { | ||
1183 | int ret = 0; | ||
1184 | unsigned num_subctxts; | ||
1185 | size_t size; | ||
1186 | |||
1187 | /* | ||
1188 | * If the user is requesting zero subctxts, | ||
1189 | * skip the subctxt allocation. | ||
1190 | */ | ||
1191 | if (uinfo->spu_subctxt_cnt <= 0) | ||
1192 | goto bail; | ||
1193 | num_subctxts = uinfo->spu_subctxt_cnt; | ||
1194 | |||
1195 | /* Check for subctxt compatibility */ | ||
1196 | if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16, | ||
1197 | uinfo->spu_userversion & 0xffff)) { | ||
1198 | qib_devinfo(dd->pcidev, | ||
1199 | "Mismatched user version (%d.%d) and driver " | ||
1200 | "version (%d.%d) while context sharing. Ensure " | ||
1201 | "that driver and library are from the same " | ||
1202 | "release.\n", | ||
1203 | (int) (uinfo->spu_userversion >> 16), | ||
1204 | (int) (uinfo->spu_userversion & 0xffff), | ||
1205 | QIB_USER_SWMAJOR, QIB_USER_SWMINOR); | ||
1206 | goto bail; | ||
1207 | } | ||
1208 | if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) { | ||
1209 | ret = -EINVAL; | ||
1210 | goto bail; | ||
1211 | } | ||
1212 | |||
1213 | rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts); | ||
1214 | if (!rcd->subctxt_uregbase) { | ||
1215 | ret = -ENOMEM; | ||
1216 | goto bail; | ||
1217 | } | ||
1218 | /* Note: rcd->rcvhdrq_size isn't initialized yet. */ | ||
1219 | size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * | ||
1220 | sizeof(u32), PAGE_SIZE) * num_subctxts; | ||
1221 | rcd->subctxt_rcvhdr_base = vmalloc_user(size); | ||
1222 | if (!rcd->subctxt_rcvhdr_base) { | ||
1223 | ret = -ENOMEM; | ||
1224 | goto bail_ureg; | ||
1225 | } | ||
1226 | |||
1227 | rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks * | ||
1228 | rcd->rcvegrbuf_size * | ||
1229 | num_subctxts); | ||
1230 | if (!rcd->subctxt_rcvegrbuf) { | ||
1231 | ret = -ENOMEM; | ||
1232 | goto bail_rhdr; | ||
1233 | } | ||
1234 | |||
1235 | rcd->subctxt_cnt = uinfo->spu_subctxt_cnt; | ||
1236 | rcd->subctxt_id = uinfo->spu_subctxt_id; | ||
1237 | rcd->active_slaves = 1; | ||
1238 | rcd->redirect_seq_cnt = 1; | ||
1239 | set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag); | ||
1240 | goto bail; | ||
1241 | |||
1242 | bail_rhdr: | ||
1243 | vfree(rcd->subctxt_rcvhdr_base); | ||
1244 | bail_ureg: | ||
1245 | vfree(rcd->subctxt_uregbase); | ||
1246 | rcd->subctxt_uregbase = NULL; | ||
1247 | bail: | ||
1248 | return ret; | ||
1249 | } | ||
1250 | |||
1251 | static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, | ||
1252 | struct file *fp, const struct qib_user_info *uinfo) | ||
1253 | { | ||
1254 | struct qib_devdata *dd = ppd->dd; | ||
1255 | struct qib_ctxtdata *rcd; | ||
1256 | void *ptmp = NULL; | ||
1257 | int ret; | ||
1258 | |||
1259 | rcd = qib_create_ctxtdata(ppd, ctxt); | ||
1260 | |||
1261 | /* | ||
1262 | * Allocate memory for use in qib_tid_update() at open to | ||
1263 | * reduce cost of expected send setup per message segment | ||
1264 | */ | ||
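| /* | ||
| * This single buffer holds the page-pointer array followed by the | ||
| * u16 tidlist that qib_tid_update() copies back to the user. | ||
| */ | ||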
1265 | if (rcd) | ||
1266 | ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) + | ||
1267 | dd->rcvtidcnt * sizeof(struct page **), | ||
1268 | GFP_KERNEL); | ||
1269 | |||
1270 | if (!rcd || !ptmp) { | ||
1271 | qib_dev_err(dd, "Unable to allocate ctxtdata " | ||
1272 | "memory, failing open\n"); | ||
1273 | ret = -ENOMEM; | ||
1274 | goto bailerr; | ||
1275 | } | ||
1276 | rcd->userversion = uinfo->spu_userversion; | ||
1277 | ret = init_subctxts(dd, rcd, uinfo); | ||
1278 | if (ret) | ||
1279 | goto bailerr; | ||
1280 | rcd->tid_pg_list = ptmp; | ||
1281 | rcd->pid = current->pid; | ||
1282 | init_waitqueue_head(&dd->rcd[ctxt]->wait); | ||
1283 | strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); | ||
1284 | ctxt_fp(fp) = rcd; | ||
1285 | qib_stats.sps_ctxts++; | ||
1286 | ret = 0; | ||
1287 | goto bail; | ||
1288 | |||
1289 | bailerr: | ||
1290 | dd->rcd[ctxt] = NULL; | ||
1291 | kfree(rcd); | ||
1292 | kfree(ptmp); | ||
1293 | bail: | ||
1294 | return ret; | ||
1295 | } | ||
1296 | |||
1297 | static inline int usable(struct qib_pportdata *ppd, int active_only) | ||
1298 | { | ||
1299 | struct qib_devdata *dd = ppd->dd; | ||
1300 | u32 linkok = active_only ? QIBL_LINKACTIVE : | ||
1301 | (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE); | ||
1302 | |||
1303 | return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid && | ||
1304 | (ppd->lflags & linkok); | ||
1305 | } | ||
1306 | |||
1307 | static int find_free_ctxt(int unit, struct file *fp, | ||
1308 | const struct qib_user_info *uinfo) | ||
1309 | { | ||
1310 | struct qib_devdata *dd = qib_lookup(unit); | ||
1311 | struct qib_pportdata *ppd = NULL; | ||
1312 | int ret; | ||
1313 | u32 ctxt; | ||
1314 | |||
1315 | if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) { | ||
1316 | ret = -ENODEV; | ||
1317 | goto bail; | ||
1318 | } | ||
1319 | |||
1320 | /* | ||
1321 | * If the user requests a specific port, only try that one port; | ||
1322 | * else select the "best" port below, based on context. | ||
1323 | */ | ||
1324 | if (uinfo->spu_port) { | ||
1325 | ppd = dd->pport + uinfo->spu_port - 1; | ||
1326 | if (!usable(ppd, 0)) { | ||
1327 | ret = -ENETDOWN; | ||
1328 | goto bail; | ||
1329 | } | ||
1330 | } | ||
1331 | |||
1332 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { | ||
1333 | if (dd->rcd[ctxt]) | ||
1334 | continue; | ||
1335 | /* | ||
1336 | * The setting and clearing of user context rcd[x] protected | ||
1337 | * by the qib_mutex | ||
1338 | */ | ||
1339 | if (!ppd) { | ||
1340 | /* choose port based on ctxt, if up, else 1st up */ | ||
1341 | ppd = dd->pport + (ctxt % dd->num_pports); | ||
1342 | if (!usable(ppd, 0)) { | ||
1343 | int i; | ||
1344 | for (i = 0; i < dd->num_pports; i++) { | ||
1345 | ppd = dd->pport + i; | ||
1346 | if (usable(ppd, 0)) | ||
1347 | break; | ||
1348 | } | ||
1349 | if (i == dd->num_pports) { | ||
1350 | ret = -ENETDOWN; | ||
1351 | goto bail; | ||
1352 | } | ||
1353 | } | ||
1354 | } | ||
1355 | ret = setup_ctxt(ppd, ctxt, fp, uinfo); | ||
1356 | goto bail; | ||
1357 | } | ||
1358 | ret = -EBUSY; | ||
1359 | |||
1360 | bail: | ||
1361 | return ret; | ||
1362 | } | ||
1363 | |||
1364 | static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo) | ||
1365 | { | ||
1366 | struct qib_pportdata *ppd; | ||
1367 | int ret = 0, devmax; | ||
1368 | int npresent, nup; | ||
1369 | int ndev; | ||
1370 | u32 port = uinfo->spu_port, ctxt; | ||
1371 | |||
1372 | devmax = qib_count_units(&npresent, &nup); | ||
1373 | |||
1374 | for (ndev = 0; ndev < devmax; ndev++) { | ||
1375 | struct qib_devdata *dd = qib_lookup(ndev); | ||
1376 | |||
1377 | /* device portion of usable() */ | ||
1378 | if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase)) | ||
1379 | continue; | ||
1380 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { | ||
1381 | if (dd->rcd[ctxt]) | ||
1382 | continue; | ||
1383 | if (port) { | ||
1384 | if (port > dd->num_pports) | ||
1385 | continue; | ||
1386 | ppd = dd->pport + port - 1; | ||
1387 | if (!usable(ppd, 0)) | ||
1388 | continue; | ||
1389 | } else { | ||
1390 | /* | ||
1391 | * choose port based on ctxt, if up, else | ||
1392 | * first port that's up for multi-port HCA | ||
1393 | */ | ||
1394 | ppd = dd->pport + (ctxt % dd->num_pports); | ||
1395 | if (!usable(ppd, 0)) { | ||
1396 | int j; | ||
1397 | |||
1398 | ppd = NULL; | ||
1399 | for (j = 0; j < dd->num_pports && | ||
1400 | !ppd; j++) | ||
1401 | if (usable(dd->pport + j, 0)) | ||
1402 | ppd = dd->pport + j; | ||
1403 | if (!ppd) | ||
1404 | continue; /* to next unit */ | ||
1405 | } | ||
1406 | } | ||
1407 | ret = setup_ctxt(ppd, ctxt, fp, uinfo); | ||
1408 | goto done; | ||
1409 | } | ||
1410 | } | ||
1411 | |||
1412 | if (npresent) { | ||
1413 | if (nup == 0) | ||
1414 | ret = -ENETDOWN; | ||
1415 | else | ||
1416 | ret = -EBUSY; | ||
1417 | } else | ||
1418 | ret = -ENXIO; | ||
1419 | |||
1420 | done: | ||
1421 | return ret; | ||
1422 | } | ||
1423 | |||
1424 | static int find_shared_ctxt(struct file *fp, | ||
1425 | const struct qib_user_info *uinfo) | ||
1426 | { | ||
1427 | int devmax, ndev, i; | ||
1428 | int ret = 0; | ||
1429 | |||
1430 | devmax = qib_count_units(NULL, NULL); | ||
1431 | |||
1432 | for (ndev = 0; ndev < devmax; ndev++) { | ||
1433 | struct qib_devdata *dd = qib_lookup(ndev); | ||
1434 | |||
1435 | /* device portion of usable() */ | ||
1436 | if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase)) | ||
1437 | continue; | ||
1438 | for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) { | ||
1439 | struct qib_ctxtdata *rcd = dd->rcd[i]; | ||
1440 | |||
1441 | /* Skip ctxts which are not yet open */ | ||
1442 | if (!rcd || !rcd->cnt) | ||
1443 | continue; | ||
1444 | /* Skip ctxt if it doesn't match the requested one */ | ||
1445 | if (rcd->subctxt_id != uinfo->spu_subctxt_id) | ||
1446 | continue; | ||
1447 | /* Verify the sharing process matches the master */ | ||
1448 | if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt || | ||
1449 | rcd->userversion != uinfo->spu_userversion || | ||
1450 | rcd->cnt >= rcd->subctxt_cnt) { | ||
1451 | ret = -EINVAL; | ||
1452 | goto done; | ||
1453 | } | ||
1454 | ctxt_fp(fp) = rcd; | ||
1455 | subctxt_fp(fp) = rcd->cnt++; | ||
1456 | rcd->subpid[subctxt_fp(fp)] = current->pid; | ||
1457 | tidcursor_fp(fp) = 0; | ||
1458 | rcd->active_slaves |= 1 << subctxt_fp(fp); | ||
1459 | ret = 1; | ||
1460 | goto done; | ||
1461 | } | ||
1462 | } | ||
1463 | |||
1464 | done: | ||
1465 | return ret; | ||
1466 | } | ||
1467 | |||
1468 | static int qib_open(struct inode *in, struct file *fp) | ||
1469 | { | ||
1470 | /* The real work is performed later in qib_assign_ctxt() */ | ||
1471 | fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL); | ||
1472 | if (fp->private_data) /* no cpu affinity by default */ | ||
1473 | ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1; | ||
1474 | return fp->private_data ? 0 : -ENOMEM; | ||
1475 | } | ||
1476 | |||
1477 | /* | ||
1478 | * Get ctxt early, so can set affinity prior to memory allocation. | ||
1479 | */ | ||
1480 | static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) | ||
1481 | { | ||
1482 | int ret; | ||
1483 | int i_minor; | ||
1484 | unsigned swmajor, swminor; | ||
1485 | |||
1486 | /* Check to be sure we haven't already initialized this file */ | ||
1487 | if (ctxt_fp(fp)) { | ||
1488 | ret = -EINVAL; | ||
1489 | goto done; | ||
1490 | } | ||
1491 | |||
1492 | /* for now, if major version is different, bail */ | ||
1493 | swmajor = uinfo->spu_userversion >> 16; | ||
1494 | if (swmajor != QIB_USER_SWMAJOR) { | ||
1495 | ret = -ENODEV; | ||
1496 | goto done; | ||
1497 | } | ||
1498 | |||
1499 | swminor = uinfo->spu_userversion & 0xffff; | ||
1500 | |||
1501 | mutex_lock(&qib_mutex); | ||
1502 | |||
1503 | if (qib_compatible_subctxts(swmajor, swminor) && | ||
1504 | uinfo->spu_subctxt_cnt) { | ||
1505 | ret = find_shared_ctxt(fp, uinfo); | ||
1506 | if (ret) { | ||
1507 | if (ret > 0) | ||
1508 | ret = 0; | ||
1509 | goto done_chk_sdma; | ||
1510 | } | ||
1511 | } | ||
1512 | |||
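| /* | ||
| * Device minor (relative to QIB_USER_MINOR_BASE) selects the unit: | ||
| * 0 means any unit; a nonzero minor pins the open to unit (minor - 1). | ||
| */ | ||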
1513 | i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE; | ||
1514 | if (i_minor) | ||
1515 | ret = find_free_ctxt(i_minor - 1, fp, uinfo); | ||
1516 | else | ||
1517 | ret = get_a_ctxt(fp, uinfo); | ||
1518 | |||
1519 | done_chk_sdma: | ||
1520 | if (!ret) { | ||
1521 | struct qib_filedata *fd = fp->private_data; | ||
1522 | const struct qib_ctxtdata *rcd = fd->rcd; | ||
1523 | const struct qib_devdata *dd = rcd->dd; | ||
1524 | |||
1525 | if (dd->flags & QIB_HAS_SEND_DMA) { | ||
1526 | fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, | ||
1527 | dd->unit, | ||
1528 | rcd->ctxt, | ||
1529 | fd->subctxt); | ||
1530 | if (!fd->pq) | ||
1531 | ret = -ENOMEM; | ||
1532 | } | ||
1533 | |||
1534 | /* | ||
1535 | * If the process has NOT already set its affinity, select and | ||
1536 | * reserve a processor for it, as a rendezvous for all | ||
1537 | * users of the driver. If they don't actually later | ||
1538 | * set affinity to this cpu, or set it to some other cpu, | ||
1539 | * it just means that sooner or later we don't recommend | ||
1540 | * a cpu, and let the scheduler do its best. | ||
1541 | */ | ||
1542 | if (!ret && cpus_weight(current->cpus_allowed) >= | ||
1543 | qib_cpulist_count) { | ||
1544 | int cpu; | ||
1545 | cpu = find_first_zero_bit(qib_cpulist, | ||
1546 | qib_cpulist_count); | ||
1547 | if (cpu != qib_cpulist_count) { | ||
1548 | __set_bit(cpu, qib_cpulist); | ||
1549 | fd->rec_cpu_num = cpu; | ||
1550 | } | ||
1551 | } else if (cpus_weight(current->cpus_allowed) == 1 && | ||
1552 | test_bit(first_cpu(current->cpus_allowed), | ||
1553 | qib_cpulist)) | ||
1554 | qib_devinfo(dd->pcidev, "%s PID %u affinity " | ||
1555 | "set to cpu %d; already allocated\n", | ||
1556 | current->comm, current->pid, | ||
1557 | first_cpu(current->cpus_allowed)); | ||
1558 | } | ||
1559 | |||
1560 | mutex_unlock(&qib_mutex); | ||
1561 | |||
1562 | done: | ||
1563 | return ret; | ||
1564 | } | ||
1565 | |||
1566 | |||
1567 | static int qib_do_user_init(struct file *fp, | ||
1568 | const struct qib_user_info *uinfo) | ||
1569 | { | ||
1570 | int ret; | ||
1571 | struct qib_ctxtdata *rcd = ctxt_fp(fp); | ||
1572 | struct qib_devdata *dd; | ||
1573 | unsigned uctxt; | ||
1574 | |||
1575 | /* Subctxts don't need to initialize anything since master did it. */ | ||
1576 | if (subctxt_fp(fp)) { | ||
1577 | ret = wait_event_interruptible(rcd->wait, | ||
1578 | !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag)); | ||
1579 | goto bail; | ||
1580 | } | ||
1581 | |||
1582 | dd = rcd->dd; | ||
1583 | |||
1584 | /* some ctxts may get extra buffers, calculate that here */ | ||
1585 | uctxt = rcd->ctxt - dd->first_user_ctxt; | ||
1586 | if (uctxt < dd->ctxts_extrabuf) { | ||
1587 | rcd->piocnt = dd->pbufsctxt + 1; | ||
1588 | rcd->pio_base = rcd->piocnt * uctxt; | ||
1589 | } else { | ||
1590 | rcd->piocnt = dd->pbufsctxt; | ||
1591 | rcd->pio_base = rcd->piocnt * uctxt + | ||
1592 | dd->ctxts_extrabuf; | ||
1593 | } | ||
1594 | |||
1595 | /* | ||
1596 | * All user buffers are 2KB buffers. If we ever support | ||
1597 | * giving 4KB buffers to user processes, this will need some | ||
1598 | * work. Can't use piobufbase directly, because it has | ||
1599 | * both 2K and 4K buffer base values. So check and handle. | ||
1600 | */ | ||
1601 | if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) { | ||
1602 | if (rcd->pio_base >= dd->piobcnt2k) { | ||
1603 | qib_dev_err(dd, | ||
1604 | "%u:ctxt%u: no 2KB buffers available\n", | ||
1605 | dd->unit, rcd->ctxt); | ||
1606 | ret = -ENOBUFS; | ||
1607 | goto bail; | ||
1608 | } | ||
1609 | rcd->piocnt = dd->piobcnt2k - rcd->pio_base; | ||
1610 | qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n", | ||
1611 | rcd->ctxt, rcd->piocnt); | ||
1612 | } | ||
1613 | |||
1614 | rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign; | ||
1615 | qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt, | ||
1616 | TXCHK_CHG_TYPE_USER, rcd); | ||
1617 | /* | ||
1618 | * try to ensure that processes start up with consistent avail update | ||
1619 | * for their own range, at least. If the system is very quiet, it might | ||
1620 | * have the in-memory copy out of date at startup for this range of | ||
1621 | * buffers, when a context gets re-used. Do this after the chg_pioavail | ||
1622 | * and before the rest of setup, so it's "almost certain" the dma | ||
1623 | * will have occurred (can't 100% guarantee, but should be many | ||
1624 | * decimals of 9s, with this ordering), given how much else happens | ||
1625 | * after this. | ||
1626 | */ | ||
1627 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
1628 | |||
1629 | /* | ||
1630 | * Now allocate the rcvhdr Q and eager TIDs; skip the TID | ||
1631 | * array for the time being. If rcd->ctxt > chip-supported, | ||
1632 | * we will someday need extra handling here, by overflowing | ||
1633 | * through ctxt 0. | ||
1634 | */ | ||
1635 | ret = qib_create_rcvhdrq(dd, rcd); | ||
1636 | if (!ret) | ||
1637 | ret = qib_setup_eagerbufs(rcd); | ||
1638 | if (ret) | ||
1639 | goto bail_pio; | ||
1640 | |||
1641 | rcd->tidcursor = 0; /* start at beginning after open */ | ||
1642 | |||
1643 | /* initialize poll variables... */ | ||
1644 | rcd->urgent = 0; | ||
1645 | rcd->urgent_poll = 0; | ||
1646 | |||
1647 | /* | ||
1648 | * Now enable the ctxt for receive. | ||
1649 | * For chips that are set to DMA the tail register to memory | ||
1650 | * when it changes (and when the update bit transitions from | ||
1651 | * 0 to 1), we turn it off and then back on. | ||
1652 | * This will (very briefly) affect any other open ctxts, but the | ||
1653 | * duration is very short, and therefore isn't an issue. We | ||
1654 | * explicitly set the in-memory tail copy to 0 beforehand, so we | ||
1655 | * don't have to wait to be sure the DMA update has happened | ||
1656 | * (chip resets head/tail to 0 on transition to enable). | ||
1657 | */ | ||
1658 | if (rcd->rcvhdrtail_kvaddr) | ||
1659 | qib_clear_rcvhdrtail(rcd); | ||
1660 | |||
1661 | dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB, | ||
1662 | rcd->ctxt); | ||
1663 | |||
1664 | /* Notify any waiting slaves */ | ||
1665 | if (rcd->subctxt_cnt) { | ||
1666 | clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag); | ||
1667 | wake_up(&rcd->wait); | ||
1668 | } | ||
1669 | return 0; | ||
1670 | |||
1671 | bail_pio: | ||
1672 | qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt, | ||
1673 | TXCHK_CHG_TYPE_KERN, rcd); | ||
1674 | bail: | ||
1675 | return ret; | ||
1676 | } | ||
1677 | |||
1678 | /** | ||
1679 | * unlock_expected_tids - unlock any expected TID entries the context still had in use | ||
1680 | * @rcd: ctxt | ||
1681 | * | ||
1682 | * We don't actually update the chip here, because we do a bulk update | ||
1683 | * below, using f_clear_tids. | ||
1684 | */ | ||
1685 | static void unlock_expected_tids(struct qib_ctxtdata *rcd) | ||
1686 | { | ||
1687 | struct qib_devdata *dd = rcd->dd; | ||
1688 | int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt; | ||
1689 | int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt; | ||
1690 | |||
1691 | for (i = ctxt_tidbase; i < maxtid; i++) { | ||
1692 | struct page *p = dd->pageshadow[i]; | ||
1693 | dma_addr_t phys; | ||
1694 | |||
1695 | if (!p) | ||
1696 | continue; | ||
1697 | |||
1698 | phys = dd->physshadow[i]; | ||
1699 | dd->physshadow[i] = dd->tidinvalid; | ||
1700 | dd->pageshadow[i] = NULL; | ||
1701 | pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, | ||
1702 | PCI_DMA_FROMDEVICE); | ||
1703 | qib_release_user_pages(&p, 1); | ||
1704 | cnt++; | ||
1705 | } | ||
1706 | } | ||
1707 | |||
1708 | static int qib_close(struct inode *in, struct file *fp) | ||
1709 | { | ||
1710 | int ret = 0; | ||
1711 | struct qib_filedata *fd; | ||
1712 | struct qib_ctxtdata *rcd; | ||
1713 | struct qib_devdata *dd; | ||
1714 | unsigned long flags; | ||
1715 | unsigned ctxt; | ||
1716 | pid_t pid; | ||
1717 | |||
1718 | mutex_lock(&qib_mutex); | ||
1719 | |||
1720 | fd = (struct qib_filedata *) fp->private_data; | ||
1721 | fp->private_data = NULL; | ||
1722 | rcd = fd->rcd; | ||
1723 | if (!rcd) { | ||
1724 | mutex_unlock(&qib_mutex); | ||
1725 | goto bail; | ||
1726 | } | ||
1727 | |||
1728 | dd = rcd->dd; | ||
1729 | |||
1730 | /* ensure all pio buffer writes in progress are flushed */ | ||
1731 | qib_flush_wc(); | ||
1732 | |||
1733 | /* drain user sdma queue */ | ||
1734 | if (fd->pq) { | ||
1735 | qib_user_sdma_queue_drain(rcd->ppd, fd->pq); | ||
1736 | qib_user_sdma_queue_destroy(fd->pq); | ||
1737 | } | ||
1738 | |||
1739 | if (fd->rec_cpu_num != -1) | ||
1740 | __clear_bit(fd->rec_cpu_num, qib_cpulist); | ||
1741 | |||
1742 | if (--rcd->cnt) { | ||
1743 | /* | ||
1744 | * XXX If the master closes the context before the slave(s), | ||
1745 | * revoke the mmap for the eager receive queue so | ||
1746 | * the slave(s) don't wait for receive data forever. | ||
1747 | */ | ||
1748 | rcd->active_slaves &= ~(1 << fd->subctxt); | ||
1749 | rcd->subpid[fd->subctxt] = 0; | ||
1750 | mutex_unlock(&qib_mutex); | ||
1751 | goto bail; | ||
1752 | } | ||
1753 | |||
1754 | /* early; no interrupt users after this */ | ||
1755 | spin_lock_irqsave(&dd->uctxt_lock, flags); | ||
1756 | ctxt = rcd->ctxt; | ||
1757 | dd->rcd[ctxt] = NULL; | ||
1758 | pid = rcd->pid; | ||
1759 | rcd->pid = 0; | ||
1760 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
1761 | |||
1762 | if (rcd->rcvwait_to || rcd->piowait_to || | ||
1763 | rcd->rcvnowait || rcd->pionowait) { | ||
1764 | rcd->rcvwait_to = 0; | ||
1765 | rcd->piowait_to = 0; | ||
1766 | rcd->rcvnowait = 0; | ||
1767 | rcd->pionowait = 0; | ||
1768 | } | ||
1769 | if (rcd->flag) | ||
1770 | rcd->flag = 0; | ||
1771 | |||
1772 | if (dd->kregbase) { | ||
1773 | /* atomically clear receive enable ctxt and intr avail. */ | ||
1774 | dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS | | ||
1775 | QIB_RCVCTRL_INTRAVAIL_DIS, ctxt); | ||
1776 | |||
1777 | /* clean up the pkeys for this ctxt user */ | ||
1778 | qib_clean_part_key(rcd, dd); | ||
1779 | qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt); | ||
1780 | qib_chg_pioavailkernel(dd, rcd->pio_base, | ||
1781 | rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL); | ||
1782 | |||
1783 | dd->f_clear_tids(dd, rcd); | ||
1784 | |||
1785 | if (dd->pageshadow) | ||
1786 | unlock_expected_tids(rcd); | ||
1787 | qib_stats.sps_ctxts--; | ||
1788 | } | ||
1789 | |||
1790 | mutex_unlock(&qib_mutex); | ||
1791 | qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */ | ||
1792 | |||
1793 | bail: | ||
1794 | kfree(fd); | ||
1795 | return ret; | ||
1796 | } | ||
1797 | |||
1798 | static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo) | ||
1799 | { | ||
1800 | struct qib_ctxt_info info; | ||
1801 | int ret; | ||
1802 | size_t sz; | ||
1803 | struct qib_ctxtdata *rcd = ctxt_fp(fp); | ||
1804 | struct qib_filedata *fd; | ||
1805 | |||
1806 | fd = (struct qib_filedata *) fp->private_data; | ||
1807 | |||
1808 | info.num_active = qib_count_active_units(); | ||
1809 | info.unit = rcd->dd->unit; | ||
1810 | info.port = rcd->ppd->port; | ||
1811 | info.ctxt = rcd->ctxt; | ||
1812 | info.subctxt = subctxt_fp(fp); | ||
1813 | /* Number of user ctxts available for this device. */ | ||
1814 | info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt; | ||
1815 | info.num_subctxts = rcd->subctxt_cnt; | ||
1816 | info.rec_cpu = fd->rec_cpu_num; | ||
1817 | sz = sizeof(info); | ||
1818 | |||
1819 | if (copy_to_user(uinfo, &info, sz)) { | ||
1820 | ret = -EFAULT; | ||
1821 | goto bail; | ||
1822 | } | ||
1823 | ret = 0; | ||
1824 | |||
1825 | bail: | ||
1826 | return ret; | ||
1827 | } | ||
1828 | |||
1829 | static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq, | ||
1830 | u32 __user *inflightp) | ||
1831 | { | ||
1832 | const u32 val = qib_user_sdma_inflight_counter(pq); | ||
1833 | |||
1834 | if (put_user(val, inflightp)) | ||
1835 | return -EFAULT; | ||
1836 | |||
1837 | return 0; | ||
1838 | } | ||
1839 | |||
1840 | static int qib_sdma_get_complete(struct qib_pportdata *ppd, | ||
1841 | struct qib_user_sdma_queue *pq, | ||
1842 | u32 __user *completep) | ||
1843 | { | ||
1844 | u32 val; | ||
1845 | int err; | ||
1846 | |||
1847 | if (!pq) | ||
1848 | return -EINVAL; | ||
1849 | |||
1850 | err = qib_user_sdma_make_progress(ppd, pq); | ||
1851 | if (err < 0) | ||
1852 | return err; | ||
1853 | |||
1854 | val = qib_user_sdma_complete_counter(pq); | ||
1855 | if (put_user(val, completep)) | ||
1856 | return -EFAULT; | ||
1857 | |||
1858 | return 0; | ||
1859 | } | ||
1860 | |||
1861 | static int disarm_req_delay(struct qib_ctxtdata *rcd) | ||
1862 | { | ||
1863 | int ret = 0; | ||
1864 | |||
1865 | if (!usable(rcd->ppd, 1)) { | ||
1866 | int i; | ||
1867 | /* | ||
1868 | * If the link is down, or otherwise not usable, delay | ||
1869 | * the caller up to 30 seconds so we don't thrash | ||
1870 | * trying to get the chip back to ACTIVE, and set a | ||
1871 | * flag so the caller makes the call again. | ||
1872 | */ | ||
1873 | if (rcd->user_event_mask) { | ||
1874 | /* | ||
1875 | * subctxt_cnt is 0 if not shared, so handle the base | ||
1876 | * context first, then the remaining subcontexts, if any | ||
1877 | */ | ||
1878 | set_bit(_QIB_EVENT_DISARM_BUFS_BIT, | ||
1879 | &rcd->user_event_mask[0]); | ||
1880 | for (i = 1; i < rcd->subctxt_cnt; i++) | ||
1881 | set_bit(_QIB_EVENT_DISARM_BUFS_BIT, | ||
1882 | &rcd->user_event_mask[i]); | ||
1883 | } | ||
1884 | for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++) | ||
1885 | msleep(100); | ||
1886 | ret = -ENETDOWN; | ||
1887 | } | ||
1888 | return ret; | ||
1889 | } | ||
1890 | |||
1891 | /* | ||
1892 | * Find all user contexts in use, and set the specified bit in their | ||
1893 | * event mask. | ||
1894 | * See also find_ctxt() for a similar use, that is specific to send buffers. | ||
1895 | */ | ||
1896 | int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit) | ||
1897 | { | ||
1898 | struct qib_ctxtdata *rcd; | ||
1899 | unsigned ctxt; | ||
1900 | int ret = 0; | ||
1901 | |||
1902 | spin_lock(&ppd->dd->uctxt_lock); | ||
1903 | for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts; | ||
1904 | ctxt++) { | ||
1905 | rcd = ppd->dd->rcd[ctxt]; | ||
1906 | if (!rcd) | ||
1907 | continue; | ||
1908 | if (rcd->user_event_mask) { | ||
1909 | int i; | ||
1910 | /* | ||
1911 | * subctxt_cnt is 0 if not shared, so handle the base | ||
1912 | * context first, then the remaining subcontexts, if any | ||
1913 | */ | ||
1914 | set_bit(evtbit, &rcd->user_event_mask[0]); | ||
1915 | for (i = 1; i < rcd->subctxt_cnt; i++) | ||
1916 | set_bit(evtbit, &rcd->user_event_mask[i]); | ||
1917 | } | ||
1918 | ret = 1; | ||
1919 | break; | ||
1920 | } | ||
1921 | spin_unlock(&ppd->dd->uctxt_lock); | ||
1922 | |||
1923 | return ret; | ||
1924 | } | ||
1925 | |||
1926 | /* | ||
1927 | * Clear the event notifier events for this context. | ||
1928 | * For the DISARM_BUFS case, we also take action (this obsoletes | ||
1929 | * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards | ||
1930 | * compatibility). | ||
1931 | * Other bits don't currently require actions, just atomically clear. | ||
1932 | * The user process then performs whatever action is appropriate to | ||
1933 | * the bit having been set, if desired, and checks again in the future. | ||
1934 | */ | ||
1935 | static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt, | ||
1936 | unsigned long events) | ||
1937 | { | ||
1938 | int ret = 0, i; | ||
1939 | |||
1940 | for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) { | ||
1941 | if (!test_bit(i, &events)) | ||
1942 | continue; | ||
1943 | if (i == _QIB_EVENT_DISARM_BUFS_BIT) { | ||
1944 | (void)qib_disarm_piobufs_ifneeded(rcd); | ||
1945 | ret = disarm_req_delay(rcd); | ||
1946 | } else | ||
1947 | clear_bit(i, &rcd->user_event_mask[subctxt]); | ||
1948 | } | ||
1949 | return ret; | ||
1950 | } | ||
1951 | |||
1952 | static ssize_t qib_write(struct file *fp, const char __user *data, | ||
1953 | size_t count, loff_t *off) | ||
1954 | { | ||
1955 | const struct qib_cmd __user *ucmd; | ||
1956 | struct qib_ctxtdata *rcd; | ||
1957 | const void __user *src; | ||
1958 | size_t consumed, copy = 0; | ||
1959 | struct qib_cmd cmd; | ||
1960 | ssize_t ret = 0; | ||
1961 | void *dest; | ||
1962 | |||
1963 | if (count < sizeof(cmd.type)) { | ||
1964 | ret = -EINVAL; | ||
1965 | goto bail; | ||
1966 | } | ||
1967 | |||
1968 | ucmd = (const struct qib_cmd __user *) data; | ||
1969 | |||
1970 | if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) { | ||
1971 | ret = -EFAULT; | ||
1972 | goto bail; | ||
1973 | } | ||
1974 | |||
1975 | consumed = sizeof(cmd.type); | ||
1976 | |||
1977 | switch (cmd.type) { | ||
1978 | case QIB_CMD_ASSIGN_CTXT: | ||
1979 | case QIB_CMD_USER_INIT: | ||
1980 | copy = sizeof(cmd.cmd.user_info); | ||
1981 | dest = &cmd.cmd.user_info; | ||
1982 | src = &ucmd->cmd.user_info; | ||
1983 | break; | ||
1984 | |||
1985 | case QIB_CMD_RECV_CTRL: | ||
1986 | copy = sizeof(cmd.cmd.recv_ctrl); | ||
1987 | dest = &cmd.cmd.recv_ctrl; | ||
1988 | src = &ucmd->cmd.recv_ctrl; | ||
1989 | break; | ||
1990 | |||
1991 | case QIB_CMD_CTXT_INFO: | ||
1992 | copy = sizeof(cmd.cmd.ctxt_info); | ||
1993 | dest = &cmd.cmd.ctxt_info; | ||
1994 | src = &ucmd->cmd.ctxt_info; | ||
1995 | break; | ||
1996 | |||
1997 | case QIB_CMD_TID_UPDATE: | ||
1998 | case QIB_CMD_TID_FREE: | ||
1999 | copy = sizeof(cmd.cmd.tid_info); | ||
2000 | dest = &cmd.cmd.tid_info; | ||
2001 | src = &ucmd->cmd.tid_info; | ||
2002 | break; | ||
2003 | |||
2004 | case QIB_CMD_SET_PART_KEY: | ||
2005 | copy = sizeof(cmd.cmd.part_key); | ||
2006 | dest = &cmd.cmd.part_key; | ||
2007 | src = &ucmd->cmd.part_key; | ||
2008 | break; | ||
2009 | |||
2010 | case QIB_CMD_DISARM_BUFS: | ||
2011 | case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */ | ||
2012 | copy = 0; | ||
2013 | src = NULL; | ||
2014 | dest = NULL; | ||
2015 | break; | ||
2016 | |||
2017 | case QIB_CMD_POLL_TYPE: | ||
2018 | copy = sizeof(cmd.cmd.poll_type); | ||
2019 | dest = &cmd.cmd.poll_type; | ||
2020 | src = &ucmd->cmd.poll_type; | ||
2021 | break; | ||
2022 | |||
2023 | case QIB_CMD_ARMLAUNCH_CTRL: | ||
2024 | copy = sizeof(cmd.cmd.armlaunch_ctrl); | ||
2025 | dest = &cmd.cmd.armlaunch_ctrl; | ||
2026 | src = &ucmd->cmd.armlaunch_ctrl; | ||
2027 | break; | ||
2028 | |||
2029 | case QIB_CMD_SDMA_INFLIGHT: | ||
2030 | copy = sizeof(cmd.cmd.sdma_inflight); | ||
2031 | dest = &cmd.cmd.sdma_inflight; | ||
2032 | src = &ucmd->cmd.sdma_inflight; | ||
2033 | break; | ||
2034 | |||
2035 | case QIB_CMD_SDMA_COMPLETE: | ||
2036 | copy = sizeof(cmd.cmd.sdma_complete); | ||
2037 | dest = &cmd.cmd.sdma_complete; | ||
2038 | src = &ucmd->cmd.sdma_complete; | ||
2039 | break; | ||
2040 | |||
2041 | case QIB_CMD_ACK_EVENT: | ||
2042 | copy = sizeof(cmd.cmd.event_mask); | ||
2043 | dest = &cmd.cmd.event_mask; | ||
2044 | src = &ucmd->cmd.event_mask; | ||
2045 | break; | ||
2046 | |||
2047 | default: | ||
2048 | ret = -EINVAL; | ||
2049 | goto bail; | ||
2050 | } | ||
2051 | |||
2052 | if (copy) { | ||
2053 | if ((count - consumed) < copy) { | ||
2054 | ret = -EINVAL; | ||
2055 | goto bail; | ||
2056 | } | ||
2057 | if (copy_from_user(dest, src, copy)) { | ||
2058 | ret = -EFAULT; | ||
2059 | goto bail; | ||
2060 | } | ||
2061 | consumed += copy; | ||
2062 | } | ||
2063 | |||
2064 | rcd = ctxt_fp(fp); | ||
2065 | if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) { | ||
2066 | ret = -EINVAL; | ||
2067 | goto bail; | ||
2068 | } | ||
2069 | |||
2070 | switch (cmd.type) { | ||
2071 | case QIB_CMD_ASSIGN_CTXT: | ||
2072 | ret = qib_assign_ctxt(fp, &cmd.cmd.user_info); | ||
2073 | if (ret) | ||
2074 | goto bail; | ||
2075 | break; | ||
2076 | |||
2077 | case QIB_CMD_USER_INIT: | ||
2078 | ret = qib_do_user_init(fp, &cmd.cmd.user_info); | ||
2079 | if (ret) | ||
2080 | goto bail; | ||
2081 | ret = qib_get_base_info(fp, (void __user *) (unsigned long) | ||
2082 | cmd.cmd.user_info.spu_base_info, | ||
2083 | cmd.cmd.user_info.spu_base_info_size); | ||
2084 | break; | ||
2085 | |||
2086 | case QIB_CMD_RECV_CTRL: | ||
2087 | ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl); | ||
2088 | break; | ||
2089 | |||
2090 | case QIB_CMD_CTXT_INFO: | ||
2091 | ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *) | ||
2092 | (unsigned long) cmd.cmd.ctxt_info); | ||
2093 | break; | ||
2094 | |||
2095 | case QIB_CMD_TID_UPDATE: | ||
2096 | ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info); | ||
2097 | break; | ||
2098 | |||
2099 | case QIB_CMD_TID_FREE: | ||
2100 | ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info); | ||
2101 | break; | ||
2102 | |||
2103 | case QIB_CMD_SET_PART_KEY: | ||
2104 | ret = qib_set_part_key(rcd, cmd.cmd.part_key); | ||
2105 | break; | ||
2106 | |||
2107 | case QIB_CMD_DISARM_BUFS: | ||
2108 | (void)qib_disarm_piobufs_ifneeded(rcd); | ||
2109 | ret = disarm_req_delay(rcd); | ||
2110 | break; | ||
2111 | |||
2112 | case QIB_CMD_PIOAVAILUPD: | ||
2113 | qib_force_pio_avail_update(rcd->dd); | ||
2114 | break; | ||
2115 | |||
2116 | case QIB_CMD_POLL_TYPE: | ||
2117 | rcd->poll_type = cmd.cmd.poll_type; | ||
2118 | break; | ||
2119 | |||
2120 | case QIB_CMD_ARMLAUNCH_CTRL: | ||
2121 | rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl); | ||
2122 | break; | ||
2123 | |||
2124 | case QIB_CMD_SDMA_INFLIGHT: | ||
2125 | ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp), | ||
2126 | (u32 __user *) (unsigned long) | ||
2127 | cmd.cmd.sdma_inflight); | ||
2128 | break; | ||
2129 | |||
2130 | case QIB_CMD_SDMA_COMPLETE: | ||
2131 | ret = qib_sdma_get_complete(rcd->ppd, | ||
2132 | user_sdma_queue_fp(fp), | ||
2133 | (u32 __user *) (unsigned long) | ||
2134 | cmd.cmd.sdma_complete); | ||
2135 | break; | ||
2136 | |||
2137 | case QIB_CMD_ACK_EVENT: | ||
2138 | ret = qib_user_event_ack(rcd, subctxt_fp(fp), | ||
2139 | cmd.cmd.event_mask); | ||
2140 | break; | ||
2141 | } | ||
2142 | |||
2143 | if (ret >= 0) | ||
2144 | ret = consumed; | ||
2145 | |||
2146 | bail: | ||
2147 | return ret; | ||
2148 | } | ||
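qib_write() implements a small command protocol on the device file: userspace writes a struct qib_cmd whose type field selects which member of the payload union is consumed. A hedged userspace sketch of querying context info follows; it assumes a userspace copy of the driver's qib_user.h definitions (struct qib_cmd, struct qib_ctxt_info, QIB_CMD_CTXT_INFO) and is illustrative rather than a documented stable ABI.

/*
 * Hedged sketch: ask the driver for this process's context info via
 * the write() command interface.  "qib_user.h" stands in for a
 * userspace copy of the driver's user ABI header (an assumption).
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include "qib_user.h"	/* hypothetical copy of the driver ABI header */

static int query_ctxt_info(int fd)
{
	struct qib_ctxt_info info;
	struct qib_cmd cmd = { 0 };

	cmd.type = QIB_CMD_CTXT_INFO;
	/* the payload is a user pointer carried as a 64-bit integer */
	cmd.cmd.ctxt_info = (uintptr_t)&info;

	/* the driver consumes cmd.type plus the type-specific payload */
	if (write(fd, &cmd, sizeof(cmd)) < 0)
		return -1;

	printf("unit %u ctxt %u (%u user ctxts available)\n",
	       (unsigned)info.unit, (unsigned)info.ctxt,
	       (unsigned)info.num_ctxts);
	return 0;
}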
2149 | |||
2150 | static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov, | ||
2151 | unsigned long dim, loff_t off) | ||
2152 | { | ||
2153 | struct qib_filedata *fp = iocb->ki_filp->private_data; | ||
2154 | struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp); | ||
2155 | struct qib_user_sdma_queue *pq = fp->pq; | ||
2156 | |||
2157 | if (!dim || !pq) | ||
2158 | return -EINVAL; | ||
2159 | |||
2160 | return qib_user_sdma_writev(rcd, pq, iov, dim); | ||
2161 | } | ||
2162 | |||
2163 | static struct class *qib_class; | ||
2164 | static dev_t qib_dev; | ||
2165 | |||
2166 | int qib_cdev_init(int minor, const char *name, | ||
2167 | const struct file_operations *fops, | ||
2168 | struct cdev **cdevp, struct device **devp) | ||
2169 | { | ||
2170 | const dev_t dev = MKDEV(MAJOR(qib_dev), minor); | ||
2171 | struct cdev *cdev; | ||
2172 | struct device *device = NULL; | ||
2173 | int ret; | ||
2174 | |||
2175 | cdev = cdev_alloc(); | ||
2176 | if (!cdev) { | ||
2177 | printk(KERN_ERR QIB_DRV_NAME | ||
2178 | ": Could not allocate cdev for minor %d, %s\n", | ||
2179 | minor, name); | ||
2180 | ret = -ENOMEM; | ||
2181 | goto done; | ||
2182 | } | ||
2183 | |||
2184 | cdev->owner = THIS_MODULE; | ||
2185 | cdev->ops = fops; | ||
2186 | kobject_set_name(&cdev->kobj, name); | ||
2187 | |||
2188 | ret = cdev_add(cdev, dev, 1); | ||
2189 | if (ret < 0) { | ||
2190 | printk(KERN_ERR QIB_DRV_NAME | ||
2191 | ": Could not add cdev for minor %d, %s (err %d)\n", | ||
2192 | minor, name, -ret); | ||
2193 | goto err_cdev; | ||
2194 | } | ||
2195 | |||
2196 | device = device_create(qib_class, NULL, dev, NULL, name); | ||
2197 | if (!IS_ERR(device)) | ||
2198 | goto done; | ||
2199 | ret = PTR_ERR(device); | ||
2200 | device = NULL; | ||
2201 | printk(KERN_ERR QIB_DRV_NAME ": Could not create " | ||
2202 | "device for minor %d, %s (err %d)\n", | ||
2203 | minor, name, -ret); | ||
2204 | err_cdev: | ||
2205 | cdev_del(cdev); | ||
2206 | cdev = NULL; | ||
2207 | done: | ||
2208 | *cdevp = cdev; | ||
2209 | *devp = device; | ||
2210 | return ret; | ||
2211 | } | ||
2212 | |||
2213 | void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp) | ||
2214 | { | ||
2215 | struct device *device = *devp; | ||
2216 | |||
2217 | if (device) { | ||
2218 | device_unregister(device); | ||
2219 | *devp = NULL; | ||
2220 | } | ||
2221 | |||
2222 | if (*cdevp) { | ||
2223 | cdev_del(*cdevp); | ||
2224 | *cdevp = NULL; | ||
2225 | } | ||
2226 | } | ||
2227 | |||
2228 | static struct cdev *wildcard_cdev; | ||
2229 | static struct device *wildcard_device; | ||
2230 | |||
2231 | int __init qib_dev_init(void) | ||
2232 | { | ||
2233 | int ret; | ||
2234 | |||
2235 | ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME); | ||
2236 | if (ret < 0) { | ||
2237 | printk(KERN_ERR QIB_DRV_NAME ": Could not allocate " | ||
2238 | "chrdev region (err %d)\n", -ret); | ||
2239 | goto done; | ||
2240 | } | ||
2241 | |||
2242 | qib_class = class_create(THIS_MODULE, "ipath"); | ||
2243 | if (IS_ERR(qib_class)) { | ||
2244 | ret = PTR_ERR(qib_class); | ||
2245 | printk(KERN_ERR QIB_DRV_NAME ": Could not create " | ||
2246 | "device class (err %d)\n", -ret); | ||
2247 | unregister_chrdev_region(qib_dev, QIB_NMINORS); | ||
2248 | } | ||
2249 | |||
2250 | done: | ||
2251 | return ret; | ||
2252 | } | ||
2253 | |||
2254 | void qib_dev_cleanup(void) | ||
2255 | { | ||
2256 | if (qib_class) { | ||
2257 | class_destroy(qib_class); | ||
2258 | qib_class = NULL; | ||
2259 | } | ||
2260 | |||
2261 | unregister_chrdev_region(qib_dev, QIB_NMINORS); | ||
2262 | } | ||
2263 | |||
2264 | static atomic_t user_count = ATOMIC_INIT(0); | ||
2265 | |||
2266 | static void qib_user_remove(struct qib_devdata *dd) | ||
2267 | { | ||
2268 | if (atomic_dec_return(&user_count) == 0) | ||
2269 | qib_cdev_cleanup(&wildcard_cdev, &wildcard_device); | ||
2270 | |||
2271 | qib_cdev_cleanup(&dd->user_cdev, &dd->user_device); | ||
2272 | } | ||
2273 | |||
2274 | static int qib_user_add(struct qib_devdata *dd) | ||
2275 | { | ||
2276 | char name[10]; | ||
2277 | int ret; | ||
2278 | |||
2279 | if (atomic_inc_return(&user_count) == 1) { | ||
2280 | ret = qib_cdev_init(0, "ipath", &qib_file_ops, | ||
2281 | &wildcard_cdev, &wildcard_device); | ||
2282 | if (ret) | ||
2283 | goto done; | ||
2284 | } | ||
2285 | |||
2286 | snprintf(name, sizeof(name), "ipath%d", dd->unit); | ||
2287 | ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops, | ||
2288 | &dd->user_cdev, &dd->user_device); | ||
2289 | if (ret) | ||
2290 | qib_user_remove(dd); | ||
2291 | done: | ||
2292 | return ret; | ||
2293 | } | ||
2294 | |||
2295 | /* | ||
2296 | * Create per-unit files in /dev | ||
2297 | */ | ||
2298 | int qib_device_create(struct qib_devdata *dd) | ||
2299 | { | ||
2300 | int r, ret; | ||
2301 | |||
2302 | r = qib_user_add(dd); | ||
2303 | ret = qib_diag_add(dd); | ||
2304 | if (r && !ret) | ||
2305 | ret = r; | ||
2306 | return ret; | ||
2307 | } | ||
2308 | |||
2309 | /* | ||
2310 | * Remove per-unit files in /dev | ||
2311 | * Void return; the core kernel reports no errors for these removals. | ||
2312 | */ | ||
2313 | void qib_device_remove(struct qib_devdata *dd) | ||
2314 | { | ||
2315 | qib_user_remove(dd); | ||
2316 | qib_diag_remove(dd); | ||
2317 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c new file mode 100644 index 000000000000..755470440ef1 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_fs.c | |||
@@ -0,0 +1,613 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/fs.h> | ||
36 | #include <linux/mount.h> | ||
37 | #include <linux/pagemap.h> | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/namei.h> | ||
40 | |||
41 | #include "qib.h" | ||
42 | |||
43 | #define QIBFS_MAGIC 0x726a77 | ||
44 | |||
45 | static struct super_block *qib_super; | ||
46 | |||
47 | #define private2dd(file) ((file)->f_dentry->d_inode->i_private) | ||
48 | |||
49 | static int qibfs_mknod(struct inode *dir, struct dentry *dentry, | ||
50 | int mode, const struct file_operations *fops, | ||
51 | void *data) | ||
52 | { | ||
53 | int error; | ||
54 | struct inode *inode = new_inode(dir->i_sb); | ||
55 | |||
56 | if (!inode) { | ||
57 | error = -EPERM; | ||
58 | goto bail; | ||
59 | } | ||
60 | |||
61 | inode->i_mode = mode; | ||
62 | inode->i_uid = 0; | ||
63 | inode->i_gid = 0; | ||
64 | inode->i_blocks = 0; | ||
65 | inode->i_atime = CURRENT_TIME; | ||
66 | inode->i_mtime = inode->i_atime; | ||
67 | inode->i_ctime = inode->i_atime; | ||
68 | inode->i_private = data; | ||
69 | if ((mode & S_IFMT) == S_IFDIR) { | ||
70 | inode->i_op = &simple_dir_inode_operations; | ||
71 | inc_nlink(inode); | ||
72 | inc_nlink(dir); | ||
73 | } | ||
74 | |||
75 | inode->i_fop = fops; | ||
76 | |||
77 | d_instantiate(dentry, inode); | ||
78 | error = 0; | ||
79 | |||
80 | bail: | ||
81 | return error; | ||
82 | } | ||
83 | |||
84 | static int create_file(const char *name, mode_t mode, | ||
85 | struct dentry *parent, struct dentry **dentry, | ||
86 | const struct file_operations *fops, void *data) | ||
87 | { | ||
88 | int error; | ||
89 | |||
90 | *dentry = NULL; | ||
91 | mutex_lock(&parent->d_inode->i_mutex); | ||
92 | *dentry = lookup_one_len(name, parent, strlen(name)); | ||
93 | if (!IS_ERR(*dentry)) | ||
94 | error = qibfs_mknod(parent->d_inode, *dentry, | ||
95 | mode, fops, data); | ||
96 | else | ||
97 | error = PTR_ERR(*dentry); | ||
98 | mutex_unlock(&parent->d_inode->i_mutex); | ||
99 | |||
100 | return error; | ||
101 | } | ||
102 | |||
103 | static ssize_t driver_stats_read(struct file *file, char __user *buf, | ||
104 | size_t count, loff_t *ppos) | ||
105 | { | ||
106 | return simple_read_from_buffer(buf, count, ppos, &qib_stats, | ||
107 | sizeof qib_stats); | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * Driver stats field names, one line per stat, as a single string. Used by | ||
112 | * programs like ipathstats to print the stats in a way that works for | ||
113 | * different versions of drivers, without changing program source. | ||
114 | * If qlogic_ib_stats changes, this needs to change. Names need to be | ||
115 | * 12 chars or less (w/o newline), for proper display by the ipathstats utility. | ||
116 | */ | ||
117 | static const char qib_statnames[] = | ||
118 | "KernIntr\n" | ||
119 | "ErrorIntr\n" | ||
120 | "Tx_Errs\n" | ||
121 | "Rcv_Errs\n" | ||
122 | "H/W_Errs\n" | ||
123 | "NoPIOBufs\n" | ||
124 | "CtxtsOpen\n" | ||
125 | "RcvLen_Errs\n" | ||
126 | "EgrBufFull\n" | ||
127 | "EgrHdrFull\n" | ||
128 | ; | ||
129 | |||
130 | static ssize_t driver_names_read(struct file *file, char __user *buf, | ||
131 | size_t count, loff_t *ppos) | ||
132 | { | ||
133 | return simple_read_from_buffer(buf, count, ppos, qib_statnames, | ||
134 | sizeof qib_statnames - 1); /* no null */ | ||
135 | } | ||
136 | |||
137 | static const struct file_operations driver_ops[] = { | ||
138 | { .read = driver_stats_read, }, | ||
139 | { .read = driver_names_read, }, | ||
140 | }; | ||
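driver_stats returns the raw qib_stats structure as a flat array of u64 counters, and driver_stats_names returns the matching newline-separated label list above, so monitoring tools pair the two positionally. A hedged userspace sketch follows, assuming the filesystem is mounted at /ipathfs (the mount point is an assumption):

/*
 * Hedged sketch: print each driver counter next to its label by
 * walking driver_stats (binary u64s) and driver_stats_names
 * (newline-separated strings) in lockstep.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void dump_driver_stats(void)
{
	FILE *names = fopen("/ipathfs/driver_stats_names", "r");
	FILE *stats = fopen("/ipathfs/driver_stats", "r");
	char name[64];
	uint64_t val;

	if (!names || !stats)
		goto out;
	/* one u64 per name, in the same order as the name list */
	while (fgets(name, sizeof(name), names) &&
	       fread(&val, sizeof(val), 1, stats) == 1) {
		name[strcspn(name, "\n")] = '\0';
		printf("%-12s %llu\n", name, (unsigned long long)val);
	}
out:
	if (names)
		fclose(names);
	if (stats)
		fclose(stats);
}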
141 | |||
142 | /* read the per-device counters */ | ||
143 | static ssize_t dev_counters_read(struct file *file, char __user *buf, | ||
144 | size_t count, loff_t *ppos) | ||
145 | { | ||
146 | u64 *counters; | ||
147 | struct qib_devdata *dd = private2dd(file); | ||
148 | |||
149 | return simple_read_from_buffer(buf, count, ppos, counters, | ||
150 | dd->f_read_cntrs(dd, *ppos, NULL, &counters)); | ||
151 | } | ||
152 | |||
153 | /* read the per-device counter names */ | ||
154 | static ssize_t dev_names_read(struct file *file, char __user *buf, | ||
155 | size_t count, loff_t *ppos) | ||
156 | { | ||
157 | char *names; | ||
158 | struct qib_devdata *dd = private2dd(file); | ||
159 | |||
160 | return simple_read_from_buffer(buf, count, ppos, names, | ||
161 | dd->f_read_cntrs(dd, *ppos, &names, NULL)); | ||
162 | } | ||
163 | |||
164 | static const struct file_operations cntr_ops[] = { | ||
165 | { .read = dev_counters_read, }, | ||
166 | { .read = dev_names_read, }, | ||
167 | }; | ||
168 | |||
169 | /* | ||
170 | * Could use file->f_dentry->d_inode->i_ino to figure out which file, | ||
171 | * instead of a separate routine for each, but for now, this works... | ||
172 | */ | ||
173 | |||
174 | /* read the per-port names (same for each port) */ | ||
175 | static ssize_t portnames_read(struct file *file, char __user *buf, | ||
176 | size_t count, loff_t *ppos) | ||
177 | { | ||
178 | char *names; | ||
179 | struct qib_devdata *dd = private2dd(file); | ||
180 | |||
181 | return simple_read_from_buffer(buf, count, ppos, names, | ||
182 | dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL)); | ||
183 | } | ||
184 | |||
185 | /* read the per-port counters for port 1 (pidx 0) */ | ||
186 | static ssize_t portcntrs_1_read(struct file *file, char __user *buf, | ||
187 | size_t count, loff_t *ppos) | ||
188 | { | ||
189 | u64 *counters; | ||
190 | struct qib_devdata *dd = private2dd(file); | ||
191 | |||
192 | return simple_read_from_buffer(buf, count, ppos, counters, | ||
193 | dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters)); | ||
194 | } | ||
195 | |||
196 | /* read the per-port counters for port 2 (pidx 1) */ | ||
197 | static ssize_t portcntrs_2_read(struct file *file, char __user *buf, | ||
198 | size_t count, loff_t *ppos) | ||
199 | { | ||
200 | u64 *counters; | ||
201 | struct qib_devdata *dd = private2dd(file); | ||
202 | |||
203 | return simple_read_from_buffer(buf, count, ppos, counters, | ||
204 | dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters)); | ||
205 | } | ||
206 | |||
207 | static const struct file_operations portcntr_ops[] = { | ||
208 | { .read = portnames_read, }, | ||
209 | { .read = portcntrs_1_read, }, | ||
210 | { .read = portcntrs_2_read, }, | ||
211 | }; | ||
212 | |||
213 | /* | ||
214 | * read the per-port QSFP data for port 1 (pidx 0) | ||
215 | */ | ||
216 | static ssize_t qsfp_1_read(struct file *file, char __user *buf, | ||
217 | size_t count, loff_t *ppos) | ||
218 | { | ||
219 | struct qib_devdata *dd = private2dd(file); | ||
220 | char *tmp; | ||
221 | int ret; | ||
222 | |||
223 | tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
224 | if (!tmp) | ||
225 | return -ENOMEM; | ||
226 | |||
227 | ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE); | ||
228 | if (ret > 0) | ||
229 | ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); | ||
230 | kfree(tmp); | ||
231 | return ret; | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * read the per-port QSFP data for port 2 (pidx 1) | ||
236 | */ | ||
237 | static ssize_t qsfp_2_read(struct file *file, char __user *buf, | ||
238 | size_t count, loff_t *ppos) | ||
239 | { | ||
240 | struct qib_devdata *dd = private2dd(file); | ||
241 | char *tmp; | ||
242 | int ret; | ||
243 | |||
244 | if (dd->num_pports < 2) | ||
245 | return -ENODEV; | ||
246 | |||
247 | tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
248 | if (!tmp) | ||
249 | return -ENOMEM; | ||
250 | |||
251 | ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE); | ||
252 | if (ret > 0) | ||
253 | ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); | ||
254 | kfree(tmp); | ||
255 | return ret; | ||
256 | } | ||
257 | |||
258 | static const struct file_operations qsfp_ops[] = { | ||
259 | { .read = qsfp_1_read, }, | ||
260 | { .read = qsfp_2_read, }, | ||
261 | }; | ||
262 | |||
263 | static ssize_t flash_read(struct file *file, char __user *buf, | ||
264 | size_t count, loff_t *ppos) | ||
265 | { | ||
266 | struct qib_devdata *dd; | ||
267 | ssize_t ret; | ||
268 | loff_t pos; | ||
269 | char *tmp; | ||
270 | |||
271 | pos = *ppos; | ||
272 | |||
273 | if (pos < 0) { | ||
274 | ret = -EINVAL; | ||
275 | goto bail; | ||
276 | } | ||
277 | |||
278 | if (pos >= sizeof(struct qib_flash)) { | ||
279 | ret = 0; | ||
280 | goto bail; | ||
281 | } | ||
282 | |||
283 | if (count > sizeof(struct qib_flash) - pos) | ||
284 | count = sizeof(struct qib_flash) - pos; | ||
285 | |||
286 | tmp = kmalloc(count, GFP_KERNEL); | ||
287 | if (!tmp) { | ||
288 | ret = -ENOMEM; | ||
289 | goto bail; | ||
290 | } | ||
291 | |||
292 | dd = private2dd(file); | ||
293 | if (qib_eeprom_read(dd, pos, tmp, count)) { | ||
294 | qib_dev_err(dd, "failed to read from flash\n"); | ||
295 | ret = -ENXIO; | ||
296 | goto bail_tmp; | ||
297 | } | ||
298 | |||
299 | if (copy_to_user(buf, tmp, count)) { | ||
300 | ret = -EFAULT; | ||
301 | goto bail_tmp; | ||
302 | } | ||
303 | |||
304 | *ppos = pos + count; | ||
305 | ret = count; | ||
306 | |||
307 | bail_tmp: | ||
308 | kfree(tmp); | ||
309 | |||
310 | bail: | ||
311 | return ret; | ||
312 | } | ||
313 | |||
314 | static ssize_t flash_write(struct file *file, const char __user *buf, | ||
315 | size_t count, loff_t *ppos) | ||
316 | { | ||
317 | struct qib_devdata *dd; | ||
318 | ssize_t ret; | ||
319 | loff_t pos; | ||
320 | char *tmp; | ||
321 | |||
322 | pos = *ppos; | ||
323 | |||
324 | if (pos != 0) { | ||
325 | ret = -EINVAL; | ||
326 | goto bail; | ||
327 | } | ||
328 | |||
329 | if (count != sizeof(struct qib_flash)) { | ||
330 | ret = -EINVAL; | ||
331 | goto bail; | ||
332 | } | ||
333 | |||
334 | tmp = kmalloc(count, GFP_KERNEL); | ||
335 | if (!tmp) { | ||
336 | ret = -ENOMEM; | ||
337 | goto bail; | ||
338 | } | ||
339 | |||
340 | if (copy_from_user(tmp, buf, count)) { | ||
341 | ret = -EFAULT; | ||
342 | goto bail_tmp; | ||
343 | } | ||
344 | |||
345 | dd = private2dd(file); | ||
346 | if (qib_eeprom_write(dd, pos, tmp, count)) { | ||
347 | ret = -ENXIO; | ||
348 | qib_dev_err(dd, "failed to write to flash\n"); | ||
349 | goto bail_tmp; | ||
350 | } | ||
351 | |||
352 | *ppos = pos + count; | ||
353 | ret = count; | ||
354 | |||
355 | bail_tmp: | ||
356 | kfree(tmp); | ||
357 | |||
358 | bail: | ||
359 | return ret; | ||
360 | } | ||
361 | |||
362 | static const struct file_operations flash_ops = { | ||
363 | .read = flash_read, | ||
364 | .write = flash_write, | ||
365 | }; | ||
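flash_read() allows partial reads anywhere inside the qib_flash image, while flash_write() only accepts a single full-image write at offset 0, so updating the EEPROM contents amounts to a read-modify-write of the whole structure. A hedged sketch of reading the full image (the path assumes unit 0 under a hypothetical /ipathfs mount, and the caller's buffer is simply assumed to be large enough):

/*
 * Hedged sketch: slurp the per-unit flash image.  The driver returns
 * 0 once the offset reaches sizeof(struct qib_flash), so reading
 * until EOF yields the whole image.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

static ssize_t read_flash_image(unsigned char *buf, size_t len)
{
	int fd = open("/ipathfs/0/flash", O_RDONLY);
	ssize_t total = 0, n = 0;

	if (fd < 0)
		return -1;
	while ((size_t)total < len &&
	       (n = read(fd, buf + total, len - total)) > 0)
		total += n;
	close(fd);
	return n < 0 ? -1 : total;
}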
366 | |||
367 | static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) | ||
368 | { | ||
369 | struct dentry *dir, *tmp; | ||
370 | char unit[10]; | ||
371 | int ret, i; | ||
372 | |||
373 | /* create the per-unit directory */ | ||
374 | snprintf(unit, sizeof unit, "%u", dd->unit); | ||
375 | ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, | ||
376 | &simple_dir_operations, dd); | ||
377 | if (ret) { | ||
378 | printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret); | ||
379 | goto bail; | ||
380 | } | ||
381 | |||
382 | /* create the files in the new directory */ | ||
383 | ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp, | ||
384 | &cntr_ops[0], dd); | ||
385 | if (ret) { | ||
386 | printk(KERN_ERR "create_file(%s/counters) failed: %d\n", | ||
387 | unit, ret); | ||
388 | goto bail; | ||
389 | } | ||
390 | ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp, | ||
391 | &cntr_ops[1], dd); | ||
392 | if (ret) { | ||
393 | printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n", | ||
394 | unit, ret); | ||
395 | goto bail; | ||
396 | } | ||
397 | ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp, | ||
398 | &portcntr_ops[0], dd); | ||
399 | if (ret) { | ||
400 | printk(KERN_ERR "create_file(%s/%s) failed: %d\n", | ||
401 | unit, "portcounter_names", ret); | ||
402 | goto bail; | ||
403 | } | ||
404 | for (i = 1; i <= dd->num_pports; i++) { | ||
405 | char fname[24]; | ||
406 | |||
407 | sprintf(fname, "port%dcounters", i); | ||
408 | /* create the files in the new directory */ | ||
409 | ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, | ||
410 | &portcntr_ops[i], dd); | ||
411 | if (ret) { | ||
412 | printk(KERN_ERR "create_file(%s/%s) failed: %d\n", | ||
413 | unit, fname, ret); | ||
414 | goto bail; | ||
415 | } | ||
416 | if (!(dd->flags & QIB_HAS_QSFP)) | ||
417 | continue; | ||
418 | sprintf(fname, "qsfp%d", i); | ||
419 | ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, | ||
420 | &qsfp_ops[i - 1], dd); | ||
421 | if (ret) { | ||
422 | printk(KERN_ERR "create_file(%s/%s) failed: %d\n", | ||
423 | unit, fname, ret); | ||
424 | goto bail; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp, | ||
429 | &flash_ops, dd); | ||
430 | if (ret) | ||
431 | printk(KERN_ERR "create_file(%s/flash) failed: %d\n", | ||
432 | unit, ret); | ||
433 | bail: | ||
434 | return ret; | ||
435 | } | ||
436 | |||
437 | static int remove_file(struct dentry *parent, char *name) | ||
438 | { | ||
439 | struct dentry *tmp; | ||
440 | int ret; | ||
441 | |||
442 | tmp = lookup_one_len(name, parent, strlen(name)); | ||
443 | |||
444 | if (IS_ERR(tmp)) { | ||
445 | ret = PTR_ERR(tmp); | ||
446 | goto bail; | ||
447 | } | ||
448 | |||
449 | spin_lock(&dcache_lock); | ||
450 | spin_lock(&tmp->d_lock); | ||
451 | if (!(d_unhashed(tmp) && tmp->d_inode)) { | ||
452 | dget_locked(tmp); | ||
453 | __d_drop(tmp); | ||
454 | spin_unlock(&tmp->d_lock); | ||
455 | spin_unlock(&dcache_lock); | ||
456 | simple_unlink(parent->d_inode, tmp); | ||
457 | } else { | ||
458 | spin_unlock(&tmp->d_lock); | ||
459 | spin_unlock(&dcache_lock); | ||
460 | } | ||
461 | |||
462 | ret = 0; | ||
463 | bail: | ||
464 | /* | ||
465 | * We don't expect clients to care about the return value, but | ||
466 | * it's there if they need it. | ||
467 | */ | ||
468 | return ret; | ||
469 | } | ||
470 | |||
471 | static int remove_device_files(struct super_block *sb, | ||
472 | struct qib_devdata *dd) | ||
473 | { | ||
474 | struct dentry *dir, *root; | ||
475 | char unit[10]; | ||
476 | int ret, i; | ||
477 | |||
478 | root = dget(sb->s_root); | ||
479 | mutex_lock(&root->d_inode->i_mutex); | ||
480 | snprintf(unit, sizeof unit, "%u", dd->unit); | ||
481 | dir = lookup_one_len(unit, root, strlen(unit)); | ||
482 | |||
483 | if (IS_ERR(dir)) { | ||
484 | ret = PTR_ERR(dir); | ||
485 | printk(KERN_ERR "Lookup of %s failed\n", unit); | ||
486 | goto bail; | ||
487 | } | ||
488 | |||
489 | remove_file(dir, "counters"); | ||
490 | remove_file(dir, "counter_names"); | ||
491 | remove_file(dir, "portcounter_names"); | ||
492 | for (i = 0; i < dd->num_pports; i++) { | ||
493 | char fname[24]; | ||
494 | |||
495 | sprintf(fname, "port%dcounters", i + 1); | ||
496 | remove_file(dir, fname); | ||
497 | if (dd->flags & QIB_HAS_QSFP) { | ||
498 | sprintf(fname, "qsfp%d", i + 1); | ||
499 | remove_file(dir, fname); | ||
500 | } | ||
501 | } | ||
502 | remove_file(dir, "flash"); | ||
503 | d_delete(dir); | ||
504 | ret = simple_rmdir(root->d_inode, dir); | ||
505 | |||
506 | bail: | ||
507 | mutex_unlock(&root->d_inode->i_mutex); | ||
508 | dput(root); | ||
509 | return ret; | ||
510 | } | ||
511 | |||
512 | /* | ||
513 | * This fills everything in when the fs is mounted, to handle umount/mount | ||
514 | * after device init. The direct add_cntr_files() call handles adding | ||
515 | * them from the init code, when the fs is already mounted. | ||
516 | */ | ||
517 | static int qibfs_fill_super(struct super_block *sb, void *data, int silent) | ||
518 | { | ||
519 | struct qib_devdata *dd, *tmp; | ||
520 | unsigned long flags; | ||
521 | int ret; | ||
522 | |||
523 | static struct tree_descr files[] = { | ||
524 | [2] = {"driver_stats", &driver_ops[0], S_IRUGO}, | ||
525 | [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO}, | ||
526 | {""}, | ||
527 | }; | ||
528 | |||
529 | ret = simple_fill_super(sb, QIBFS_MAGIC, files); | ||
530 | if (ret) { | ||
531 | printk(KERN_ERR "simple_fill_super failed: %d\n", ret); | ||
532 | goto bail; | ||
533 | } | ||
534 | |||
535 | spin_lock_irqsave(&qib_devs_lock, flags); | ||
536 | |||
537 | list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) { | ||
538 | spin_unlock_irqrestore(&qib_devs_lock, flags); | ||
539 | ret = add_cntr_files(sb, dd); | ||
540 | if (ret) { | ||
541 | deactivate_super(sb); | ||
542 | goto bail; | ||
543 | } | ||
544 | spin_lock_irqsave(&qib_devs_lock, flags); | ||
545 | } | ||
546 | |||
547 | spin_unlock_irqrestore(&qib_devs_lock, flags); | ||
548 | |||
549 | bail: | ||
550 | return ret; | ||
551 | } | ||
552 | |||
553 | static int qibfs_get_sb(struct file_system_type *fs_type, int flags, | ||
554 | const char *dev_name, void *data, struct vfsmount *mnt) | ||
555 | { | ||
556 | int ret = get_sb_single(fs_type, flags, data, | ||
557 | qibfs_fill_super, mnt); | ||
558 | if (ret >= 0) | ||
559 | qib_super = mnt->mnt_sb; | ||
560 | return ret; | ||
561 | } | ||
562 | |||
563 | static void qibfs_kill_super(struct super_block *s) | ||
564 | { | ||
565 | kill_litter_super(s); | ||
566 | qib_super = NULL; | ||
567 | } | ||
568 | |||
569 | int qibfs_add(struct qib_devdata *dd) | ||
570 | { | ||
571 | int ret; | ||
572 | |||
573 | /* | ||
574 | * On first unit initialized, qib_super will not yet exist | ||
575 | * because nobody has yet tried to mount the filesystem, so | ||
576 | * we can't consider that to be an error; if an error occurs | ||
577 | * during the mount, that will get a complaint, so this is OK. | ||
578 | * add_cntr_files() for all units is done at mount from | ||
579 | * qibfs_fill_super(), so one way or another, everything works. | ||
580 | */ | ||
581 | if (qib_super == NULL) | ||
582 | ret = 0; | ||
583 | else | ||
584 | ret = add_cntr_files(qib_super, dd); | ||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | int qibfs_remove(struct qib_devdata *dd) | ||
589 | { | ||
590 | int ret = 0; | ||
591 | |||
592 | if (qib_super) | ||
593 | ret = remove_device_files(qib_super, dd); | ||
594 | |||
595 | return ret; | ||
596 | } | ||
597 | |||
598 | static struct file_system_type qibfs_fs_type = { | ||
599 | .owner = THIS_MODULE, | ||
600 | .name = "ipathfs", | ||
601 | .get_sb = qibfs_get_sb, | ||
602 | .kill_sb = qibfs_kill_super, | ||
603 | }; | ||
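The filesystem keeps the legacy "ipathfs" type name so existing tooling continues to work, and mounting it is what makes qibfs_fill_super() populate the per-unit counter directories. A minimal mount from C is sketched below; the /ipathfs target directory is an assumption and must already exist.

/* Hedged sketch: equivalent to "mount -t ipathfs none /ipathfs". */
#include <sys/mount.h>
#include <stdio.h>

static int mount_ipathfs(void)
{
	if (mount("none", "/ipathfs", "ipathfs", 0, NULL) != 0) {
		perror("mount ipathfs");
		return -1;
	}
	return 0;
}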
604 | |||
605 | int __init qib_init_qibfs(void) | ||
606 | { | ||
607 | return register_filesystem(&qibfs_fs_type); | ||
608 | } | ||
609 | |||
610 | int __exit qib_exit_qibfs(void) | ||
611 | { | ||
612 | return unregister_filesystem(&qibfs_fs_type); | ||
613 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c new file mode 100644 index 000000000000..7b6549fd429b --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
@@ -0,0 +1,3588 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | /* | ||
35 | * This file contains all of the code that is specific to the | ||
36 | * QLogic_IB 6120 PCIe chip. | ||
37 | */ | ||
38 | |||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <rdma/ib_verbs.h> | ||
43 | |||
44 | #include "qib.h" | ||
45 | #include "qib_6120_regs.h" | ||
46 | |||
47 | static void qib_6120_setup_setextled(struct qib_pportdata *, u32); | ||
48 | static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op); | ||
49 | static u8 qib_6120_phys_portstate(u64); | ||
50 | static u32 qib_6120_iblink_state(u64); | ||
51 | |||
52 | /* | ||
53 | * This file contains all the chip-specific register information and | ||
54 | * access functions for the QLogic QLogic_IB PCI-Express chip. | ||
55 | * | ||
56 | */ | ||
57 | |||
58 | /* KREG_IDX uses machine-generated #defines */ | ||
59 | #define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64)) | ||
60 | |||
61 | /* Use defines to tie machine-generated names to lower-case names */ | ||
62 | #define kr_extctrl KREG_IDX(EXTCtrl) | ||
63 | #define kr_extstatus KREG_IDX(EXTStatus) | ||
64 | #define kr_gpio_clear KREG_IDX(GPIOClear) | ||
65 | #define kr_gpio_mask KREG_IDX(GPIOMask) | ||
66 | #define kr_gpio_out KREG_IDX(GPIOOut) | ||
67 | #define kr_gpio_status KREG_IDX(GPIOStatus) | ||
68 | #define kr_rcvctrl KREG_IDX(RcvCtrl) | ||
69 | #define kr_sendctrl KREG_IDX(SendCtrl) | ||
70 | #define kr_partitionkey KREG_IDX(RcvPartitionKey) | ||
71 | #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) | ||
72 | #define kr_ibcstatus KREG_IDX(IBCStatus) | ||
73 | #define kr_ibcctrl KREG_IDX(IBCCtrl) | ||
74 | #define kr_sendbuffererror KREG_IDX(SendBufErr0) | ||
75 | #define kr_rcvbthqp KREG_IDX(RcvBTHQP) | ||
76 | #define kr_counterregbase KREG_IDX(CntrRegBase) | ||
77 | #define kr_palign KREG_IDX(PageAlign) | ||
78 | #define kr_rcvegrbase KREG_IDX(RcvEgrBase) | ||
79 | #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) | ||
80 | #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) | ||
81 | #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) | ||
82 | #define kr_rcvhdrsize KREG_IDX(RcvHdrSize) | ||
83 | #define kr_rcvtidbase KREG_IDX(RcvTIDBase) | ||
84 | #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) | ||
85 | #define kr_scratch KREG_IDX(Scratch) | ||
86 | #define kr_sendctrl KREG_IDX(SendCtrl) | ||
87 | #define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr) | ||
88 | #define kr_sendpiobufbase KREG_IDX(SendPIOBufBase) | ||
89 | #define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt) | ||
90 | #define kr_sendpiosize KREG_IDX(SendPIOSize) | ||
91 | #define kr_sendregbase KREG_IDX(SendRegBase) | ||
92 | #define kr_userregbase KREG_IDX(UserRegBase) | ||
93 | #define kr_control KREG_IDX(Control) | ||
94 | #define kr_intclear KREG_IDX(IntClear) | ||
95 | #define kr_intmask KREG_IDX(IntMask) | ||
96 | #define kr_intstatus KREG_IDX(IntStatus) | ||
97 | #define kr_errclear KREG_IDX(ErrClear) | ||
98 | #define kr_errmask KREG_IDX(ErrMask) | ||
99 | #define kr_errstatus KREG_IDX(ErrStatus) | ||
100 | #define kr_hwerrclear KREG_IDX(HwErrClear) | ||
101 | #define kr_hwerrmask KREG_IDX(HwErrMask) | ||
102 | #define kr_hwerrstatus KREG_IDX(HwErrStatus) | ||
103 | #define kr_revision KREG_IDX(Revision) | ||
104 | #define kr_portcnt KREG_IDX(PortCnt) | ||
105 | #define kr_serdes_cfg0 KREG_IDX(SerdesCfg0) | ||
106 | #define kr_serdes_cfg1 (kr_serdes_cfg0 + 1) | ||
107 | #define kr_serdes_stat KREG_IDX(SerdesStat) | ||
108 | #define kr_xgxs_cfg KREG_IDX(XGXSCfg) | ||
109 | |||
110 | /* These must only be written via qib_write_kreg_ctxt() */ | ||
111 | #define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0) | ||
112 | #define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) | ||
113 | |||
114 | #define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \ | ||
115 | QIB_6120_LBIntCnt_OFFS) / sizeof(u64)) | ||
116 | |||
117 | #define cr_badformat CREG_IDX(RxBadFormatCnt) | ||
118 | #define cr_erricrc CREG_IDX(RxICRCErrCnt) | ||
119 | #define cr_errlink CREG_IDX(RxLinkProblemCnt) | ||
120 | #define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt) | ||
121 | #define cr_errpkey CREG_IDX(RxPKeyMismatchCnt) | ||
122 | #define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt) | ||
123 | #define cr_err_rlen CREG_IDX(RxLenErrCnt) | ||
124 | #define cr_errslen CREG_IDX(TxLenErrCnt) | ||
125 | #define cr_errtidfull CREG_IDX(RxTIDFullErrCnt) | ||
126 | #define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt) | ||
127 | #define cr_errvcrc CREG_IDX(RxVCRCErrCnt) | ||
128 | #define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt) | ||
129 | #define cr_lbint CREG_IDX(LBIntCnt) | ||
130 | #define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) | ||
131 | #define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt) | ||
132 | #define cr_lbflowstall CREG_IDX(LBFlowStallCnt) | ||
133 | #define cr_pktrcv CREG_IDX(RxDataPktCnt) | ||
134 | #define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) | ||
135 | #define cr_pktsend CREG_IDX(TxDataPktCnt) | ||
136 | #define cr_pktsendflow CREG_IDX(TxFlowPktCnt) | ||
137 | #define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt) | ||
138 | #define cr_rcvebp CREG_IDX(RxEBPCnt) | ||
139 | #define cr_rcvovfl CREG_IDX(RxBufOvflCnt) | ||
140 | #define cr_senddropped CREG_IDX(TxDroppedPktCnt) | ||
141 | #define cr_sendstall CREG_IDX(TxFlowStallCnt) | ||
142 | #define cr_sendunderrun CREG_IDX(TxUnderrunCnt) | ||
143 | #define cr_wordrcv CREG_IDX(RxDwordCnt) | ||
144 | #define cr_wordsend CREG_IDX(TxDwordCnt) | ||
145 | #define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt) | ||
146 | #define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt) | ||
147 | #define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) | ||
148 | #define cr_iblinkdown CREG_IDX(IBLinkDownedCnt) | ||
149 | #define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt) | ||
150 | |||
151 | #define SYM_RMASK(regname, fldname) ((u64) \ | ||
152 | QIB_6120_##regname##_##fldname##_RMASK) | ||
153 | #define SYM_MASK(regname, fldname) ((u64) \ | ||
154 | QIB_6120_##regname##_##fldname##_RMASK << \ | ||
155 | QIB_6120_##regname##_##fldname##_LSB) | ||
156 | #define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB) | ||
157 | |||
158 | #define SYM_FIELD(value, regname, fldname) ((u64) \ | ||
159 | (((value) >> SYM_LSB(regname, fldname)) & \ | ||
160 | SYM_RMASK(regname, fldname))) | ||
161 | #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) | ||
162 | #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) | ||
163 | |||
164 | /* link training states, from IBC */ | ||
165 | #define IB_6120_LT_STATE_DISABLED 0x00 | ||
166 | #define IB_6120_LT_STATE_LINKUP 0x01 | ||
167 | #define IB_6120_LT_STATE_POLLACTIVE 0x02 | ||
168 | #define IB_6120_LT_STATE_POLLQUIET 0x03 | ||
169 | #define IB_6120_LT_STATE_SLEEPDELAY 0x04 | ||
170 | #define IB_6120_LT_STATE_SLEEPQUIET 0x05 | ||
171 | #define IB_6120_LT_STATE_CFGDEBOUNCE 0x08 | ||
172 | #define IB_6120_LT_STATE_CFGRCVFCFG 0x09 | ||
173 | #define IB_6120_LT_STATE_CFGWAITRMT 0x0a | ||
174 | #define IB_6120_LT_STATE_CFGIDLE 0x0b | ||
175 | #define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c | ||
176 | #define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e | ||
177 | #define IB_6120_LT_STATE_RECOVERIDLE 0x0f | ||
178 | |||
179 | /* link state machine states from IBC */ | ||
180 | #define IB_6120_L_STATE_DOWN 0x0 | ||
181 | #define IB_6120_L_STATE_INIT 0x1 | ||
182 | #define IB_6120_L_STATE_ARM 0x2 | ||
183 | #define IB_6120_L_STATE_ACTIVE 0x3 | ||
184 | #define IB_6120_L_STATE_ACT_DEFER 0x4 | ||
185 | |||
186 | static const u8 qib_6120_physportstate[0x20] = { | ||
187 | [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, | ||
188 | [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, | ||
189 | [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, | ||
190 | [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, | ||
191 | [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, | ||
192 | [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, | ||
193 | [IB_6120_LT_STATE_CFGDEBOUNCE] = | ||
194 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
195 | [IB_6120_LT_STATE_CFGRCVFCFG] = | ||
196 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
197 | [IB_6120_LT_STATE_CFGWAITRMT] = | ||
198 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
199 | [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
200 | [IB_6120_LT_STATE_RECOVERRETRAIN] = | ||
201 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
202 | [IB_6120_LT_STATE_RECOVERWAITRMT] = | ||
203 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
204 | [IB_6120_LT_STATE_RECOVERIDLE] = | ||
205 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
206 | [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
207 | [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
208 | [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
209 | [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
210 | [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
211 | [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
212 | [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
213 | [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN | ||
214 | }; | ||
215 | |||
216 | |||
217 | struct qib_chip_specific { | ||
218 | u64 __iomem *cregbase; | ||
219 | u64 *cntrs; | ||
220 | u64 *portcntrs; | ||
221 | void *dummy_hdrq; /* used after ctxt close */ | ||
222 | dma_addr_t dummy_hdrq_phys; | ||
223 | spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */ | ||
224 | spinlock_t user_tid_lock; /* no back to back user TID writes */ | ||
225 | spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */ | ||
226 | spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */ | ||
227 | u64 hwerrmask; | ||
228 | u64 errormask; | ||
229 | u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */ | ||
230 | u64 gpio_mask; /* shadow the gpio mask register */ | ||
231 | u64 extctrl; /* shadow the gpio output enable, etc... */ | ||
232 | /* | ||
233 | * these 5 fields are used to establish deltas for IB symbol | ||
234 | * errors and linkrecovery errors. They can be reported on | ||
235 | * some chips during link negotiation prior to INIT, and with | ||
236 | * DDR when faking DDR negotiations with non-IBTA switches. | ||
237 | * The chip counters are adjusted at driver unload if there is | ||
238 | * a non-zero delta. | ||
239 | */ | ||
240 | u64 ibdeltainprog; | ||
241 | u64 ibsymdelta; | ||
242 | u64 ibsymsnap; | ||
243 | u64 iblnkerrdelta; | ||
244 | u64 iblnkerrsnap; | ||
245 | u64 ibcctrl; /* shadow for kr_ibcctrl */ | ||
246 | u32 lastlinkrecov; /* link recovery issue */ | ||
247 | int irq; | ||
248 | u32 cntrnamelen; | ||
249 | u32 portcntrnamelen; | ||
250 | u32 ncntrs; | ||
251 | u32 nportcntrs; | ||
252 | /* used with gpio interrupts to implement IB counters */ | ||
253 | u32 rxfc_unsupvl_errs; | ||
254 | u32 overrun_thresh_errs; | ||
255 | /* | ||
256 | * these count only cases where _successive_ LocalLinkIntegrity | ||
257 | * errors were seen in the receive headers of IB standard packets | ||
258 | */ | ||
259 | u32 lli_errs; | ||
260 | u32 lli_counter; | ||
261 | u64 lli_thresh; | ||
262 | u64 sword; /* total dwords sent (sample result) */ | ||
263 | u64 rword; /* total dwords received (sample result) */ | ||
264 | u64 spkts; /* total packets sent (sample result) */ | ||
265 | u64 rpkts; /* total packets received (sample result) */ | ||
266 | u64 xmit_wait; /* # of ticks no data sent (sample result) */ | ||
267 | struct timer_list pma_timer; | ||
268 | char emsgbuf[128]; | ||
269 | char bitsmsgbuf[64]; | ||
270 | u8 pma_sample_status; | ||
271 | }; | ||
272 | |||
273 | /* ibcctrl bits */ | ||
274 | #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 | ||
275 | /* cycle through TS1/TS2 till OK */ | ||
276 | #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 | ||
277 | /* wait for TS1, then go on */ | ||
278 | #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 | ||
279 | #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 | ||
280 | |||
281 | #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ | ||
282 | #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ | ||
283 | #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ | ||
284 | #define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18 | ||
285 | |||
286 | /* | ||
287 | * We could have a single register get/put routine that takes a group type, | ||
288 | * but this is somewhat clearer and cleaner. It also gives us some error | ||
289 | * checking. 64-bit register reads should always work, but are inefficient | ||
290 | * on Opteron (the northbridge always generates 2 separate HT 32-bit reads), | ||
291 | * so we use kreg32 wherever possible. User register and counter register | ||
292 | * reads are always 32-bit reads, so there is only one form of those routines. | ||
293 | */ | ||
294 | |||
295 | /** | ||
296 | * qib_read_ureg32 - read 32-bit virtualized per-context register | ||
297 | * @dd: device | ||
298 | * @regno: register number | ||
299 | * @ctxt: context number | ||
300 | * | ||
301 | * Return the contents of a register that is virtualized to be per context. | ||
302 | * Returns 0 if the device is not present (not distinguishable from valid | ||
303 | * contents at runtime; we may add a separate error variable at some point). | ||
304 | */ | ||
305 | static inline u32 qib_read_ureg32(const struct qib_devdata *dd, | ||
306 | enum qib_ureg regno, int ctxt) | ||
307 | { | ||
308 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
309 | return 0; | ||
310 | |||
311 | if (dd->userbase) | ||
312 | return readl(regno + (u64 __iomem *) | ||
313 | ((char __iomem *)dd->userbase + | ||
314 | dd->ureg_align * ctxt)); | ||
315 | else | ||
316 | return readl(regno + (u64 __iomem *) | ||
317 | (dd->uregbase + | ||
318 | (char __iomem *)dd->kregbase + | ||
319 | dd->ureg_align * ctxt)); | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * qib_write_ureg - write a virtualized per-context register | ||
324 | * @dd: device | ||
325 | * @regno: register number | ||
326 | * @value: value | ||
327 | * @ctxt: context | ||
328 | * | ||
329 | * Write the contents of a register that is virtualized to be per context. | ||
330 | */ | ||
331 | static inline void qib_write_ureg(const struct qib_devdata *dd, | ||
332 | enum qib_ureg regno, u64 value, int ctxt) | ||
333 | { | ||
334 | u64 __iomem *ubase; | ||
335 | if (dd->userbase) | ||
336 | ubase = (u64 __iomem *) | ||
337 | ((char __iomem *) dd->userbase + | ||
338 | dd->ureg_align * ctxt); | ||
339 | else | ||
340 | ubase = (u64 __iomem *) | ||
341 | (dd->uregbase + | ||
342 | (char __iomem *) dd->kregbase + | ||
343 | dd->ureg_align * ctxt); | ||
344 | |||
345 | if (dd->kregbase && (dd->flags & QIB_PRESENT)) | ||
346 | writeq(value, &ubase[regno]); | ||
347 | } | ||
348 | |||
349 | static inline u32 qib_read_kreg32(const struct qib_devdata *dd, | ||
350 | const u16 regno) | ||
351 | { | ||
352 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
353 | return -1; | ||
354 | return readl((u32 __iomem *)&dd->kregbase[regno]); | ||
355 | } | ||
356 | |||
357 | static inline u64 qib_read_kreg64(const struct qib_devdata *dd, | ||
358 | const u16 regno) | ||
359 | { | ||
360 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
361 | return -1; | ||
362 | |||
363 | return readq(&dd->kregbase[regno]); | ||
364 | } | ||
365 | |||
366 | static inline void qib_write_kreg(const struct qib_devdata *dd, | ||
367 | const u16 regno, u64 value) | ||
368 | { | ||
369 | if (dd->kregbase && (dd->flags & QIB_PRESENT)) | ||
370 | writeq(value, &dd->kregbase[regno]); | ||
371 | } | ||
372 | |||
373 | /** | ||
374 | * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register | ||
375 | * @dd: the qlogic_ib device | ||
376 | * @regno: the register number to write | ||
377 | * @ctxt: the context containing the register | ||
378 | * @value: the value to write | ||
379 | */ | ||
380 | static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, | ||
381 | const u16 regno, unsigned ctxt, | ||
382 | u64 value) | ||
383 | { | ||
384 | qib_write_kreg(dd, regno + ctxt, value); | ||
385 | } | ||
386 | |||
387 | static inline void write_6120_creg(const struct qib_devdata *dd, | ||
388 | u16 regno, u64 value) | ||
389 | { | ||
390 | if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT)) | ||
391 | writeq(value, &dd->cspec->cregbase[regno]); | ||
392 | } | ||
393 | |||
394 | static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno) | ||
395 | { | ||
396 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | ||
397 | return 0; | ||
398 | return readq(&dd->cspec->cregbase[regno]); | ||
399 | } | ||
400 | |||
401 | static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno) | ||
402 | { | ||
403 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | ||
404 | return 0; | ||
405 | return readl(&dd->cspec->cregbase[regno]); | ||
406 | } | ||
407 | |||
408 | /* kr_control bits */ | ||
409 | #define QLOGIC_IB_C_RESET 1U | ||
410 | |||
411 | /* kr_intstatus, kr_intclear, kr_intmask bits */ | ||
412 | #define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1) | ||
413 | #define QLOGIC_IB_I_RCVURG_SHIFT 0 | ||
414 | #define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1) | ||
415 | #define QLOGIC_IB_I_RCVAVAIL_SHIFT 12 | ||
416 | |||
417 | #define QLOGIC_IB_C_FREEZEMODE 0x00000002 | ||
418 | #define QLOGIC_IB_C_LINKENABLE 0x00000004 | ||
419 | #define QLOGIC_IB_I_ERROR 0x0000000080000000ULL | ||
420 | #define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL | ||
421 | #define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL | ||
422 | #define QLOGIC_IB_I_GPIO 0x0000000010000000ULL | ||
423 | #define QLOGIC_IB_I_BITSEXTANT \ | ||
424 | ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \ | ||
425 | (QLOGIC_IB_I_RCVAVAIL_MASK << \ | ||
426 | QLOGIC_IB_I_RCVAVAIL_SHIFT) | \ | ||
427 | QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \ | ||
428 | QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO) | ||
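The defines above pack the per-context receive-urgent and receive-available bits into two 5-bit groups (shifts 0 and 12) below the high control bits. The sketch below is an illustrative, user-space-only decode of a sample status word, mirroring the rmask walk done later in qib_6120intr(); the sample value and helper name are made up for the example.

	#include <stdint.h>
	#include <stdio.h>

	#define RCVURG_SHIFT   0	/* QLOGIC_IB_I_RCVURG_SHIFT */
	#define RCVAVAIL_SHIFT 12	/* QLOGIC_IB_I_RCVAVAIL_SHIFT */

	/* Walk the five contexts the same way qib_6120intr() walks rmask. */
	static void decode_istat(uint64_t istat)
	{
		uint64_t rmask = (1ULL << RCVAVAIL_SHIFT) | (1ULL << RCVURG_SHIFT);
		unsigned ctxt;

		for (ctxt = 0; ctxt < 5; ctxt++, rmask <<= 1)
			if (istat & rmask)
				printf("context %u has receive work\n", ctxt);
	}

	int main(void)
	{
		/* sample: "available" for context 0, "urgent" for context 3 */
		decode_istat((1ULL << (RCVAVAIL_SHIFT + 0)) |
			     (1ULL << (RCVURG_SHIFT + 3)));
		return 0;
	}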
429 | |||
430 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ | ||
431 | #define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL | ||
432 | #define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0 | ||
433 | #define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL | ||
434 | #define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL | ||
435 | #define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL | ||
436 | #define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL | ||
437 | #define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL | ||
438 | #define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL | ||
439 | #define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL | ||
440 | #define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL | ||
441 | #define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL | ||
442 | #define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL | ||
443 | |||
444 | |||
445 | /* kr_extstatus bits */ | ||
446 | #define QLOGIC_IB_EXTS_FREQSEL 0x2 | ||
447 | #define QLOGIC_IB_EXTS_SERDESSEL 0x4 | ||
448 | #define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000 | ||
449 | #define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000 | ||
450 | |||
451 | /* kr_xgxsconfig bits */ | ||
452 | #define QLOGIC_IB_XGXS_RESET 0x5ULL | ||
453 | |||
454 | #define _QIB_GPIO_SDA_NUM 1 | ||
455 | #define _QIB_GPIO_SCL_NUM 0 | ||
456 | |||
457 | /* Bits in GPIO for the added IB link interrupts */ | ||
458 | #define GPIO_RXUVL_BIT 3 | ||
459 | #define GPIO_OVRUN_BIT 4 | ||
460 | #define GPIO_LLI_BIT 5 | ||
461 | #define GPIO_ERRINTR_MASK 0x38 | ||
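As a quick cross-check (illustrative only, not driver code), GPIO_ERRINTR_MASK is simply the OR of the three per-error GPIO bits above:

	/* (1 << 3) | (1 << 4) | (1 << 5) == 0x08 | 0x10 | 0x20 == 0x38 */
	_Static_assert(((1 << 3) | (1 << 4) | (1 << 5)) == 0x38,
		       "GPIO_ERRINTR_MASK covers bits 3..5");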
462 | |||
463 | |||
464 | #define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL | ||
465 | #define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \ | ||
466 | ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1) | ||
467 | #define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid)) | ||
468 | #define QLOGIC_IB_RT_IS_VALID(tid) \ | ||
469 | (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \ | ||
470 | ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK))) | ||
471 | #define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */ | ||
472 | #define QLOGIC_IB_RT_ADDR_SHIFT 10 | ||
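These macros decode the 3-bit buffer-size code (bits 31:29) and the 29-bit address field of an expected-TID word. Below is a minimal user-space sketch of the size/validity decode under the definitions above; the sample TID value is made up, and the address handling done by qib_6120_put_tid() is not reproduced here.

	#include <stdint.h>
	#include <stdio.h>

	#define RT_BUFSIZE_MASK 0xe0000000ULL
	#define RT_BUFSIZE_SHIFTVAL(tid) \
		((((tid) & RT_BUFSIZE_MASK) >> 29) + 11 - 1)
	#define RT_BUFSIZE(tid) (1 << RT_BUFSIZE_SHIFTVAL(tid))
	#define RT_IS_VALID(tid) \
		(((tid) & RT_BUFSIZE_MASK) && \
		 (((tid) & RT_BUFSIZE_MASK) != RT_BUFSIZE_MASK))

	int main(void)
	{
		/* size code 2 in bits 31:29 -> 1 << (2 + 10) = 4096-byte buffer */
		uint64_t tid = (2ULL << 29) | 0x12345;

		if (RT_IS_VALID(tid))
			printf("buffer size %d bytes\n", RT_BUFSIZE(tid));
		return 0;
	}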
473 | |||
474 | #define QLOGIC_IB_R_INTRAVAIL_SHIFT 16 | ||
475 | #define QLOGIC_IB_R_TAILUPD_SHIFT 31 | ||
476 | #define IBA6120_R_PKEY_DIS_SHIFT 30 | ||
477 | |||
478 | #define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */ | ||
479 | |||
480 | #define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr) | ||
481 | #define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr) | ||
482 | |||
483 | #define SYM_MASK_BIT(regname, fldname, bit) ((u64) \ | ||
484 | ((1ULL << (SYM_LSB(regname, fldname) + (bit))))) | ||
485 | |||
486 | #define TXEMEMPARITYERR_PIOBUF \ | ||
487 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0) | ||
488 | #define TXEMEMPARITYERR_PIOPBC \ | ||
489 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1) | ||
490 | #define TXEMEMPARITYERR_PIOLAUNCHFIFO \ | ||
491 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2) | ||
492 | |||
493 | #define RXEMEMPARITYERR_RCVBUF \ | ||
494 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0) | ||
495 | #define RXEMEMPARITYERR_LOOKUPQ \ | ||
496 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1) | ||
497 | #define RXEMEMPARITYERR_EXPTID \ | ||
498 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2) | ||
499 | #define RXEMEMPARITYERR_EAGERTID \ | ||
500 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3) | ||
501 | #define RXEMEMPARITYERR_FLAGBUF \ | ||
502 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4) | ||
503 | #define RXEMEMPARITYERR_DATAINFO \ | ||
504 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5) | ||
505 | #define RXEMEMPARITYERR_HDRINFO \ | ||
506 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6) | ||
507 | |||
508 | /* 6120 specific hardware errors... */ | ||
509 | static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = { | ||
510 | /* generic hardware errors */ | ||
511 | QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"), | ||
512 | QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"), | ||
513 | |||
514 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF, | ||
515 | "TXE PIOBUF Memory Parity"), | ||
516 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC, | ||
517 | "TXE PIOPBC Memory Parity"), | ||
518 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO, | ||
519 | "TXE PIOLAUNCHFIFO Memory Parity"), | ||
520 | |||
521 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF, | ||
522 | "RXE RCVBUF Memory Parity"), | ||
523 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ, | ||
524 | "RXE LOOKUPQ Memory Parity"), | ||
525 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID, | ||
526 | "RXE EAGERTID Memory Parity"), | ||
527 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID, | ||
528 | "RXE EXPTID Memory Parity"), | ||
529 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF, | ||
530 | "RXE FLAGBUF Memory Parity"), | ||
531 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO, | ||
532 | "RXE DATAINFO Memory Parity"), | ||
533 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO, | ||
534 | "RXE HDRINFO Memory Parity"), | ||
535 | |||
536 | /* chip-specific hardware errors */ | ||
537 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP, | ||
538 | "PCIe Poisoned TLP"), | ||
539 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT, | ||
540 | "PCIe completion timeout"), | ||
541 | /* | ||
542 | * In practice, it's unlikely that we'll see PCIe PLL, bus | ||
543 | * parity, or memory parity error failures, because most likely we | ||
544 | * won't be able to talk to the core of the chip. Nonetheless, we | ||
545 | * might see them, if they are in parts of the PCIe core that aren't | ||
546 | * essential. | ||
547 | */ | ||
548 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED, | ||
549 | "PCIePLL1"), | ||
550 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED, | ||
551 | "PCIePLL0"), | ||
552 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH, | ||
553 | "PCIe XTLH core parity"), | ||
554 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM, | ||
555 | "PCIe ADM TX core parity"), | ||
556 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM, | ||
557 | "PCIe ADM RX core parity"), | ||
558 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED, | ||
559 | "SerDes PLL"), | ||
560 | }; | ||
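/*
 * This table is consumed by qib_handle_6120_hwerrors() below, which
 * passes it to qib_format_hwerrors() to turn the set bits into the
 * human-readable strings above.
 */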
561 | |||
562 | #define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC) | ||
563 | #define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \ | ||
564 | QLOGIC_IB_HWE_COREPLL_RFSLIP) | ||
565 | |||
566 | /* masks for sanity checking interrupts and errors */ | ||
567 | #define IB_HWE_BITSEXTANT \ | ||
568 | (HWE_MASK(RXEMemParityErr) | \ | ||
569 | HWE_MASK(TXEMemParityErr) | \ | ||
570 | (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \ | ||
571 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \ | ||
572 | QLOGIC_IB_HWE_PCIE1PLLFAILED | \ | ||
573 | QLOGIC_IB_HWE_PCIE0PLLFAILED | \ | ||
574 | QLOGIC_IB_HWE_PCIEPOISONEDTLP | \ | ||
575 | QLOGIC_IB_HWE_PCIECPLTIMEOUT | \ | ||
576 | QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \ | ||
577 | QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \ | ||
578 | QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \ | ||
579 | HWE_MASK(PowerOnBISTFailed) | \ | ||
580 | QLOGIC_IB_HWE_COREPLL_FBSLIP | \ | ||
581 | QLOGIC_IB_HWE_COREPLL_RFSLIP | \ | ||
582 | QLOGIC_IB_HWE_SERDESPLLFAILED | \ | ||
583 | HWE_MASK(IBCBusToSPCParityErr) | \ | ||
584 | HWE_MASK(IBCBusFromSPCParityErr)) | ||
585 | |||
586 | #define IB_E_BITSEXTANT \ | ||
587 | (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \ | ||
588 | ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
589 | ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \ | ||
590 | ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \ | ||
591 | ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \ | ||
592 | ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \ | ||
593 | ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \ | ||
594 | ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \ | ||
595 | ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \ | ||
596 | ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \ | ||
597 | ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \ | ||
598 | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
599 | ERR_MASK(SendDroppedDataPktErr) | \ | ||
600 | ERR_MASK(SendPioArmLaunchErr) | \ | ||
601 | ERR_MASK(SendUnexpectedPktNumErr) | \ | ||
602 | ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \ | ||
603 | ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \ | ||
604 | ERR_MASK(HardwareErr)) | ||
605 | |||
606 | #define QLOGIC_IB_E_PKTERRS ( \ | ||
607 | ERR_MASK(SendPktLenErr) | \ | ||
608 | ERR_MASK(SendDroppedDataPktErr) | \ | ||
609 | ERR_MASK(RcvVCRCErr) | \ | ||
610 | ERR_MASK(RcvICRCErr) | \ | ||
611 | ERR_MASK(RcvShortPktLenErr) | \ | ||
612 | ERR_MASK(RcvEBPErr)) | ||
613 | |||
614 | /* These are all rcv-related errors which we want to count for stats */ | ||
615 | #define E_SUM_PKTERRS \ | ||
616 | (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \ | ||
617 | ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \ | ||
618 | ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \ | ||
619 | ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
620 | ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \ | ||
621 | ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr)) | ||
622 | |||
623 | /* These are all send-related errors which we want to count for stats */ | ||
624 | #define E_SUM_ERRS \ | ||
625 | (ERR_MASK(SendPioArmLaunchErr) | \ | ||
626 | ERR_MASK(SendUnexpectedPktNumErr) | \ | ||
627 | ERR_MASK(SendDroppedDataPktErr) | \ | ||
628 | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
629 | ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \ | ||
630 | ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ | ||
631 | ERR_MASK(InvalidAddrErr)) | ||
632 | |||
633 | /* | ||
634 | * This is similar to E_SUM_ERRS, but we can't ignore armlaunch, and we | ||
635 | * don't ignore errors not related to freeze and cancelling buffers. We | ||
636 | * can't ignore armlaunch because more could arrive while still cleaning | ||
637 | * up, and those need to be cancelled as they happen. | ||
638 | */ | ||
639 | #define E_SPKT_ERRS_IGNORE \ | ||
640 | (ERR_MASK(SendDroppedDataPktErr) | \ | ||
641 | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
642 | ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \ | ||
643 | ERR_MASK(SendPktLenErr)) | ||
644 | |||
645 | /* | ||
646 | * these are errors that can occur when the link changes state while | ||
647 | * a packet is being sent or received. This doesn't cover things | ||
648 | * like EBP or VCRC that can result from the link changing state | ||
649 | * during a send, so that we receive a "known bad" packet. | ||
650 | */ | ||
651 | #define E_SUM_LINK_PKTERRS \ | ||
652 | (ERR_MASK(SendDroppedDataPktErr) | \ | ||
653 | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
654 | ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ | ||
655 | ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
656 | ERR_MASK(RcvUnexpectedCharErr)) | ||
657 | |||
658 | static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *, | ||
659 | u32, unsigned long); | ||
660 | |||
661 | /* | ||
662 | * On platforms using this chip that do not have ordered WC stores, we | ||
663 | * can get TXE parity errors due to speculative reads to the PIO buffers, | ||
664 | * and this, due to a chip issue, can result in (many) false parity error | ||
665 | * reports. So it's a debug print on those systems, and an info print on | ||
666 | * systems where the speculative reads don't occur. | ||
667 | */ | ||
668 | static void qib_6120_txe_recover(struct qib_devdata *dd) | ||
669 | { | ||
670 | if (!qib_unordered_wc()) | ||
671 | qib_devinfo(dd->pcidev, | ||
672 | "Recovering from TXE PIO parity error\n"); | ||
673 | } | ||
674 | |||
675 | /* enable or disable delivery of interrupts from the chip */ | ||
676 | static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable) | ||
677 | { | ||
678 | if (enable) { | ||
679 | if (dd->flags & QIB_BADINTR) | ||
680 | return; | ||
681 | qib_write_kreg(dd, kr_intmask, ~0ULL); | ||
682 | /* force re-interrupt of any pending interrupts. */ | ||
683 | qib_write_kreg(dd, kr_intclear, 0ULL); | ||
684 | } else | ||
685 | qib_write_kreg(dd, kr_intmask, 0ULL); | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * Try to cleanup as much as possible for anything that might have gone | ||
690 | * wrong while in freeze mode, such as pio buffers being written by user | ||
691 | * processes (causing armlaunch), send errors due to going into freeze mode, | ||
692 | * etc., and try to avoid causing extra interrupts while doing so. | ||
693 | * Forcibly update the in-memory pioavail register copies after cleanup | ||
694 | * because the chip won't do it while in freeze mode (the register values | ||
695 | * themselves are kept correct). | ||
696 | * Make sure that we don't lose any important interrupts by using the chip | ||
697 | * feature that says that writing 0 to a bit in *clear that is set in | ||
698 | * *status will cause an interrupt to be generated again (if allowed by | ||
699 | * the *mask value). | ||
700 | * This is in chip-specific code because of all of the register accesses, | ||
701 | * even though the details are similar on most chips | ||
702 | */ | ||
703 | static void qib_6120_clear_freeze(struct qib_devdata *dd) | ||
704 | { | ||
705 | /* disable error interrupts, to avoid confusion */ | ||
706 | qib_write_kreg(dd, kr_errmask, 0ULL); | ||
707 | |||
708 | /* also disable interrupts; errormask is sometimes overwritten */ | ||
709 | qib_6120_set_intr_state(dd, 0); | ||
710 | |||
711 | qib_cancel_sends(dd->pport); | ||
712 | |||
713 | /* clear the freeze, and be sure chip saw it */ | ||
714 | qib_write_kreg(dd, kr_control, dd->control); | ||
715 | qib_read_kreg32(dd, kr_scratch); | ||
716 | |||
717 | /* force in-memory update now that we are out of freeze */ | ||
718 | qib_force_pio_avail_update(dd); | ||
719 | |||
720 | /* | ||
721 | * force new interrupt if any hwerr, error or interrupt bits are | ||
722 | * still set, and clear "safe" send packet errors related to freeze | ||
723 | * and cancelling sends. Re-enable error interrupts before possible | ||
724 | * force of re-interrupt on pending interrupts. | ||
725 | */ | ||
726 | qib_write_kreg(dd, kr_hwerrclear, 0ULL); | ||
727 | qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); | ||
728 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | ||
729 | qib_6120_set_intr_state(dd, 1); | ||
730 | } | ||
731 | |||
732 | /** | ||
733 | * qib_handle_6120_hwerrors - display hardware errors. | ||
734 | * @dd: the qlogic_ib device | ||
735 | * @msg: the output buffer | ||
736 | * @msgl: the size of the output buffer | ||
737 | * | ||
738 | * Most hardware errors are catastrophic, but for right now, | ||
739 | * we'll print them and continue. We reuse the same message | ||
740 | * buffer as handle_6120_errors() to avoid excessive stack | ||
741 | * usage. | ||
742 | */ | ||
743 | static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, | ||
744 | size_t msgl) | ||
745 | { | ||
746 | u64 hwerrs; | ||
747 | u32 bits, ctrl; | ||
748 | int isfatal = 0; | ||
749 | char *bitsmsg; | ||
750 | int log_idx; | ||
751 | |||
752 | hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); | ||
753 | if (!hwerrs) | ||
754 | return; | ||
755 | if (hwerrs == ~0ULL) { | ||
756 | qib_dev_err(dd, "Read of hardware error status failed " | ||
757 | "(all bits set); ignoring\n"); | ||
758 | return; | ||
759 | } | ||
760 | qib_stats.sps_hwerrs++; | ||
761 | |||
762 | /* Always clear the error status register, except MEMBISTFAIL, | ||
763 | * regardless of whether we continue or stop using the chip. | ||
764 | * We want that set so we know it failed, even across driver reload. | ||
765 | * We'll still ignore it in the hwerrmask. We do this partly for | ||
766 | * diagnostics, but also for support */ | ||
767 | qib_write_kreg(dd, kr_hwerrclear, | ||
768 | hwerrs & ~HWE_MASK(PowerOnBISTFailed)); | ||
769 | |||
770 | hwerrs &= dd->cspec->hwerrmask; | ||
771 | |||
772 | /* We log some errors to EEPROM, check if we have any of those. */ | ||
773 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | ||
774 | if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log) | ||
775 | qib_inc_eeprom_err(dd, log_idx, 1); | ||
776 | |||
777 | /* | ||
778 | * Make sure we get this much out, unless told to be quiet, | ||
779 | * or it's occurred within the last 5 seconds. | ||
780 | */ | ||
781 | if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID)) | ||
782 | qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " | ||
783 | "(cleared)\n", (unsigned long long) hwerrs); | ||
784 | |||
785 | if (hwerrs & ~IB_HWE_BITSEXTANT) | ||
786 | qib_dev_err(dd, "hwerror interrupt with unknown errors " | ||
787 | "%llx set\n", (unsigned long long) | ||
788 | (hwerrs & ~IB_HWE_BITSEXTANT)); | ||
789 | |||
790 | ctrl = qib_read_kreg32(dd, kr_control); | ||
791 | if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) { | ||
792 | /* | ||
793 | * Parity errors in send memory are recoverable, | ||
794 | * just cancel the send (if indicated in sendbuffererror), | ||
795 | * count the occurrence, unfreeze (if no other handled | ||
796 | * hardware error bits are set), and continue. They can | ||
797 | * occur if a processor speculative read is done to the PIO | ||
798 | * buffer while we are sending a packet, for example. | ||
799 | */ | ||
800 | if (hwerrs & TXE_PIO_PARITY) { | ||
801 | qib_6120_txe_recover(dd); | ||
802 | hwerrs &= ~TXE_PIO_PARITY; | ||
803 | } | ||
804 | |||
805 | if (!hwerrs) { | ||
806 | static u32 freeze_cnt; | ||
807 | |||
808 | freeze_cnt++; | ||
809 | qib_6120_clear_freeze(dd); | ||
810 | } else | ||
811 | isfatal = 1; | ||
812 | } | ||
813 | |||
814 | *msg = '\0'; | ||
815 | |||
816 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | ||
817 | isfatal = 1; | ||
818 | strlcat(msg, "[Memory BIST test failed, InfiniPath hardware" | ||
819 | " unusable]", msgl); | ||
820 | /* ignore from now on, so disable until driver reloaded */ | ||
821 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | ||
822 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
823 | } | ||
824 | |||
825 | qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs, | ||
826 | ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl); | ||
827 | |||
828 | bitsmsg = dd->cspec->bitsmsgbuf; | ||
829 | if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << | ||
830 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) { | ||
831 | bits = (u32) ((hwerrs >> | ||
832 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & | ||
833 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); | ||
834 | snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, | ||
835 | "[PCIe Mem Parity Errs %x] ", bits); | ||
836 | strlcat(msg, bitsmsg, msgl); | ||
837 | } | ||
838 | |||
839 | if (hwerrs & _QIB_PLL_FAIL) { | ||
840 | isfatal = 1; | ||
841 | snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, | ||
842 | "[PLL failed (%llx), InfiniPath hardware unusable]", | ||
843 | (unsigned long long) hwerrs & _QIB_PLL_FAIL); | ||
844 | strlcat(msg, bitsmsg, msgl); | ||
845 | /* ignore from now on, so disable until driver reloaded */ | ||
846 | dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL); | ||
847 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
848 | } | ||
849 | |||
850 | if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) { | ||
851 | /* | ||
852 | * If it occurs, it is left masked since the external | ||
853 | * interface is unused | ||
854 | */ | ||
855 | dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED; | ||
856 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
857 | } | ||
858 | |||
859 | if (hwerrs) | ||
860 | /* | ||
861 | * If any bits are set that we aren't ignoring, only | ||
862 | * make the complaint once, in case it's stuck | ||
863 | * or recurring and we get here multiple | ||
864 | * times. | ||
865 | */ | ||
866 | qib_dev_err(dd, "%s hardware error\n", msg); | ||
867 | else | ||
868 | *msg = 0; /* recovered from all of them */ | ||
869 | |||
870 | if (isfatal && !dd->diag_client) { | ||
871 | qib_dev_err(dd, "Fatal Hardware Error, no longer" | ||
872 | " usable, SN %.16s\n", dd->serial); | ||
873 | /* | ||
874 | * for /sys status file and user programs to print; if no | ||
875 | * trailing brace is copied, we'll know it was truncated. | ||
876 | */ | ||
877 | if (dd->freezemsg) | ||
878 | snprintf(dd->freezemsg, dd->freezelen, | ||
879 | "{%s}", msg); | ||
880 | qib_disable_after_error(dd); | ||
881 | } | ||
882 | } | ||
883 | |||
884 | /* | ||
885 | * Decode the error status into strings, deciding whether to always | ||
886 | * print it or not depending on "normal packet errors" vs everything | ||
887 | * else. Return 1 for "real" errors, otherwise 0 if only packet | ||
888 | * errors, so the caller can decide what to print with the string. | ||
889 | */ | ||
890 | static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen, | ||
891 | u64 err) | ||
892 | { | ||
893 | int iserr = 1; | ||
894 | |||
895 | *buf = '\0'; | ||
896 | if (err & QLOGIC_IB_E_PKTERRS) { | ||
897 | if (!(err & ~QLOGIC_IB_E_PKTERRS)) | ||
898 | iserr = 0; | ||
899 | if ((err & ERR_MASK(RcvICRCErr)) && | ||
900 | !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr)))) | ||
901 | strlcat(buf, "CRC ", blen); | ||
902 | if (!iserr) | ||
903 | goto done; | ||
904 | } | ||
905 | if (err & ERR_MASK(RcvHdrLenErr)) | ||
906 | strlcat(buf, "rhdrlen ", blen); | ||
907 | if (err & ERR_MASK(RcvBadTidErr)) | ||
908 | strlcat(buf, "rbadtid ", blen); | ||
909 | if (err & ERR_MASK(RcvBadVersionErr)) | ||
910 | strlcat(buf, "rbadversion ", blen); | ||
911 | if (err & ERR_MASK(RcvHdrErr)) | ||
912 | strlcat(buf, "rhdr ", blen); | ||
913 | if (err & ERR_MASK(RcvLongPktLenErr)) | ||
914 | strlcat(buf, "rlongpktlen ", blen); | ||
915 | if (err & ERR_MASK(RcvMaxPktLenErr)) | ||
916 | strlcat(buf, "rmaxpktlen ", blen); | ||
917 | if (err & ERR_MASK(RcvMinPktLenErr)) | ||
918 | strlcat(buf, "rminpktlen ", blen); | ||
919 | if (err & ERR_MASK(SendMinPktLenErr)) | ||
920 | strlcat(buf, "sminpktlen ", blen); | ||
921 | if (err & ERR_MASK(RcvFormatErr)) | ||
922 | strlcat(buf, "rformaterr ", blen); | ||
923 | if (err & ERR_MASK(RcvUnsupportedVLErr)) | ||
924 | strlcat(buf, "runsupvl ", blen); | ||
925 | if (err & ERR_MASK(RcvUnexpectedCharErr)) | ||
926 | strlcat(buf, "runexpchar ", blen); | ||
927 | if (err & ERR_MASK(RcvIBFlowErr)) | ||
928 | strlcat(buf, "ribflow ", blen); | ||
929 | if (err & ERR_MASK(SendUnderRunErr)) | ||
930 | strlcat(buf, "sunderrun ", blen); | ||
931 | if (err & ERR_MASK(SendPioArmLaunchErr)) | ||
932 | strlcat(buf, "spioarmlaunch ", blen); | ||
933 | if (err & ERR_MASK(SendUnexpectedPktNumErr)) | ||
934 | strlcat(buf, "sunexperrpktnum ", blen); | ||
935 | if (err & ERR_MASK(SendDroppedSmpPktErr)) | ||
936 | strlcat(buf, "sdroppedsmppkt ", blen); | ||
937 | if (err & ERR_MASK(SendMaxPktLenErr)) | ||
938 | strlcat(buf, "smaxpktlen ", blen); | ||
939 | if (err & ERR_MASK(SendUnsupportedVLErr)) | ||
940 | strlcat(buf, "sunsupVL ", blen); | ||
941 | if (err & ERR_MASK(InvalidAddrErr)) | ||
942 | strlcat(buf, "invalidaddr ", blen); | ||
943 | if (err & ERR_MASK(RcvEgrFullErr)) | ||
944 | strlcat(buf, "rcvegrfull ", blen); | ||
945 | if (err & ERR_MASK(RcvHdrFullErr)) | ||
946 | strlcat(buf, "rcvhdrfull ", blen); | ||
947 | if (err & ERR_MASK(IBStatusChanged)) | ||
948 | strlcat(buf, "ibcstatuschg ", blen); | ||
949 | if (err & ERR_MASK(RcvIBLostLinkErr)) | ||
950 | strlcat(buf, "riblostlink ", blen); | ||
951 | if (err & ERR_MASK(HardwareErr)) | ||
952 | strlcat(buf, "hardware ", blen); | ||
953 | if (err & ERR_MASK(ResetNegated)) | ||
954 | strlcat(buf, "reset ", blen); | ||
955 | done: | ||
956 | return iserr; | ||
957 | } | ||
958 | |||
959 | /* | ||
960 | * Called when we might have an error that is specific to a particular | ||
961 | * PIO buffer, and may need to cancel that buffer, so it can be re-used. | ||
962 | */ | ||
963 | static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd) | ||
964 | { | ||
965 | unsigned long sbuf[2]; | ||
966 | struct qib_devdata *dd = ppd->dd; | ||
967 | |||
968 | /* | ||
969 | * It's possible that sendbuffererror could have bits set; we might | ||
970 | * have already done this as a result of hardware error handling. | ||
971 | */ | ||
972 | sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); | ||
973 | sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); | ||
974 | |||
975 | if (sbuf[0] || sbuf[1]) | ||
976 | qib_disarm_piobufs_set(dd, sbuf, | ||
977 | dd->piobcnt2k + dd->piobcnt4k); | ||
978 | } | ||
979 | |||
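/*
 * Link error-recovery check: returns 0 (and forces the link down) if the
 * IB link error-recovery counter has advanced since the last snapshot,
 * so the caller can skip normal state-change handling; returns 1
 * otherwise. The snapshot is refreshed once the link is ACTIVE again.
 */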
980 | static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs) | ||
981 | { | ||
982 | int ret = 1; | ||
983 | u32 ibstate = qib_6120_iblink_state(ibcs); | ||
984 | u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov); | ||
985 | |||
986 | if (linkrecov != dd->cspec->lastlinkrecov) { | ||
987 | /* and no more until active again */ | ||
988 | dd->cspec->lastlinkrecov = 0; | ||
989 | qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN); | ||
990 | ret = 0; | ||
991 | } | ||
992 | if (ibstate == IB_PORT_ACTIVE) | ||
993 | dd->cspec->lastlinkrecov = | ||
994 | read_6120_creg32(dd, cr_iblinkerrrecov); | ||
995 | return ret; | ||
996 | } | ||
997 | |||
998 | static void handle_6120_errors(struct qib_devdata *dd, u64 errs) | ||
999 | { | ||
1000 | char *msg; | ||
1001 | u64 ignore_this_time = 0; | ||
1002 | u64 iserr = 0; | ||
1003 | int log_idx; | ||
1004 | struct qib_pportdata *ppd = dd->pport; | ||
1005 | u64 mask; | ||
1006 | |||
1007 | /* don't report errors that are masked */ | ||
1008 | errs &= dd->cspec->errormask; | ||
1009 | msg = dd->cspec->emsgbuf; | ||
1010 | |||
1011 | /* do these first, they are most important */ | ||
1012 | if (errs & ERR_MASK(HardwareErr)) | ||
1013 | qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); | ||
1014 | else | ||
1015 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | ||
1016 | if (errs & dd->eep_st_masks[log_idx].errs_to_log) | ||
1017 | qib_inc_eeprom_err(dd, log_idx, 1); | ||
1018 | |||
1019 | if (errs & ~IB_E_BITSEXTANT) | ||
1020 | qib_dev_err(dd, "error interrupt with unknown errors " | ||
1021 | "%llx set\n", | ||
1022 | (unsigned long long) (errs & ~IB_E_BITSEXTANT)); | ||
1023 | |||
1024 | if (errs & E_SUM_ERRS) { | ||
1025 | qib_disarm_6120_senderrbufs(ppd); | ||
1026 | if ((errs & E_SUM_LINK_PKTERRS) && | ||
1027 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
1028 | /* | ||
1029 | * This can happen when trying to bring the link | ||
1030 | * up, but the IB link changes state at the "wrong" | ||
1031 | * time. The IB logic then complains that the packet | ||
1032 | * isn't valid. We don't want to confuse people, so | ||
1033 | * we just don't print them, except at debug | ||
1034 | */ | ||
1035 | ignore_this_time = errs & E_SUM_LINK_PKTERRS; | ||
1036 | } | ||
1037 | } else if ((errs & E_SUM_LINK_PKTERRS) && | ||
1038 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
1039 | /* | ||
1040 | * This can happen when SMA is trying to bring the link | ||
1041 | * up, but the IB link changes state at the "wrong" time. | ||
1042 | * The IB logic then complains that the packet isn't | ||
1043 | * valid. We don't want to confuse people, so we just | ||
1044 | * don't print them, except at debug | ||
1045 | */ | ||
1046 | ignore_this_time = errs & E_SUM_LINK_PKTERRS; | ||
1047 | } | ||
1048 | |||
1049 | qib_write_kreg(dd, kr_errclear, errs); | ||
1050 | |||
1051 | errs &= ~ignore_this_time; | ||
1052 | if (!errs) | ||
1053 | goto done; | ||
1054 | |||
1055 | /* | ||
1056 | * The ones we mask off are handled specially below | ||
1057 | * or above. | ||
1058 | */ | ||
1059 | mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) | | ||
1060 | ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr); | ||
1061 | qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); | ||
1062 | |||
1063 | if (errs & E_SUM_PKTERRS) | ||
1064 | qib_stats.sps_rcverrs++; | ||
1065 | if (errs & E_SUM_ERRS) | ||
1066 | qib_stats.sps_txerrs++; | ||
1067 | |||
1068 | iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS); | ||
1069 | |||
1070 | if (errs & ERR_MASK(IBStatusChanged)) { | ||
1071 | u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus); | ||
1072 | u32 ibstate = qib_6120_iblink_state(ibcs); | ||
1073 | int handle = 1; | ||
1074 | |||
1075 | if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov) | ||
1076 | handle = chk_6120_linkrecovery(dd, ibcs); | ||
1077 | /* | ||
1078 | * Since going into a recovery state causes the link state | ||
1079 | * to go down and since recovery is transitory, it is better | ||
1080 | * if we "miss" ever seeing the link training state go into | ||
1081 | * recovery (i.e., ignore this transition for link state | ||
1082 | * special handling purposes) without updating lastibcstat. | ||
1083 | */ | ||
1084 | if (handle && qib_6120_phys_portstate(ibcs) == | ||
1085 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER) | ||
1086 | handle = 0; | ||
1087 | if (handle) | ||
1088 | qib_handle_e_ibstatuschanged(ppd, ibcs); | ||
1089 | } | ||
1090 | |||
1091 | if (errs & ERR_MASK(ResetNegated)) { | ||
1092 | qib_dev_err(dd, "Got reset, requires re-init " | ||
1093 | "(unload and reload driver)\n"); | ||
1094 | dd->flags &= ~QIB_INITTED; /* needs re-init */ | ||
1095 | /* mark as having had error */ | ||
1096 | *dd->devstatusp |= QIB_STATUS_HWERROR; | ||
1097 | *dd->pport->statusp &= ~QIB_STATUS_IB_CONF; | ||
1098 | } | ||
1099 | |||
1100 | if (*msg && iserr) | ||
1101 | qib_dev_porterr(dd, ppd->port, "%s error\n", msg); | ||
1102 | |||
1103 | if (ppd->state_wanted & ppd->lflags) | ||
1104 | wake_up_interruptible(&ppd->state_wait); | ||
1105 | |||
1106 | /* | ||
1107 | * If there were hdrq or egrfull errors, wake up any processes | ||
1108 | * waiting in poll. We used to try to check which contexts had | ||
1109 | * the overflow, but given the cost of that and the chip reads | ||
1110 | * to support it, it's better to just wake everybody up if we | ||
1111 | * get an overflow; waiters can poll again if it's not them. | ||
1112 | */ | ||
1113 | if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) { | ||
1114 | qib_handle_urcv(dd, ~0U); | ||
1115 | if (errs & ERR_MASK(RcvEgrFullErr)) | ||
1116 | qib_stats.sps_buffull++; | ||
1117 | else | ||
1118 | qib_stats.sps_hdrfull++; | ||
1119 | } | ||
1120 | done: | ||
1121 | return; | ||
1122 | } | ||
1123 | |||
1124 | /** | ||
1125 | * qib_6120_init_hwerrors - enable hardware errors | ||
1126 | * @dd: the qlogic_ib device | ||
1127 | * | ||
1128 | * Now that we have finished initializing everything that might reasonably | ||
1129 | * cause a hardware error, and cleared those error bits as they occur, | ||
1130 | * we can enable hardware errors in the mask (potentially enabling | ||
1131 | * freeze mode), and enable hardware errors as errors (along with | ||
1132 | * everything else) in errormask. | ||
1133 | */ | ||
1134 | static void qib_6120_init_hwerrors(struct qib_devdata *dd) | ||
1135 | { | ||
1136 | u64 val; | ||
1137 | u64 extsval; | ||
1138 | |||
1139 | extsval = qib_read_kreg64(dd, kr_extstatus); | ||
1140 | |||
1141 | if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST)) | ||
1142 | qib_dev_err(dd, "MemBIST did not complete!\n"); | ||
1143 | |||
1144 | /* init so all hwerrors interrupt, and enter freeze; adjust below */ | ||
1145 | val = ~0ULL; | ||
1146 | if (dd->minrev < 2) { | ||
1147 | /* | ||
1148 | * Avoid problem with internal interface bus parity | ||
1149 | * checking. Fixed in Rev2. | ||
1150 | */ | ||
1151 | val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM; | ||
1152 | } | ||
1153 | /* avoid a speculative-read freeze-mode issue on some Intel CPUs */ | ||
1154 | val &= ~TXEMEMPARITYERR_PIOBUF; | ||
1155 | |||
1156 | dd->cspec->hwerrmask = val; | ||
1157 | |||
1158 | qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed)); | ||
1159 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1160 | |||
1161 | /* clear all */ | ||
1162 | qib_write_kreg(dd, kr_errclear, ~0ULL); | ||
1163 | /* enable errors that are masked, at least this first time. */ | ||
1164 | qib_write_kreg(dd, kr_errmask, ~0ULL); | ||
1165 | dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); | ||
1166 | /* clear any interrupts up to this point (ints still not enabled) */ | ||
1167 | qib_write_kreg(dd, kr_intclear, ~0ULL); | ||
1168 | |||
1169 | qib_write_kreg(dd, kr_rcvbthqp, | ||
1170 | dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) | | ||
1171 | QIB_KD_QP); | ||
1172 | } | ||
1173 | |||
1174 | /* | ||
1175 | * Disable and enable the armlaunch error. Used for PIO bandwidth testing | ||
1176 | * on chips that are count-based, rather than trigger-based. There is no | ||
1177 | * reference counting, but that's also fine, given the intended use. | ||
1178 | * Only chip-specific because it's all register accesses | ||
1179 | */ | ||
1180 | static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable) | ||
1181 | { | ||
1182 | if (enable) { | ||
1183 | qib_write_kreg(dd, kr_errclear, | ||
1184 | ERR_MASK(SendPioArmLaunchErr)); | ||
1185 | dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr); | ||
1186 | } else | ||
1187 | dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr); | ||
1188 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | ||
1189 | } | ||
1190 | |||
1191 | /* | ||
1192 | * Formerly took parameter <which> in pre-shifted, | ||
1193 | * pre-merged form with LinkCmd and LinkInitCmd | ||
1194 | * together, with zero assumed to be a NOP. | ||
1195 | */ | ||
1196 | static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd, | ||
1197 | u16 linitcmd) | ||
1198 | { | ||
1199 | u64 mod_wd; | ||
1200 | struct qib_devdata *dd = ppd->dd; | ||
1201 | unsigned long flags; | ||
1202 | |||
1203 | if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { | ||
1204 | /* | ||
1205 | * If we are told to disable, note that so link-recovery | ||
1206 | * code does not attempt to bring us back up. | ||
1207 | */ | ||
1208 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
1209 | ppd->lflags |= QIBL_IB_LINK_DISABLED; | ||
1210 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
1211 | } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { | ||
1212 | /* | ||
1213 | * Any other linkinitcmd will lead to LINKDOWN and then | ||
1214 | * to INIT (if all is well), so clear flag to let | ||
1215 | * link-recovery code attempt to bring us back up. | ||
1216 | */ | ||
1217 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
1218 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | ||
1219 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
1220 | } | ||
1221 | |||
1222 | mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) | | ||
1223 | (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
1224 | |||
1225 | qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd); | ||
1226 | /* write to chip to prevent back-to-back writes of control reg */ | ||
1227 | qib_write_kreg(dd, kr_scratch, 0); | ||
1228 | } | ||
1229 | |||
1230 | /** | ||
1231 | * qib_6120_bringup_serdes - bring up the serdes | ||
1232 | * @ppd: physical port of the qlogic_ib device | ||
1233 | */ | ||
1234 | static int qib_6120_bringup_serdes(struct qib_pportdata *ppd) | ||
1235 | { | ||
1236 | struct qib_devdata *dd = ppd->dd; | ||
1237 | u64 val, config1, prev_val, hwstat, ibc; | ||
1238 | |||
1239 | /* Put IBC in reset, sends disabled */ | ||
1240 | dd->control &= ~QLOGIC_IB_C_LINKENABLE; | ||
1241 | qib_write_kreg(dd, kr_control, 0ULL); | ||
1242 | |||
1243 | dd->cspec->ibdeltainprog = 1; | ||
1244 | dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr); | ||
1245 | dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov); | ||
1246 | |||
1247 | /* flowcontrolwatermark is in units of KBytes */ | ||
1248 | ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark); | ||
1249 | /* | ||
1250 | * How often flowctrl is sent, more or less in usecs; balance against | ||
1251 | * watermark value, so that in theory senders always get a flow | ||
1252 | * control update in time to not let the IB link go idle. | ||
1253 | */ | ||
1254 | ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod); | ||
1255 | /* max error tolerance */ | ||
1256 | dd->cspec->lli_thresh = 0xf; | ||
1257 | ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold); | ||
1258 | /* use "real" buffer space for */ | ||
1259 | ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale); | ||
1260 | /* IB credit flow control. */ | ||
1261 | ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold); | ||
1262 | /* | ||
1263 | * set initial max size pkt IBC will send, including ICRC; it's the | ||
1264 | * PIO buffer size in dwords, less 1; also see qib_set_mtu() | ||
1265 | */ | ||
1266 | ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen); | ||
1267 | dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */ | ||
1268 | |||
1269 | /* initially come up waiting for TS1, without sending anything. */ | ||
1270 | val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | ||
1271 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
1272 | qib_write_kreg(dd, kr_ibcctrl, val); | ||
1273 | |||
1274 | val = qib_read_kreg64(dd, kr_serdes_cfg0); | ||
1275 | config1 = qib_read_kreg64(dd, kr_serdes_cfg1); | ||
1276 | |||
1277 | /* | ||
1278 | * Force reset on, also set rxdetect enable. Must do before reading | ||
1279 | * serdesstatus at least for simulation, or some of the bits in | ||
1280 | * serdes status will come back as undefined and cause simulation | ||
1281 | * failures | ||
1282 | */ | ||
1283 | val |= SYM_MASK(SerdesCfg0, ResetPLL) | | ||
1284 | SYM_MASK(SerdesCfg0, RxDetEnX) | | ||
1285 | (SYM_MASK(SerdesCfg0, L1PwrDnA) | | ||
1286 | SYM_MASK(SerdesCfg0, L1PwrDnB) | | ||
1287 | SYM_MASK(SerdesCfg0, L1PwrDnC) | | ||
1288 | SYM_MASK(SerdesCfg0, L1PwrDnD)); | ||
1289 | qib_write_kreg(dd, kr_serdes_cfg0, val); | ||
1290 | /* be sure chip saw it */ | ||
1291 | qib_read_kreg64(dd, kr_scratch); | ||
1292 | udelay(5); /* need pll reset set at least for a bit */ | ||
1293 | /* | ||
1294 | * after PLL is reset, set the per-lane Resets and TxIdle and | ||
1295 | * clear the PLL reset and rxdetect (to get falling edge). | ||
1296 | * Leave L1PWR bits set (permanently) | ||
1297 | */ | ||
1298 | val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) | | ||
1299 | SYM_MASK(SerdesCfg0, ResetPLL) | | ||
1300 | (SYM_MASK(SerdesCfg0, L1PwrDnA) | | ||
1301 | SYM_MASK(SerdesCfg0, L1PwrDnB) | | ||
1302 | SYM_MASK(SerdesCfg0, L1PwrDnC) | | ||
1303 | SYM_MASK(SerdesCfg0, L1PwrDnD))); | ||
1304 | val |= (SYM_MASK(SerdesCfg0, ResetA) | | ||
1305 | SYM_MASK(SerdesCfg0, ResetB) | | ||
1306 | SYM_MASK(SerdesCfg0, ResetC) | | ||
1307 | SYM_MASK(SerdesCfg0, ResetD)) | | ||
1308 | SYM_MASK(SerdesCfg0, TxIdeEnX); | ||
1309 | qib_write_kreg(dd, kr_serdes_cfg0, val); | ||
1310 | /* be sure chip saw it */ | ||
1311 | (void) qib_read_kreg64(dd, kr_scratch); | ||
1312 | /* need PLL reset clear for at least 11 usec before lane | ||
1313 | * resets cleared; give it a few more to be sure */ | ||
1314 | udelay(15); | ||
1315 | val &= ~((SYM_MASK(SerdesCfg0, ResetA) | | ||
1316 | SYM_MASK(SerdesCfg0, ResetB) | | ||
1317 | SYM_MASK(SerdesCfg0, ResetC) | | ||
1318 | SYM_MASK(SerdesCfg0, ResetD)) | | ||
1319 | SYM_MASK(SerdesCfg0, TxIdeEnX)); | ||
1320 | |||
1321 | qib_write_kreg(dd, kr_serdes_cfg0, val); | ||
1322 | /* be sure chip saw it */ | ||
1323 | (void) qib_read_kreg64(dd, kr_scratch); | ||
1324 | |||
1325 | val = qib_read_kreg64(dd, kr_xgxs_cfg); | ||
1326 | prev_val = val; | ||
1327 | if (val & QLOGIC_IB_XGXS_RESET) | ||
1328 | val &= ~QLOGIC_IB_XGXS_RESET; | ||
1329 | if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) { | ||
1330 | /* need to compensate for Tx inversion in partner */ | ||
1331 | val &= ~SYM_MASK(XGXSCfg, polarity_inv); | ||
1332 | val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv); | ||
1333 | } | ||
1334 | if (val != prev_val) | ||
1335 | qib_write_kreg(dd, kr_xgxs_cfg, val); | ||
1336 | |||
1337 | val = qib_read_kreg64(dd, kr_serdes_cfg0); | ||
1338 | |||
1339 | /* clear current and de-emphasis bits */ | ||
1340 | config1 &= ~0x0ffffffff00ULL; | ||
1341 | /* set current to 20ma */ | ||
1342 | config1 |= 0x00000000000ULL; | ||
1343 | /* set de-emphasis to -5.68dB */ | ||
1344 | config1 |= 0x0cccc000000ULL; | ||
1345 | qib_write_kreg(dd, kr_serdes_cfg1, config1); | ||
1346 | |||
1347 | /* base and port guid same for single port */ | ||
1348 | ppd->guid = dd->base_guid; | ||
1349 | |||
1350 | /* | ||
1351 | * The process of setting and un-resetting the serdes normally | ||
1352 | * causes a serdes PLL error, so check for that and clear it | ||
1353 | * here. Also clear the hwerr bit in errstatus, but not others. | ||
1354 | */ | ||
1355 | hwstat = qib_read_kreg64(dd, kr_hwerrstatus); | ||
1356 | if (hwstat) { | ||
1357 | /* should just be PLL; clear all set bits, in any case */ | ||
1358 | if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED) | ||
1359 | qib_write_kreg(dd, kr_hwerrclear, hwstat); | ||
1360 | qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr)); | ||
1361 | } | ||
1362 | |||
1363 | dd->control |= QLOGIC_IB_C_LINKENABLE; | ||
1364 | dd->control &= ~QLOGIC_IB_C_FREEZEMODE; | ||
1365 | qib_write_kreg(dd, kr_control, dd->control); | ||
1366 | |||
1367 | return 0; | ||
1368 | } | ||
1369 | |||
1370 | /** | ||
1371 | * qib_6120_quiet_serdes - set serdes to txidle | ||
1372 | * @ppd: physical port of the qlogic_ib device | ||
1373 | * Called when driver is being unloaded | ||
1374 | */ | ||
1375 | static void qib_6120_quiet_serdes(struct qib_pportdata *ppd) | ||
1376 | { | ||
1377 | struct qib_devdata *dd = ppd->dd; | ||
1378 | u64 val; | ||
1379 | |||
1380 | qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
1381 | |||
1382 | /* disable IBC */ | ||
1383 | dd->control &= ~QLOGIC_IB_C_LINKENABLE; | ||
1384 | qib_write_kreg(dd, kr_control, | ||
1385 | dd->control | QLOGIC_IB_C_FREEZEMODE); | ||
1386 | |||
1387 | if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta || | ||
1388 | dd->cspec->ibdeltainprog) { | ||
1389 | u64 diagc; | ||
1390 | |||
1391 | /* enable counter writes */ | ||
1392 | diagc = qib_read_kreg64(dd, kr_hwdiagctrl); | ||
1393 | qib_write_kreg(dd, kr_hwdiagctrl, | ||
1394 | diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable)); | ||
1395 | |||
1396 | if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) { | ||
1397 | val = read_6120_creg32(dd, cr_ibsymbolerr); | ||
1398 | if (dd->cspec->ibdeltainprog) | ||
1399 | val -= val - dd->cspec->ibsymsnap; | ||
1400 | val -= dd->cspec->ibsymdelta; | ||
1401 | write_6120_creg(dd, cr_ibsymbolerr, val); | ||
1402 | } | ||
1403 | if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) { | ||
1404 | val = read_6120_creg32(dd, cr_iblinkerrrecov); | ||
1405 | if (dd->cspec->ibdeltainprog) | ||
1406 | val -= val - dd->cspec->iblnkerrsnap; | ||
1407 | val -= dd->cspec->iblnkerrdelta; | ||
1408 | write_6120_creg(dd, cr_iblinkerrrecov, val); | ||
1409 | } | ||
1410 | |||
1411 | /* and disable counter writes */ | ||
1412 | qib_write_kreg(dd, kr_hwdiagctrl, diagc); | ||
1413 | } | ||
1414 | |||
1415 | val = qib_read_kreg64(dd, kr_serdes_cfg0); | ||
1416 | val |= SYM_MASK(SerdesCfg0, TxIdeEnX); | ||
1417 | qib_write_kreg(dd, kr_serdes_cfg0, val); | ||
1418 | } | ||
1419 | |||
1420 | /** | ||
1421 | * qib_6120_setup_setextled - set the state of the two external LEDs | ||
1422 | * @ppd: physical port of the qlogic_ib device | ||
1423 | * @on: whether the link is up or not | ||
1424 | * | ||
1425 | * The exact combination of LEDs lit when @on is true is determined by | ||
1426 | * looking at the ibcstatus. | ||
1427 | * | ||
1428 | * These LEDs indicate the physical and logical state of IB link. | ||
1429 | * For this chip (at least with recommended board pinouts), LED1 | ||
1430 | * is Yellow (logical state) and LED2 is Green (physical state), | ||
1431 | * | ||
1432 | * Note: We try to match the Mellanox HCA LED behavior as best | ||
1433 | * we can. Green indicates physical link state is OK (something is | ||
1434 | * plugged in, and we can train). | ||
1435 | * Amber indicates the link is logically up (ACTIVE). | ||
1436 | * Mellanox further blinks the amber LED to indicate data packet | ||
1437 | * activity, but we have no hardware support for that, so it would | ||
1438 | * require waking up every 10-20 msecs and checking the counters | ||
1439 | * on the chip, and then turning the LED off if appropriate. That's | ||
1440 | * visible overhead, so not something we will do. | ||
1441 | * | ||
1442 | */ | ||
1443 | static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on) | ||
1444 | { | ||
1445 | u64 extctl, val, lst, ltst; | ||
1446 | unsigned long flags; | ||
1447 | struct qib_devdata *dd = ppd->dd; | ||
1448 | |||
1449 | /* | ||
1450 | * The diags use the LED to indicate diag info, so we leave | ||
1451 | * the external LED alone when the diags are running. | ||
1452 | */ | ||
1453 | if (dd->diag_client) | ||
1454 | return; | ||
1455 | |||
1456 | /* Allow override of LED display for, e.g., locating the system in a rack */ | ||
1457 | if (ppd->led_override) { | ||
1458 | ltst = (ppd->led_override & QIB_LED_PHYS) ? | ||
1459 | IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED, | ||
1460 | lst = (ppd->led_override & QIB_LED_LOG) ? | ||
1461 | IB_PORT_ACTIVE : IB_PORT_DOWN; | ||
1462 | } else if (on) { | ||
1463 | val = qib_read_kreg64(dd, kr_ibcstatus); | ||
1464 | ltst = qib_6120_phys_portstate(val); | ||
1465 | lst = qib_6120_iblink_state(val); | ||
1466 | } else { | ||
1467 | ltst = 0; | ||
1468 | lst = 0; | ||
1469 | } | ||
1470 | |||
1471 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
1472 | extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) | | ||
1473 | SYM_MASK(EXTCtrl, LEDPriPortYellowOn)); | ||
1474 | |||
1475 | if (ltst == IB_PHYSPORTSTATE_LINKUP) | ||
1476 | extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn); | ||
1477 | if (lst == IB_PORT_ACTIVE) | ||
1478 | extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn); | ||
1479 | dd->cspec->extctrl = extctl; | ||
1480 | qib_write_kreg(dd, kr_extctrl, extctl); | ||
1481 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
1482 | } | ||
1483 | |||
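/*
 * Tear down our interrupt: free the registered handler, if any, then
 * call qib_nomsi() so the PCIe support code can back out MSI setup.
 */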
1484 | static void qib_6120_free_irq(struct qib_devdata *dd) | ||
1485 | { | ||
1486 | if (dd->cspec->irq) { | ||
1487 | free_irq(dd->cspec->irq, dd); | ||
1488 | dd->cspec->irq = 0; | ||
1489 | } | ||
1490 | qib_nomsi(dd); | ||
1491 | } | ||
1492 | |||
1493 | /** | ||
1494 | * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff | ||
1495 | * @dd: the qlogic_ib device | ||
1496 | * | ||
1497 | * This is called during driver unload. | ||
1498 | */ | ||
1499 | static void qib_6120_setup_cleanup(struct qib_devdata *dd) | ||
1500 | { | ||
1501 | qib_6120_free_irq(dd); | ||
1502 | kfree(dd->cspec->cntrs); | ||
1503 | kfree(dd->cspec->portcntrs); | ||
1504 | if (dd->cspec->dummy_hdrq) { | ||
1505 | dma_free_coherent(&dd->pcidev->dev, | ||
1506 | ALIGN(dd->rcvhdrcnt * | ||
1507 | dd->rcvhdrentsize * | ||
1508 | sizeof(u32), PAGE_SIZE), | ||
1509 | dd->cspec->dummy_hdrq, | ||
1510 | dd->cspec->dummy_hdrq_phys); | ||
1511 | dd->cspec->dummy_hdrq = NULL; | ||
1512 | } | ||
1513 | } | ||
1514 | |||
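/*
 * Enable or disable the "PIO buffer available" interrupt by setting or
 * clearing SendCtrl.PIOIntBufAvail under the sendctrl lock; the scratch
 * write ensures the chip has seen the update.
 */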
1515 | static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint) | ||
1516 | { | ||
1517 | unsigned long flags; | ||
1518 | |||
1519 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
1520 | if (needint) | ||
1521 | dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail); | ||
1522 | else | ||
1523 | dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail); | ||
1524 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
1525 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
1526 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
1527 | } | ||
1528 | |||
1529 | /* | ||
1530 | * handle errors and unusual events first, separate function | ||
1531 | * to improve cache hits for fast path interrupt handling | ||
1532 | */ | ||
1533 | static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat) | ||
1534 | { | ||
1535 | if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT)) | ||
1536 | qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n", | ||
1537 | istat & ~QLOGIC_IB_I_BITSEXTANT); | ||
1538 | |||
1539 | if (istat & QLOGIC_IB_I_ERROR) { | ||
1540 | u64 estat = 0; | ||
1541 | |||
1542 | qib_stats.sps_errints++; | ||
1543 | estat = qib_read_kreg64(dd, kr_errstatus); | ||
1544 | if (!estat) | ||
1545 | qib_devinfo(dd->pcidev, "error interrupt (%Lx), " | ||
1546 | "but no error bits set!\n", istat); | ||
1547 | handle_6120_errors(dd, estat); | ||
1548 | } | ||
1549 | |||
1550 | if (istat & QLOGIC_IB_I_GPIO) { | ||
1551 | u32 gpiostatus; | ||
1552 | u32 to_clear = 0; | ||
1553 | |||
1554 | /* | ||
1555 | * GPIO_3..5 on IBA6120 Rev2 chips indicate | ||
1556 | * errors that we need to count. | ||
1557 | */ | ||
1558 | gpiostatus = qib_read_kreg32(dd, kr_gpio_status); | ||
1559 | /* First the error-counter case. */ | ||
1560 | if (gpiostatus & GPIO_ERRINTR_MASK) { | ||
1561 | /* want to clear the bits we see asserted. */ | ||
1562 | to_clear |= (gpiostatus & GPIO_ERRINTR_MASK); | ||
1563 | |||
1564 | /* | ||
1565 | * Count appropriately, clear bits out of our copy, | ||
1566 | * as they have been "handled". | ||
1567 | */ | ||
1568 | if (gpiostatus & (1 << GPIO_RXUVL_BIT)) | ||
1569 | dd->cspec->rxfc_unsupvl_errs++; | ||
1570 | if (gpiostatus & (1 << GPIO_OVRUN_BIT)) | ||
1571 | dd->cspec->overrun_thresh_errs++; | ||
1572 | if (gpiostatus & (1 << GPIO_LLI_BIT)) | ||
1573 | dd->cspec->lli_errs++; | ||
1574 | gpiostatus &= ~GPIO_ERRINTR_MASK; | ||
1575 | } | ||
1576 | if (gpiostatus) { | ||
1577 | /* | ||
1578 | * Some unexpected bits remain. If they could have | ||
1579 | * caused the interrupt, complain and clear. | ||
1580 | * To avoid repetition of this condition, also clear | ||
1581 | * the mask. It is almost certainly due to error. | ||
1582 | */ | ||
1583 | const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); | ||
1584 | |||
1585 | /* | ||
1586 | * Also check that the chip reflects our shadow, | ||
1587 | * and report issues. If they caused the interrupt, | ||
1588 | * we will suppress by refreshing from the shadow. | ||
1589 | */ | ||
1590 | if (mask & gpiostatus) { | ||
1591 | to_clear |= (gpiostatus & mask); | ||
1592 | dd->cspec->gpio_mask &= ~(gpiostatus & mask); | ||
1593 | qib_write_kreg(dd, kr_gpio_mask, | ||
1594 | dd->cspec->gpio_mask); | ||
1595 | } | ||
1596 | } | ||
1597 | if (to_clear) | ||
1598 | qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear); | ||
1599 | } | ||
1600 | } | ||
1601 | |||
1602 | static irqreturn_t qib_6120intr(int irq, void *data) | ||
1603 | { | ||
1604 | struct qib_devdata *dd = data; | ||
1605 | irqreturn_t ret; | ||
1606 | u32 istat, ctxtrbits, rmask, crcs = 0; | ||
1607 | unsigned i; | ||
1608 | |||
1609 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { | ||
1610 | /* | ||
1611 | * This return value is not great, but we do not want the | ||
1612 | * interrupt core code to remove our interrupt handler | ||
1613 | * because we don't appear to be handling an interrupt | ||
1614 | * during a chip reset. | ||
1615 | */ | ||
1616 | ret = IRQ_HANDLED; | ||
1617 | goto bail; | ||
1618 | } | ||
1619 | |||
1620 | istat = qib_read_kreg32(dd, kr_intstatus); | ||
1621 | |||
1622 | if (unlikely(!istat)) { | ||
1623 | ret = IRQ_NONE; /* not our interrupt, or already handled */ | ||
1624 | goto bail; | ||
1625 | } | ||
1626 | if (unlikely(istat == -1)) { | ||
1627 | qib_bad_intrstatus(dd); | ||
1628 | /* don't know if it was our interrupt or not */ | ||
1629 | ret = IRQ_NONE; | ||
1630 | goto bail; | ||
1631 | } | ||
1632 | |||
1633 | qib_stats.sps_ints++; | ||
1634 | if (dd->int_counter != (u32) -1) | ||
1635 | dd->int_counter++; | ||
1636 | |||
1637 | if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | | ||
1638 | QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) | ||
1639 | unlikely_6120_intr(dd, istat); | ||
1640 | |||
1641 | /* | ||
1642 | * Clear the interrupt bits we found set, relatively early, so we | ||
1643 | * "know" the chip will have seen this by the time we process | ||
1644 | * the queue, and will re-interrupt if necessary. The processor | ||
1645 | * itself won't take the interrupt again until we return. | ||
1646 | */ | ||
1647 | qib_write_kreg(dd, kr_intclear, istat); | ||
1648 | |||
1649 | /* | ||
1650 | * Handle kernel receive queues before checking for pio buffers | ||
1651 | * available since receives can overflow; piobuf waiters can afford | ||
1652 | * a few extra cycles, since they were waiting anyway. | ||
1653 | */ | ||
1654 | ctxtrbits = istat & | ||
1655 | ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1656 | (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT)); | ||
1657 | if (ctxtrbits) { | ||
1658 | rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1659 | (1U << QLOGIC_IB_I_RCVURG_SHIFT); | ||
1660 | for (i = 0; i < dd->first_user_ctxt; i++) { | ||
1661 | if (ctxtrbits & rmask) { | ||
1662 | ctxtrbits &= ~rmask; | ||
1663 | crcs += qib_kreceive(dd->rcd[i], | ||
1664 | &dd->cspec->lli_counter, | ||
1665 | NULL); | ||
1666 | } | ||
1667 | rmask <<= 1; | ||
1668 | } | ||
1669 | if (crcs) { | ||
1670 | u32 cntr = dd->cspec->lli_counter; | ||
1671 | cntr += crcs; | ||
1672 | if (cntr) { | ||
1673 | if (cntr > dd->cspec->lli_thresh) { | ||
1674 | dd->cspec->lli_counter = 0; | ||
1675 | dd->cspec->lli_errs++; | ||
1676 | } else | ||
1677 | dd->cspec->lli_counter += cntr; | ||
1678 | } | ||
1679 | } | ||
1680 | |||
1681 | |||
1682 | if (ctxtrbits) { | ||
1683 | ctxtrbits = | ||
1684 | (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1685 | (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT); | ||
1686 | qib_handle_urcv(dd, ctxtrbits); | ||
1687 | } | ||
1688 | } | ||
1689 | |||
1690 | if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) | ||
1691 | qib_ib_piobufavail(dd); | ||
1692 | |||
1693 | ret = IRQ_HANDLED; | ||
1694 | bail: | ||
1695 | return ret; | ||
1696 | } | ||
1697 | |||
1698 | /* | ||
1699 | * Set up our chip-specific interrupt handler | ||
1700 | * The interrupt type has already been setup, so | ||
1701 | * we just need to do the registration and error checking. | ||
1702 | */ | ||
1703 | static void qib_setup_6120_interrupt(struct qib_devdata *dd) | ||
1704 | { | ||
1705 | /* | ||
1706 | * If the chip supports added error indication via GPIO pins, | ||
1707 | * enable interrupts on those bits so the interrupt routine | ||
1708 | * can count the events. Also set flag so interrupt routine | ||
1709 | * can know they are expected. | ||
1710 | */ | ||
1711 | if (SYM_FIELD(dd->revision, Revision_R, | ||
1712 | ChipRevMinor) > 1) { | ||
1713 | /* Rev2+ reports extra errors via internal GPIO pins */ | ||
1714 | dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK; | ||
1715 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | ||
1716 | } | ||
1717 | |||
1718 | if (!dd->cspec->irq) | ||
1719 | qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " | ||
1720 | "work\n"); | ||
1721 | else { | ||
1722 | int ret; | ||
1723 | ret = request_irq(dd->cspec->irq, qib_6120intr, 0, | ||
1724 | QIB_DRV_NAME, dd); | ||
1725 | if (ret) | ||
1726 | qib_dev_err(dd, "Couldn't setup interrupt " | ||
1727 | "(irq=%d): %d\n", dd->cspec->irq, | ||
1728 | ret); | ||
1729 | } | ||
1730 | } | ||
1731 | |||
1732 | /** | ||
1733 | * pe_boardname - fill in the board name | ||
1734 | * @dd: the qlogic_ib device | ||
1735 | * | ||
1736 | * info is based on the board revision register | ||
1737 | */ | ||
1738 | static void pe_boardname(struct qib_devdata *dd) | ||
1739 | { | ||
1740 | char *n; | ||
1741 | u32 boardid, namelen; | ||
1742 | |||
1743 | boardid = SYM_FIELD(dd->revision, Revision, | ||
1744 | BoardID); | ||
1745 | |||
1746 | switch (boardid) { | ||
1747 | case 2: | ||
1748 | n = "InfiniPath_QLE7140"; | ||
1749 | break; | ||
1750 | default: | ||
1751 | qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid); | ||
1752 | n = "Unknown_InfiniPath_6120"; | ||
1753 | break; | ||
1754 | } | ||
1755 | namelen = strlen(n) + 1; | ||
1756 | dd->boardname = kmalloc(namelen, GFP_KERNEL); | ||
1757 | if (!dd->boardname) | ||
1758 | qib_dev_err(dd, "Failed allocation for board name: %s\n", n); | ||
1759 | else | ||
1760 | snprintf(dd->boardname, namelen, "%s", n); | ||
1761 | |||
1762 | if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2) | ||
1763 | qib_dev_err(dd, "Unsupported InfiniPath hardware revision " | ||
1764 | "%u.%u!\n", dd->majrev, dd->minrev); | ||
1765 | |||
1766 | snprintf(dd->boardversion, sizeof(dd->boardversion), | ||
1767 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", | ||
1768 | QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, | ||
1769 | (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), | ||
1770 | dd->majrev, dd->minrev, | ||
1771 | (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); | ||
1772 | |||
1773 | } | ||
1774 | |||
1775 | /* | ||
1776 | * This routine sleeps, so it can only be called from user context, not | ||
1777 | * from interrupt context. If we need interrupt context, we can split | ||
1778 | * it into two routines. | ||
1779 | */ | ||
1780 | static int qib_6120_setup_reset(struct qib_devdata *dd) | ||
1781 | { | ||
1782 | u64 val; | ||
1783 | int i; | ||
1784 | int ret; | ||
1785 | u16 cmdval; | ||
1786 | u8 int_line, clinesz; | ||
1787 | |||
1788 | qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); | ||
1789 | |||
1790 | /* Use ERROR so it shows up in logs, etc. */ | ||
1791 | qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); | ||
1792 | |||
1793 | /* no interrupts till re-initted */ | ||
1794 | qib_6120_set_intr_state(dd, 0); | ||
1795 | |||
1796 | dd->cspec->ibdeltainprog = 0; | ||
1797 | dd->cspec->ibsymdelta = 0; | ||
1798 | dd->cspec->iblnkerrdelta = 0; | ||
1799 | |||
1800 | /* | ||
1801 | * Keep chip from being accessed until we are ready. Use | ||
1802 | * writeq() directly, to allow the write even though QIB_PRESENT | ||
1803 | * isn't set. | ||
1804 | */ | ||
1805 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT); | ||
1806 | dd->int_counter = 0; /* so we check interrupts work again */ | ||
1807 | val = dd->control | QLOGIC_IB_C_RESET; | ||
1808 | writeq(val, &dd->kregbase[kr_control]); | ||
1809 | mb(); /* prevent compiler re-ordering around actual reset */ | ||
1810 | |||
1811 | for (i = 1; i <= 5; i++) { | ||
1812 | /* | ||
1813 | * Allow MBIST, etc. to complete; longer on each retry. | ||
1814 | * We sometimes get machine checks from bus timeout if no | ||
1815 | * response, so for now, make it *really* long. | ||
1816 | */ | ||
1817 | msleep(1000 + (1 + i) * 2000); | ||
1818 | |||
1819 | qib_pcie_reenable(dd, cmdval, int_line, clinesz); | ||
1820 | |||
1821 | /* | ||
1822 | * Use readq directly, so we don't need to mark it as PRESENT | ||
1823 | * until we get a successful indication that all is well. | ||
1824 | */ | ||
1825 | val = readq(&dd->kregbase[kr_revision]); | ||
1826 | if (val == dd->revision) { | ||
1827 | dd->flags |= QIB_PRESENT; /* it's back */ | ||
1828 | ret = qib_reinit_intr(dd); | ||
1829 | goto bail; | ||
1830 | } | ||
1831 | } | ||
1832 | ret = 0; /* failed */ | ||
1833 | |||
1834 | bail: | ||
1835 | if (ret) { | ||
1836 | if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) | ||
1837 | qib_dev_err(dd, "Reset failed to setup PCIe or " | ||
1838 | "interrupts; continuing anyway\n"); | ||
1839 | /* clear the reset error, init error/hwerror mask */ | ||
1840 | qib_6120_init_hwerrors(dd); | ||
1841 | /* for Rev2 error interrupts; nop for rev 1 */ | ||
1842 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | ||
1843 | /* clear the reset error, init error/hwerror mask */ | ||
1844 | qib_6120_init_hwerrors(dd); | ||
1845 | } | ||
1846 | return ret; | ||
1847 | } | ||
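The retry delays in the loop above grow linearly; a quick worked example (illustrative only, it simply restates the expression in the code):

    /*
     * Worked example of the retry loop above: attempt i sleeps
     * 1000 + (1 + i) * 2000 ms, i.e. 5s, 7s, 9s, 11s and 13s for
     * i = 1..5, roughly 45 seconds of sleeping in the worst case
     * before the reset is declared failed.
     */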
1848 | |||
1849 | /** | ||
1850 | * qib_6120_put_tid - write a TID in chip | ||
1851 | * @dd: the qlogic_ib device | ||
1852 | * @tidptr: pointer to the expected TID (in chip) to update | ||
1853 | * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) | ||
1854 | * for expected | ||
1855 | * @pa: physical address of in-memory buffer; tidinvalid if freeing | ||
1856 | * | ||
1857 | * This exists as a separate routine to allow for special locking etc. | ||
1858 | * It's used for both the full cleanup on exit, as well as the normal | ||
1859 | * setup and teardown. | ||
1860 | */ | ||
1861 | static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | ||
1862 | u32 type, unsigned long pa) | ||
1863 | { | ||
1864 | u32 __iomem *tidp32 = (u32 __iomem *)tidptr; | ||
1865 | unsigned long flags; | ||
1866 | int tidx; | ||
1867 | spinlock_t *tidlockp; /* select appropriate spinlock */ | ||
1868 | |||
1869 | if (!dd->kregbase) | ||
1870 | return; | ||
1871 | |||
1872 | if (pa != dd->tidinvalid) { | ||
1873 | if (pa & ((1U << 11) - 1)) { | ||
1874 | qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", | ||
1875 | pa); | ||
1876 | return; | ||
1877 | } | ||
1878 | pa >>= 11; | ||
1879 | if (pa & ~QLOGIC_IB_RT_ADDR_MASK) { | ||
1880 | qib_dev_err(dd, "Physical page address 0x%lx " | ||
1881 | "larger than supported\n", pa); | ||
1882 | return; | ||
1883 | } | ||
1884 | |||
1885 | if (type == RCVHQ_RCV_TYPE_EAGER) | ||
1886 | pa |= dd->tidtemplate; | ||
1887 | else /* for now, always full 4KB page */ | ||
1888 | pa |= 2 << 29; | ||
1889 | } | ||
1890 | |||
1891 | /* | ||
1892 | * Avoid chip issue by writing the scratch register | ||
1893 | * before and after the TID, and with an io write barrier. | ||
1894 | * We use a spinlock around the writes, so they can't intermix | ||
1895 | * with other TID (eager or expected) writes (the chip problem | ||
1896 | * is triggered by back to back TID writes). Unfortunately, this | ||
1897 | * call can be done from interrupt level for the ctxt 0 eager TIDs, | ||
1898 | * so we have to use irqsave locks. | ||
1899 | */ | ||
1900 | /* | ||
1901 | * Assumes tidptr always > egrtidbase | ||
1902 | * if type == RCVHQ_RCV_TYPE_EAGER. | ||
1903 | */ | ||
1904 | tidx = tidptr - dd->egrtidbase; | ||
1905 | |||
1906 | tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt) | ||
1907 | ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock; | ||
1908 | spin_lock_irqsave(tidlockp, flags); | ||
1909 | qib_write_kreg(dd, kr_scratch, 0xfeeddeaf); | ||
1910 | writel(pa, tidp32); | ||
1911 | qib_write_kreg(dd, kr_scratch, 0xdeadbeef); | ||
1912 | mmiowb(); | ||
1913 | spin_unlock_irqrestore(tidlockp, flags); | ||
1914 | } | ||
1915 | |||
1916 | /** | ||
1917 | * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher | ||
1918 | * @dd: the qlogic_ib device | ||
1919 | * @tidptr: pointer to the expected TID (in chip) to update | ||
1920 | * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) | ||
1921 | * for expected | ||
1922 | * @pa: physical address of in-memory buffer; tidinvalid if freeing | ||
1923 | * | ||
1924 | * This exists as a separate routine to allow for selection of the | ||
1925 | * appropriate "flavor". The static calls in cleanup just use the | ||
1926 | * revision-agnostic form, as they are not performance critical. | ||
1927 | */ | ||
1928 | static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr, | ||
1929 | u32 type, unsigned long pa) | ||
1930 | { | ||
1931 | u32 __iomem *tidp32 = (u32 __iomem *)tidptr; | ||
1932 | u32 tidx; | ||
1933 | |||
1934 | if (!dd->kregbase) | ||
1935 | return; | ||
1936 | |||
1937 | if (pa != dd->tidinvalid) { | ||
1938 | if (pa & ((1U << 11) - 1)) { | ||
1939 | qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", | ||
1940 | pa); | ||
1941 | return; | ||
1942 | } | ||
1943 | pa >>= 11; | ||
1944 | if (pa & ~QLOGIC_IB_RT_ADDR_MASK) { | ||
1945 | qib_dev_err(dd, "Physical page address 0x%lx " | ||
1946 | "larger than supported\n", pa); | ||
1947 | return; | ||
1948 | } | ||
1949 | |||
1950 | if (type == RCVHQ_RCV_TYPE_EAGER) | ||
1951 | pa |= dd->tidtemplate; | ||
1952 | else /* for now, always full 4KB page */ | ||
1953 | pa |= 2 << 29; | ||
1954 | } | ||
1955 | tidx = tidptr - dd->egrtidbase; | ||
1956 | writel(pa, tidp32); | ||
1957 | mmiowb(); | ||
1958 | } | ||
1959 | |||
1960 | |||
1961 | /** | ||
1962 | * qib_6120_clear_tids - clear all TID entries for a context, expected and eager | ||
1963 | * @dd: the qlogic_ib device | ||
1964 | * @rcd: the context data | ||
1965 | * | ||
1966 | * clear all TID entries for a context, expected and eager. | ||
1967 | * Used from qib_close(). On this chip, TIDs are only 32 bits, | ||
1968 | * not 64, but they are still on 64 bit boundaries, so tidbase | ||
1969 | * is declared as u64 * for the pointer math, even though we write 32 bits | ||
1970 | */ | ||
1971 | static void qib_6120_clear_tids(struct qib_devdata *dd, | ||
1972 | struct qib_ctxtdata *rcd) | ||
1973 | { | ||
1974 | u64 __iomem *tidbase; | ||
1975 | unsigned long tidinv; | ||
1976 | u32 ctxt; | ||
1977 | int i; | ||
1978 | |||
1979 | if (!dd->kregbase || !rcd) | ||
1980 | return; | ||
1981 | |||
1982 | ctxt = rcd->ctxt; | ||
1983 | |||
1984 | tidinv = dd->tidinvalid; | ||
1985 | tidbase = (u64 __iomem *) | ||
1986 | ((char __iomem *)(dd->kregbase) + | ||
1987 | dd->rcvtidbase + | ||
1988 | ctxt * dd->rcvtidcnt * sizeof(*tidbase)); | ||
1989 | |||
1990 | for (i = 0; i < dd->rcvtidcnt; i++) | ||
1991 | /* use func pointer because could be one of two funcs */ | ||
1992 | dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, | ||
1993 | tidinv); | ||
1994 | |||
1995 | tidbase = (u64 __iomem *) | ||
1996 | ((char __iomem *)(dd->kregbase) + | ||
1997 | dd->rcvegrbase + | ||
1998 | rcd->rcvegr_tid_base * sizeof(*tidbase)); | ||
1999 | |||
2000 | for (i = 0; i < rcd->rcvegrcnt; i++) | ||
2001 | /* use func pointer because could be one of two funcs */ | ||
2002 | dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, | ||
2003 | tidinv); | ||
2004 | } | ||
2005 | |||
2006 | /** | ||
2007 | * qib_6120_tidtemplate - setup constants for TID updates | ||
2008 | * @dd: the qlogic_ib device | ||
2009 | * | ||
2010 | * We setup stuff that we use a lot, to avoid calculating each time | ||
2011 | */ | ||
2012 | static void qib_6120_tidtemplate(struct qib_devdata *dd) | ||
2013 | { | ||
2014 | u32 egrsize = dd->rcvegrbufsize; | ||
2015 | |||
2016 | /* | ||
2017 | * For now, we always allocate 4KB buffers (at init) so we can | ||
2018 | * receive max size packets. We may want a module parameter to | ||
2019 | * specify 2KB or 4KB and/or make it per ctxt instead of per device | ||
2020 | * for those who want to reduce memory footprint. Note that the | ||
2021 | * rcvhdrentsize size must be large enough to hold the largest | ||
2022 | * IB header (currently 96 bytes) that we expect to handle (plus of | ||
2023 | * course the 2 dwords of RHF). | ||
2024 | */ | ||
2025 | if (egrsize == 2048) | ||
2026 | dd->tidtemplate = 1U << 29; | ||
2027 | else if (egrsize == 4096) | ||
2028 | dd->tidtemplate = 2U << 29; | ||
2029 | dd->tidinvalid = 0; | ||
2030 | } | ||
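To see how this template is consumed, here is a minimal sketch (illustrative only, mirroring qib_6120_put_tid() above rather than adding new driver logic) of encoding a 4KB eager buffer into a TID word; the address value is made up:

    unsigned long pa = 0x12345800;  /* example address, must be 2KB aligned */
    pa >>= 11;                      /* 0x2468b: address in 2KB units */
    pa |= dd->tidtemplate;          /* 2U << 29 when eager bufs are 4KB */
    /* bits 28:0 now hold the 2KB-aligned address, bits 30:29 the size code */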
2031 | |||
2032 | int __attribute__((weak)) qib_unordered_wc(void) | ||
2033 | { | ||
2034 | return 0; | ||
2035 | } | ||
2036 | |||
2037 | /** | ||
2038 | * qib_6120_get_base_info - set chip-specific flags for user code | ||
2039 | * @rcd: the qlogic_ib ctxt | ||
2040 | * @kbase: qib_base_info pointer | ||
2041 | * | ||
2042 | * We set the PCIE flag because the lower bandwidth on PCIe vs | ||
2043 | * HyperTransport can affect some user packet algorithms. | ||
2044 | */ | ||
2045 | static int qib_6120_get_base_info(struct qib_ctxtdata *rcd, | ||
2046 | struct qib_base_info *kinfo) | ||
2047 | { | ||
2048 | if (qib_unordered_wc()) | ||
2049 | kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER; | ||
2050 | |||
2051 | kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE | | ||
2052 | QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED; | ||
2053 | return 0; | ||
2054 | } | ||
2055 | |||
2056 | |||
2057 | static struct qib_message_header * | ||
2058 | qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) | ||
2059 | { | ||
2060 | return (struct qib_message_header *) | ||
2061 | &rhf_addr[sizeof(u64) / sizeof(u32)]; | ||
2062 | } | ||
2063 | |||
2064 | static void qib_6120_config_ctxts(struct qib_devdata *dd) | ||
2065 | { | ||
2066 | dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt); | ||
2067 | if (qib_n_krcv_queues > 1) { | ||
2068 | dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; | ||
2069 | if (dd->first_user_ctxt > dd->ctxtcnt) | ||
2070 | dd->first_user_ctxt = dd->ctxtcnt; | ||
2071 | dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6; | ||
2072 | } else | ||
2073 | dd->first_user_ctxt = dd->num_pports; | ||
2074 | dd->n_krcv_queues = dd->first_user_ctxt; | ||
2075 | } | ||
2076 | |||
2077 | static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, | ||
2078 | u32 updegr, u32 egrhd) | ||
2079 | { | ||
2080 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | ||
2081 | if (updegr) | ||
2082 | qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); | ||
2083 | } | ||
2084 | |||
2085 | static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd) | ||
2086 | { | ||
2087 | u32 head, tail; | ||
2088 | |||
2089 | head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); | ||
2090 | if (rcd->rcvhdrtail_kvaddr) | ||
2091 | tail = qib_get_rcvhdrtail(rcd); | ||
2092 | else | ||
2093 | tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); | ||
2094 | return head == tail; | ||
2095 | } | ||
2096 | |||
2097 | /* | ||
2098 | * Used when we close any ctxt, for DMA already in flight | ||
2099 | * at close. Can't be done until we know hdrq size, so not | ||
2100 | * early in chip init. | ||
2101 | */ | ||
2102 | static void alloc_dummy_hdrq(struct qib_devdata *dd) | ||
2103 | { | ||
2104 | dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev, | ||
2105 | dd->rcd[0]->rcvhdrq_size, | ||
2106 | &dd->cspec->dummy_hdrq_phys, | ||
2107 | GFP_KERNEL | __GFP_COMP); | ||
2108 | if (!dd->cspec->dummy_hdrq) { | ||
2109 | qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n"); | ||
2110 | /* fallback to just 0'ing */ | ||
2111 | dd->cspec->dummy_hdrq_phys = 0UL; | ||
2112 | } | ||
2113 | } | ||
2114 | |||
2115 | /* | ||
2116 | * Modify the RCVCTRL register in a chip-specific way. This | ||
2117 | * is a function because bit positions and (future) register | ||
2118 | * locations are chip-specific, but the needed operations are | ||
2119 | * generic. <op> is a bit-mask because we often want to | ||
2120 | * do multiple modifications. | ||
2121 | */ | ||
2122 | static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op, | ||
2123 | int ctxt) | ||
2124 | { | ||
2125 | struct qib_devdata *dd = ppd->dd; | ||
2126 | u64 mask, val; | ||
2127 | unsigned long flags; | ||
2128 | |||
2129 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
2130 | |||
2131 | if (op & QIB_RCVCTRL_TAILUPD_ENB) | ||
2132 | dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT); | ||
2133 | if (op & QIB_RCVCTRL_TAILUPD_DIS) | ||
2134 | dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT); | ||
2135 | if (op & QIB_RCVCTRL_PKEY_ENB) | ||
2136 | dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT); | ||
2137 | if (op & QIB_RCVCTRL_PKEY_DIS) | ||
2138 | dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT); | ||
2139 | if (ctxt < 0) | ||
2140 | mask = (1ULL << dd->ctxtcnt) - 1; | ||
2141 | else | ||
2142 | mask = (1ULL << ctxt); | ||
2143 | if (op & QIB_RCVCTRL_CTXT_ENB) { | ||
2144 | /* always done for specific ctxt */ | ||
2145 | dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable)); | ||
2146 | if (!(dd->flags & QIB_NODMA_RTAIL)) | ||
2147 | dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT; | ||
2148 | /* Write these registers before the context is enabled. */ | ||
2149 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, | ||
2150 | dd->rcd[ctxt]->rcvhdrqtailaddr_phys); | ||
2151 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, | ||
2152 | dd->rcd[ctxt]->rcvhdrq_phys); | ||
2153 | |||
2154 | if (ctxt == 0 && !dd->cspec->dummy_hdrq) | ||
2155 | alloc_dummy_hdrq(dd); | ||
2156 | } | ||
2157 | if (op & QIB_RCVCTRL_CTXT_DIS) | ||
2158 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable)); | ||
2159 | if (op & QIB_RCVCTRL_INTRAVAIL_ENB) | ||
2160 | dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT); | ||
2161 | if (op & QIB_RCVCTRL_INTRAVAIL_DIS) | ||
2162 | dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT); | ||
2163 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | ||
2164 | if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) { | ||
2165 | /* arm rcv interrupt */ | ||
2166 | val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) | | ||
2167 | dd->rhdrhead_intr_off; | ||
2168 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | ||
2169 | } | ||
2170 | if (op & QIB_RCVCTRL_CTXT_ENB) { | ||
2171 | /* | ||
2172 | * Init the context registers also; if we were | ||
2173 | * disabled, tail and head should both be zero | ||
2174 | * already from the enable, but since we don't | ||
2175 | * know, we have to do it explicitly. | ||
2176 | */ | ||
2177 | val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); | ||
2178 | qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); | ||
2179 | |||
2180 | val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); | ||
2181 | dd->rcd[ctxt]->head = val; | ||
2182 | /* If kctxt, interrupt on next receive. */ | ||
2183 | if (ctxt < dd->first_user_ctxt) | ||
2184 | val |= dd->rhdrhead_intr_off; | ||
2185 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | ||
2186 | } | ||
2187 | if (op & QIB_RCVCTRL_CTXT_DIS) { | ||
2188 | /* | ||
2189 | * Be paranoid, and never write 0's to these, just use an | ||
2190 | * unused page. Of course, | ||
2191 | * rcvhdraddr points to a large chunk of memory, so this | ||
2192 | * could still trash things, but at least it won't trash | ||
2193 | * page 0, and by disabling the ctxt, it should stop "soon", | ||
2194 | * even if a packet or two is already in flight after we | ||
2195 | * disabled the ctxt. Only 6120 has this issue. | ||
2196 | */ | ||
2197 | if (ctxt >= 0) { | ||
2198 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, | ||
2199 | dd->cspec->dummy_hdrq_phys); | ||
2200 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, | ||
2201 | dd->cspec->dummy_hdrq_phys); | ||
2202 | } else { | ||
2203 | unsigned i; | ||
2204 | |||
2205 | for (i = 0; i < dd->cfgctxts; i++) { | ||
2206 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, | ||
2207 | i, dd->cspec->dummy_hdrq_phys); | ||
2208 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, | ||
2209 | i, dd->cspec->dummy_hdrq_phys); | ||
2210 | } | ||
2211 | } | ||
2212 | } | ||
2213 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
2214 | } | ||
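A hypothetical caller sketch (not part of this patch) showing why <op> is a bit-mask: several modifications can be applied in one locked pass:

    /* enable a context and its rcv-available interrupt in one call */
    rcvctrl_6120_mod(ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB,
                     rcd->ctxt);
    /* ctxt < 0 selects all contexts, e.g. with QIB_RCVCTRL_CTXT_DIS at unload */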
2215 | |||
2216 | /* | ||
2217 | * Modify the SENDCTRL register in a chip-specific way. This | ||
2218 | * is a function because there may be multiple such registers with | ||
2219 | * slightly different layouts. Only operations actually used | ||
2220 | * are implemented so far. | ||
2221 | * Chip requires no back-to-back sendctrl writes, so write | ||
2222 | * scratch register after writing sendctrl | ||
2223 | */ | ||
2224 | static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op) | ||
2225 | { | ||
2226 | struct qib_devdata *dd = ppd->dd; | ||
2227 | u64 tmp_dd_sendctrl; | ||
2228 | unsigned long flags; | ||
2229 | |||
2230 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
2231 | |||
2232 | /* First the ones that are "sticky", saved in shadow */ | ||
2233 | if (op & QIB_SENDCTRL_CLEAR) | ||
2234 | dd->sendctrl = 0; | ||
2235 | if (op & QIB_SENDCTRL_SEND_DIS) | ||
2236 | dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable); | ||
2237 | else if (op & QIB_SENDCTRL_SEND_ENB) | ||
2238 | dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable); | ||
2239 | if (op & QIB_SENDCTRL_AVAIL_DIS) | ||
2240 | dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd); | ||
2241 | else if (op & QIB_SENDCTRL_AVAIL_ENB) | ||
2242 | dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd); | ||
2243 | |||
2244 | if (op & QIB_SENDCTRL_DISARM_ALL) { | ||
2245 | u32 i, last; | ||
2246 | |||
2247 | tmp_dd_sendctrl = dd->sendctrl; | ||
2248 | /* | ||
2249 | * disarm any that are not yet launched, disabling sends | ||
2250 | * and updates until done. | ||
2251 | */ | ||
2252 | last = dd->piobcnt2k + dd->piobcnt4k; | ||
2253 | tmp_dd_sendctrl &= | ||
2254 | ~(SYM_MASK(SendCtrl, PIOEnable) | | ||
2255 | SYM_MASK(SendCtrl, PIOBufAvailUpd)); | ||
2256 | for (i = 0; i < last; i++) { | ||
2257 | qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl | | ||
2258 | SYM_MASK(SendCtrl, Disarm) | i); | ||
2259 | qib_write_kreg(dd, kr_scratch, 0); | ||
2260 | } | ||
2261 | } | ||
2262 | |||
2263 | tmp_dd_sendctrl = dd->sendctrl; | ||
2264 | |||
2265 | if (op & QIB_SENDCTRL_FLUSH) | ||
2266 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort); | ||
2267 | if (op & QIB_SENDCTRL_DISARM) | ||
2268 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | | ||
2269 | ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) << | ||
2270 | SYM_LSB(SendCtrl, DisarmPIOBuf)); | ||
2271 | if (op & QIB_SENDCTRL_AVAIL_BLIP) | ||
2272 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd); | ||
2273 | |||
2274 | qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); | ||
2275 | qib_write_kreg(dd, kr_scratch, 0); | ||
2276 | |||
2277 | if (op & QIB_SENDCTRL_AVAIL_BLIP) { | ||
2278 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
2279 | qib_write_kreg(dd, kr_scratch, 0); | ||
2280 | } | ||
2281 | |||
2282 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
2283 | |||
2284 | if (op & QIB_SENDCTRL_FLUSH) { | ||
2285 | u32 v; | ||
2286 | /* | ||
2287 | * ensure writes have hit chip, then do a few | ||
2288 | * more reads, to allow DMA of pioavail registers | ||
2289 | * to occur, so in-memory copy is in sync with | ||
2290 | * the chip. Not always safe to sleep. | ||
2291 | */ | ||
2292 | v = qib_read_kreg32(dd, kr_scratch); | ||
2293 | qib_write_kreg(dd, kr_scratch, v); | ||
2294 | v = qib_read_kreg32(dd, kr_scratch); | ||
2295 | qib_write_kreg(dd, kr_scratch, v); | ||
2296 | qib_read_kreg32(dd, kr_scratch); | ||
2297 | } | ||
2298 | } | ||
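As a hypothetical caller sketch (not from this patch), QIB_SENDCTRL_DISARM carries the PIO buffer number in the low bits of <op>, which the routine above masks with QIB_6120_SendCtrl_DisarmPIOBuf_RMASK:

    u32 bufnum = 5;  /* example buffer number */
    sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM |
                      (bufnum & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK));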
2299 | |||
2300 | /** | ||
2301 | * qib_portcntr_6120 - read a per-port counter | ||
2302 | * @ppd: the qlogic_ib per-port data | ||
2303 | * @reg: the counter to read | ||
2304 | */ | ||
2305 | static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg) | ||
2306 | { | ||
2307 | u64 ret = 0ULL; | ||
2308 | struct qib_devdata *dd = ppd->dd; | ||
2309 | u16 creg; | ||
2310 | /* 0xffff for unimplemented or synthesized counters */ | ||
2311 | static const u16 xlator[] = { | ||
2312 | [QIBPORTCNTR_PKTSEND] = cr_pktsend, | ||
2313 | [QIBPORTCNTR_WORDSEND] = cr_wordsend, | ||
2314 | [QIBPORTCNTR_PSXMITDATA] = 0xffff, | ||
2315 | [QIBPORTCNTR_PSXMITPKTS] = 0xffff, | ||
2316 | [QIBPORTCNTR_PSXMITWAIT] = 0xffff, | ||
2317 | [QIBPORTCNTR_SENDSTALL] = cr_sendstall, | ||
2318 | [QIBPORTCNTR_PKTRCV] = cr_pktrcv, | ||
2319 | [QIBPORTCNTR_PSRCVDATA] = 0xffff, | ||
2320 | [QIBPORTCNTR_PSRCVPKTS] = 0xffff, | ||
2321 | [QIBPORTCNTR_RCVEBP] = cr_rcvebp, | ||
2322 | [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl, | ||
2323 | [QIBPORTCNTR_WORDRCV] = cr_wordrcv, | ||
2324 | [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt, | ||
2325 | [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff, | ||
2326 | [QIBPORTCNTR_RXVLERR] = 0xffff, | ||
2327 | [QIBPORTCNTR_ERRICRC] = cr_erricrc, | ||
2328 | [QIBPORTCNTR_ERRVCRC] = cr_errvcrc, | ||
2329 | [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc, | ||
2330 | [QIBPORTCNTR_BADFORMAT] = cr_badformat, | ||
2331 | [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen, | ||
2332 | [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr, | ||
2333 | [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen, | ||
2334 | [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl, | ||
2335 | [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff, | ||
2336 | [QIBPORTCNTR_ERRLINK] = cr_errlink, | ||
2337 | [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown, | ||
2338 | [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov, | ||
2339 | [QIBPORTCNTR_LLI] = 0xffff, | ||
2340 | [QIBPORTCNTR_PSINTERVAL] = 0xffff, | ||
2341 | [QIBPORTCNTR_PSSTART] = 0xffff, | ||
2342 | [QIBPORTCNTR_PSSTAT] = 0xffff, | ||
2343 | [QIBPORTCNTR_VL15PKTDROP] = 0xffff, | ||
2344 | [QIBPORTCNTR_ERRPKEY] = cr_errpkey, | ||
2345 | [QIBPORTCNTR_KHDROVFL] = 0xffff, | ||
2346 | }; | ||
2347 | |||
2348 | if (reg >= ARRAY_SIZE(xlator)) { | ||
2349 | qib_devinfo(ppd->dd->pcidev, | ||
2350 | "Unimplemented portcounter %u\n", reg); | ||
2351 | goto done; | ||
2352 | } | ||
2353 | creg = xlator[reg]; | ||
2354 | |||
2355 | /* handle counter requests not implemented as chip counters */ | ||
2356 | if (reg == QIBPORTCNTR_LLI) | ||
2357 | ret = dd->cspec->lli_errs; | ||
2358 | else if (reg == QIBPORTCNTR_EXCESSBUFOVFL) | ||
2359 | ret = dd->cspec->overrun_thresh_errs; | ||
2360 | else if (reg == QIBPORTCNTR_KHDROVFL) { | ||
2361 | int i; | ||
2362 | |||
2363 | /* sum over all kernel contexts */ | ||
2364 | for (i = 0; i < dd->first_user_ctxt; i++) | ||
2365 | ret += read_6120_creg32(dd, cr_portovfl + i); | ||
2366 | } else if (reg == QIBPORTCNTR_PSSTAT) | ||
2367 | ret = dd->cspec->pma_sample_status; | ||
2368 | if (creg == 0xffff) | ||
2369 | goto done; | ||
2370 | |||
2371 | /* | ||
2372 | * only fast incrementing counters are 64bit; use 32 bit reads to | ||
2373 | * avoid two independent reads when on opteron | ||
2374 | */ | ||
2375 | if (creg == cr_wordsend || creg == cr_wordrcv || | ||
2376 | creg == cr_pktsend || creg == cr_pktrcv) | ||
2377 | ret = read_6120_creg(dd, creg); | ||
2378 | else | ||
2379 | ret = read_6120_creg32(dd, creg); | ||
2380 | if (creg == cr_ibsymbolerr) { | ||
2381 | if (dd->cspec->ibdeltainprog) | ||
2382 | ret -= ret - dd->cspec->ibsymsnap; | ||
2383 | ret -= dd->cspec->ibsymdelta; | ||
2384 | } else if (creg == cr_iblinkerrrecov) { | ||
2385 | if (dd->cspec->ibdeltainprog) | ||
2386 | ret -= ret - dd->cspec->iblnkerrsnap; | ||
2387 | ret -= dd->cspec->iblnkerrdelta; | ||
2388 | } | ||
2389 | if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */ | ||
2390 | ret += dd->cspec->rxfc_unsupvl_errs; | ||
2391 | |||
2392 | done: | ||
2393 | return ret; | ||
2394 | } | ||
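The snapshot/delta adjustment above is easier to follow with numbers; the figures below are illustrative only, not taken from the driver:

    /*
     * Suppose the raw cr_ibsymbolerr count is 110, a deliberate link
     * bounce is in progress with ibsymsnap = 100, and 7 errors from
     * earlier bounces are recorded in ibsymdelta.  Then:
     *    ret -= ret - 100;    ret is now 100 (frozen at the snapshot)
     *    ret -= 7;            ret is now 93
     * so symbol errors caused by intentional link resets are hidden.
     */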
2395 | |||
2396 | /* | ||
2397 | * Device counter names (not port-specific), one line per stat, | ||
2398 | * single string. Used by utilities like ipathstats to print the stats | ||
2399 | * in a way which works for different versions of drivers, without changing | ||
2400 | * the utility. Names need to be 12 chars or less (w/o newline), for proper | ||
2401 | * display by utility. | ||
2402 | * Non-error counters are first. | ||
2403 | * Start of "error" counters is indicated by a leading "E " on the first | ||
2404 | * "error" counter, and doesn't count in label length. | ||
2405 | * The EgrOvfl list needs to be last so we truncate them at the configured | ||
2406 | * context count for the device. | ||
2407 | * cntr6120indices contains the corresponding register indices. | ||
2408 | */ | ||
2409 | static const char cntr6120names[] = | ||
2410 | "Interrupts\n" | ||
2411 | "HostBusStall\n" | ||
2412 | "E RxTIDFull\n" | ||
2413 | "RxTIDInvalid\n" | ||
2414 | "Ctxt0EgrOvfl\n" | ||
2415 | "Ctxt1EgrOvfl\n" | ||
2416 | "Ctxt2EgrOvfl\n" | ||
2417 | "Ctxt3EgrOvfl\n" | ||
2418 | "Ctxt4EgrOvfl\n"; | ||
2419 | |||
2420 | static const size_t cntr6120indices[] = { | ||
2421 | cr_lbint, | ||
2422 | cr_lbflowstall, | ||
2423 | cr_errtidfull, | ||
2424 | cr_errtidvalid, | ||
2425 | cr_portovfl + 0, | ||
2426 | cr_portovfl + 1, | ||
2427 | cr_portovfl + 2, | ||
2428 | cr_portovfl + 3, | ||
2429 | cr_portovfl + 4, | ||
2430 | }; | ||
2431 | |||
2432 | /* | ||
2433 | * same as cntr6120names and cntr6120indices, but for port-specific counters. | ||
2434 | * portcntr6120indices is somewhat complicated by some registers needing | ||
2435 | * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG | ||
2436 | */ | ||
2437 | static const char portcntr6120names[] = | ||
2438 | "TxPkt\n" | ||
2439 | "TxFlowPkt\n" | ||
2440 | "TxWords\n" | ||
2441 | "RxPkt\n" | ||
2442 | "RxFlowPkt\n" | ||
2443 | "RxWords\n" | ||
2444 | "TxFlowStall\n" | ||
2445 | "E IBStatusChng\n" | ||
2446 | "IBLinkDown\n" | ||
2447 | "IBLnkRecov\n" | ||
2448 | "IBRxLinkErr\n" | ||
2449 | "IBSymbolErr\n" | ||
2450 | "RxLLIErr\n" | ||
2451 | "RxBadFormat\n" | ||
2452 | "RxBadLen\n" | ||
2453 | "RxBufOvrfl\n" | ||
2454 | "RxEBP\n" | ||
2455 | "RxFlowCtlErr\n" | ||
2456 | "RxICRCerr\n" | ||
2457 | "RxLPCRCerr\n" | ||
2458 | "RxVCRCerr\n" | ||
2459 | "RxInvalLen\n" | ||
2460 | "RxInvalPKey\n" | ||
2461 | "RxPktDropped\n" | ||
2462 | "TxBadLength\n" | ||
2463 | "TxDropped\n" | ||
2464 | "TxInvalLen\n" | ||
2465 | "TxUnderrun\n" | ||
2466 | "TxUnsupVL\n" | ||
2467 | ; | ||
2468 | |||
2469 | #define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */ | ||
2470 | static const size_t portcntr6120indices[] = { | ||
2471 | QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, | ||
2472 | cr_pktsendflow, | ||
2473 | QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, | ||
2474 | QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, | ||
2475 | cr_pktrcvflowctrl, | ||
2476 | QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, | ||
2477 | QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, | ||
2478 | cr_ibstatuschange, | ||
2479 | QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, | ||
2480 | QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, | ||
2481 | QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, | ||
2482 | QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, | ||
2483 | QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, | ||
2484 | QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, | ||
2485 | QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, | ||
2486 | QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, | ||
2487 | QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, | ||
2488 | cr_rcvflowctrl_err, | ||
2489 | QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, | ||
2490 | QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, | ||
2491 | QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, | ||
2492 | QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, | ||
2493 | QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, | ||
2494 | QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, | ||
2495 | cr_invalidslen, | ||
2496 | cr_senddropped, | ||
2497 | cr_errslen, | ||
2498 | cr_sendunderrun, | ||
2499 | cr_txunsupvl, | ||
2500 | }; | ||
2501 | |||
2502 | /* do all the setup to make the counter reads efficient later */ | ||
2503 | static void init_6120_cntrnames(struct qib_devdata *dd) | ||
2504 | { | ||
2505 | int i, j = 0; | ||
2506 | char *s; | ||
2507 | |||
2508 | for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts; | ||
2509 | i++) { | ||
2510 | /* we always have at least one counter before the egrovfl */ | ||
2511 | if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12)) | ||
2512 | j = 1; | ||
2513 | s = strchr(s + 1, '\n'); | ||
2514 | if (s && j) | ||
2515 | j++; | ||
2516 | } | ||
2517 | dd->cspec->ncntrs = i; | ||
2518 | if (!s) | ||
2519 | /* full list; size is without terminating null */ | ||
2520 | dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1; | ||
2521 | else | ||
2522 | dd->cspec->cntrnamelen = 1 + s - cntr6120names; | ||
2523 | dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs | ||
2524 | * sizeof(u64), GFP_KERNEL); | ||
2525 | if (!dd->cspec->cntrs) | ||
2526 | qib_dev_err(dd, "Failed allocation for counters\n"); | ||
2527 | |||
2528 | for (i = 0, s = (char *)portcntr6120names; s; i++) | ||
2529 | s = strchr(s + 1, '\n'); | ||
2530 | dd->cspec->nportcntrs = i - 1; | ||
2531 | dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1; | ||
2532 | dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs | ||
2533 | * sizeof(u64), GFP_KERNEL); | ||
2534 | if (!dd->cspec->portcntrs) | ||
2535 | qib_dev_err(dd, "Failed allocation for portcounters\n"); | ||
2536 | } | ||
2537 | |||
2538 | static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep, | ||
2539 | u64 **cntrp) | ||
2540 | { | ||
2541 | u32 ret; | ||
2542 | |||
2543 | if (namep) { | ||
2544 | ret = dd->cspec->cntrnamelen; | ||
2545 | if (pos >= ret) | ||
2546 | ret = 0; /* final read after getting everything */ | ||
2547 | else | ||
2548 | *namep = (char *)cntr6120names; | ||
2549 | } else { | ||
2550 | u64 *cntr = dd->cspec->cntrs; | ||
2551 | int i; | ||
2552 | |||
2553 | ret = dd->cspec->ncntrs * sizeof(u64); | ||
2554 | if (!cntr || pos >= ret) { | ||
2555 | /* everything read, or couldn't get memory */ | ||
2556 | ret = 0; | ||
2557 | goto done; | ||
2558 | } | ||
2559 | if (pos >= ret) { | ||
2560 | ret = 0; /* final read after getting everything */ | ||
2561 | goto done; | ||
2562 | } | ||
2563 | *cntrp = cntr; | ||
2564 | for (i = 0; i < dd->cspec->ncntrs; i++) | ||
2565 | *cntr++ = read_6120_creg32(dd, cntr6120indices[i]); | ||
2566 | } | ||
2567 | done: | ||
2568 | return ret; | ||
2569 | } | ||
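A hypothetical reader sketch (not part of this patch) of the two-phase protocol implied above: one call with namep fetches the newline-separated names, a second with cntrp fetches one u64 per name; a pos at or past the returned length yields the final zero-length read:

    char *names;
    u64 *vals;
    u32 namelen = qib_read_6120cntrs(dd, 0, &names, NULL);
    u32 vallen  = qib_read_6120cntrs(dd, 0, NULL, &vals);
    /* names is "Interrupts\nHostBusStall\n..."; vals[i] pairs with name i */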
2570 | |||
2571 | static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, | ||
2572 | char **namep, u64 **cntrp) | ||
2573 | { | ||
2574 | u32 ret; | ||
2575 | |||
2576 | if (namep) { | ||
2577 | ret = dd->cspec->portcntrnamelen; | ||
2578 | if (pos >= ret) | ||
2579 | ret = 0; /* final read after getting everything */ | ||
2580 | else | ||
2581 | *namep = (char *)portcntr6120names; | ||
2582 | } else { | ||
2583 | u64 *cntr = dd->cspec->portcntrs; | ||
2584 | struct qib_pportdata *ppd = &dd->pport[port]; | ||
2585 | int i; | ||
2586 | |||
2587 | ret = dd->cspec->nportcntrs * sizeof(u64); | ||
2588 | if (!cntr || pos >= ret) { | ||
2589 | /* everything read, or couldn't get memory */ | ||
2590 | ret = 0; | ||
2591 | goto done; | ||
2592 | } | ||
2593 | *cntrp = cntr; | ||
2594 | for (i = 0; i < dd->cspec->nportcntrs; i++) { | ||
2595 | if (portcntr6120indices[i] & _PORT_VIRT_FLAG) | ||
2596 | *cntr++ = qib_portcntr_6120(ppd, | ||
2597 | portcntr6120indices[i] & | ||
2598 | ~_PORT_VIRT_FLAG); | ||
2599 | else | ||
2600 | *cntr++ = read_6120_creg32(dd, | ||
2601 | portcntr6120indices[i]); | ||
2602 | } | ||
2603 | } | ||
2604 | done: | ||
2605 | return ret; | ||
2606 | } | ||
2607 | |||
2608 | static void qib_chk_6120_errormask(struct qib_devdata *dd) | ||
2609 | { | ||
2610 | static u32 fixed; | ||
2611 | u32 ctrl; | ||
2612 | unsigned long errormask; | ||
2613 | unsigned long hwerrs; | ||
2614 | |||
2615 | if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED)) | ||
2616 | return; | ||
2617 | |||
2618 | errormask = qib_read_kreg64(dd, kr_errmask); | ||
2619 | |||
2620 | if (errormask == dd->cspec->errormask) | ||
2621 | return; | ||
2622 | fixed++; | ||
2623 | |||
2624 | hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); | ||
2625 | ctrl = qib_read_kreg32(dd, kr_control); | ||
2626 | |||
2627 | qib_write_kreg(dd, kr_errmask, | ||
2628 | dd->cspec->errormask); | ||
2629 | |||
2630 | if ((hwerrs & dd->cspec->hwerrmask) || | ||
2631 | (ctrl & QLOGIC_IB_C_FREEZEMODE)) { | ||
2632 | qib_write_kreg(dd, kr_hwerrclear, 0ULL); | ||
2633 | qib_write_kreg(dd, kr_errclear, 0ULL); | ||
2634 | /* force re-interrupt of pending events, just in case */ | ||
2635 | qib_write_kreg(dd, kr_intclear, 0ULL); | ||
2636 | qib_devinfo(dd->pcidev, | ||
2637 | "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n", | ||
2638 | fixed, errormask, (unsigned long)dd->cspec->errormask, | ||
2639 | ctrl, hwerrs); | ||
2640 | } | ||
2641 | } | ||
2642 | |||
2643 | /** | ||
2644 | * qib_get_6120_faststats - get word counters from chip before they overflow | ||
2645 | * @opaque: a pointer to the qlogic_ib device qib_devdata | ||
2646 | * | ||
2647 | * This needs more work; in particular, a decision on whether we really | ||
2648 | * need traffic_wds done the way it is. | ||
2649 | * Called from add_timer. | ||
2650 | */ | ||
2651 | static void qib_get_6120_faststats(unsigned long opaque) | ||
2652 | { | ||
2653 | struct qib_devdata *dd = (struct qib_devdata *) opaque; | ||
2654 | struct qib_pportdata *ppd = dd->pport; | ||
2655 | unsigned long flags; | ||
2656 | u64 traffic_wds; | ||
2657 | |||
2658 | /* | ||
2659 | * don't access the chip while running diags, or memory diags can | ||
2660 | * fail | ||
2661 | */ | ||
2662 | if (!(dd->flags & QIB_INITTED) || dd->diag_client) | ||
2663 | /* but re-arm the timer, for diags case; won't hurt other */ | ||
2664 | goto done; | ||
2665 | |||
2666 | /* | ||
2667 | * We now try to maintain an activity timer, based on traffic | ||
2668 | * exceeding a threshold, so we need to check the word-counts | ||
2669 | * even if they are 64-bit. | ||
2670 | */ | ||
2671 | traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) + | ||
2672 | qib_portcntr_6120(ppd, cr_wordrcv); | ||
2673 | spin_lock_irqsave(&dd->eep_st_lock, flags); | ||
2674 | traffic_wds -= dd->traffic_wds; | ||
2675 | dd->traffic_wds += traffic_wds; | ||
2676 | if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) | ||
2677 | atomic_add(5, &dd->active_time); /* S/B #define */ | ||
2678 | spin_unlock_irqrestore(&dd->eep_st_lock, flags); | ||
2679 | |||
2680 | qib_chk_6120_errormask(dd); | ||
2681 | done: | ||
2682 | mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); | ||
2683 | } | ||
2684 | |||
2685 | /* no interrupt fallback for these chips */ | ||
2686 | static int qib_6120_nointr_fallback(struct qib_devdata *dd) | ||
2687 | { | ||
2688 | return 0; | ||
2689 | } | ||
2690 | |||
2691 | /* | ||
2692 | * reset the XGXS (between serdes and IBC). Slightly less intrusive | ||
2693 | * than resetting the IBC or external link state, and useful in some | ||
2694 | * cases to cause some retraining. To do this right, we reset IBC | ||
2695 | * as well. | ||
2696 | */ | ||
2697 | static void qib_6120_xgxs_reset(struct qib_pportdata *ppd) | ||
2698 | { | ||
2699 | u64 val, prev_val; | ||
2700 | struct qib_devdata *dd = ppd->dd; | ||
2701 | |||
2702 | prev_val = qib_read_kreg64(dd, kr_xgxs_cfg); | ||
2703 | val = prev_val | QLOGIC_IB_XGXS_RESET; | ||
2704 | prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */ | ||
2705 | qib_write_kreg(dd, kr_control, | ||
2706 | dd->control & ~QLOGIC_IB_C_LINKENABLE); | ||
2707 | qib_write_kreg(dd, kr_xgxs_cfg, val); | ||
2708 | qib_read_kreg32(dd, kr_scratch); | ||
2709 | qib_write_kreg(dd, kr_xgxs_cfg, prev_val); | ||
2710 | qib_write_kreg(dd, kr_control, dd->control); | ||
2711 | } | ||
2712 | |||
2713 | static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which) | ||
2714 | { | ||
2715 | int ret; | ||
2716 | |||
2717 | switch (which) { | ||
2718 | case QIB_IB_CFG_LWID: | ||
2719 | ret = ppd->link_width_active; | ||
2720 | break; | ||
2721 | |||
2722 | case QIB_IB_CFG_SPD: | ||
2723 | ret = ppd->link_speed_active; | ||
2724 | break; | ||
2725 | |||
2726 | case QIB_IB_CFG_LWID_ENB: | ||
2727 | ret = ppd->link_width_enabled; | ||
2728 | break; | ||
2729 | |||
2730 | case QIB_IB_CFG_SPD_ENB: | ||
2731 | ret = ppd->link_speed_enabled; | ||
2732 | break; | ||
2733 | |||
2734 | case QIB_IB_CFG_OP_VLS: | ||
2735 | ret = ppd->vls_operational; | ||
2736 | break; | ||
2737 | |||
2738 | case QIB_IB_CFG_VL_HIGH_CAP: | ||
2739 | ret = 0; | ||
2740 | break; | ||
2741 | |||
2742 | case QIB_IB_CFG_VL_LOW_CAP: | ||
2743 | ret = 0; | ||
2744 | break; | ||
2745 | |||
2746 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | ||
2747 | ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl, | ||
2748 | OverrunThreshold); | ||
2749 | break; | ||
2750 | |||
2751 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | ||
2752 | ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl, | ||
2753 | PhyerrThreshold); | ||
2754 | break; | ||
2755 | |||
2756 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | ||
2757 | /* will only take effect when the link state changes */ | ||
2758 | ret = (ppd->dd->cspec->ibcctrl & | ||
2759 | SYM_MASK(IBCCtrl, LinkDownDefaultState)) ? | ||
2760 | IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; | ||
2761 | break; | ||
2762 | |||
2763 | case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ | ||
2764 | ret = 0; /* no heartbeat on this chip */ | ||
2765 | break; | ||
2766 | |||
2767 | case QIB_IB_CFG_PMA_TICKS: | ||
2768 | ret = 250; /* 1 usec. */ | ||
2769 | break; | ||
2770 | |||
2771 | default: | ||
2772 | ret = -EINVAL; | ||
2773 | break; | ||
2774 | } | ||
2775 | return ret; | ||
2776 | } | ||
2777 | |||
2778 | /* | ||
2779 | * We assume range checking is already done, if needed. | ||
2780 | */ | ||
2781 | static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) | ||
2782 | { | ||
2783 | struct qib_devdata *dd = ppd->dd; | ||
2784 | int ret = 0; | ||
2785 | u64 val64; | ||
2786 | u16 lcmd, licmd; | ||
2787 | |||
2788 | switch (which) { | ||
2789 | case QIB_IB_CFG_LWID_ENB: | ||
2790 | ppd->link_width_enabled = val; | ||
2791 | break; | ||
2792 | |||
2793 | case QIB_IB_CFG_SPD_ENB: | ||
2794 | ppd->link_speed_enabled = val; | ||
2795 | break; | ||
2796 | |||
2797 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | ||
2798 | val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl, | ||
2799 | OverrunThreshold); | ||
2800 | if (val64 != val) { | ||
2801 | dd->cspec->ibcctrl &= | ||
2802 | ~SYM_MASK(IBCCtrl, OverrunThreshold); | ||
2803 | dd->cspec->ibcctrl |= (u64) val << | ||
2804 | SYM_LSB(IBCCtrl, OverrunThreshold); | ||
2805 | qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl); | ||
2806 | qib_write_kreg(dd, kr_scratch, 0); | ||
2807 | } | ||
2808 | break; | ||
2809 | |||
2810 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | ||
2811 | val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl, | ||
2812 | PhyerrThreshold); | ||
2813 | if (val64 != val) { | ||
2814 | dd->cspec->ibcctrl &= | ||
2815 | ~SYM_MASK(IBCCtrl, PhyerrThreshold); | ||
2816 | dd->cspec->ibcctrl |= (u64) val << | ||
2817 | SYM_LSB(IBCCtrl, PhyerrThreshold); | ||
2818 | qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl); | ||
2819 | qib_write_kreg(dd, kr_scratch, 0); | ||
2820 | } | ||
2821 | break; | ||
2822 | |||
2823 | case QIB_IB_CFG_PKEYS: /* update pkeys */ | ||
2824 | val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | | ||
2825 | ((u64) ppd->pkeys[2] << 32) | | ||
2826 | ((u64) ppd->pkeys[3] << 48); | ||
2827 | qib_write_kreg(dd, kr_partitionkey, val64); | ||
2828 | break; | ||
2829 | |||
2830 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | ||
2831 | /* will only take effect when the link state changes */ | ||
2832 | if (val == IB_LINKINITCMD_POLL) | ||
2833 | dd->cspec->ibcctrl &= | ||
2834 | ~SYM_MASK(IBCCtrl, LinkDownDefaultState); | ||
2835 | else /* SLEEP */ | ||
2836 | dd->cspec->ibcctrl |= | ||
2837 | SYM_MASK(IBCCtrl, LinkDownDefaultState); | ||
2838 | qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl); | ||
2839 | qib_write_kreg(dd, kr_scratch, 0); | ||
2840 | break; | ||
2841 | |||
2842 | case QIB_IB_CFG_MTU: /* update the MTU in IBC */ | ||
2843 | /* | ||
2844 | * Update our housekeeping variables, and set IBC max | ||
2845 | * size, same as init code; max IBC is max we allow in | ||
2846 | * buffer, less the qword pbc, plus 1 for ICRC, in dwords | ||
2847 | * Set even if it's unchanged, print debug message only | ||
2848 | * on changes. | ||
2849 | */ | ||
2850 | val = (ppd->ibmaxlen >> 2) + 1; | ||
2851 | dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen); | ||
2852 | dd->cspec->ibcctrl |= (u64)val << | ||
2853 | SYM_LSB(IBCCtrl, MaxPktLen); | ||
2854 | qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl); | ||
2855 | qib_write_kreg(dd, kr_scratch, 0); | ||
2856 | break; | ||
2857 | |||
2858 | case QIB_IB_CFG_LSTATE: /* set the IB link state */ | ||
2859 | switch (val & 0xffff0000) { | ||
2860 | case IB_LINKCMD_DOWN: | ||
2861 | lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN; | ||
2862 | if (!dd->cspec->ibdeltainprog) { | ||
2863 | dd->cspec->ibdeltainprog = 1; | ||
2864 | dd->cspec->ibsymsnap = | ||
2865 | read_6120_creg32(dd, cr_ibsymbolerr); | ||
2866 | dd->cspec->iblnkerrsnap = | ||
2867 | read_6120_creg32(dd, cr_iblinkerrrecov); | ||
2868 | } | ||
2869 | break; | ||
2870 | |||
2871 | case IB_LINKCMD_ARMED: | ||
2872 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED; | ||
2873 | break; | ||
2874 | |||
2875 | case IB_LINKCMD_ACTIVE: | ||
2876 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE; | ||
2877 | break; | ||
2878 | |||
2879 | default: | ||
2880 | ret = -EINVAL; | ||
2881 | qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); | ||
2882 | goto bail; | ||
2883 | } | ||
2884 | switch (val & 0xffff) { | ||
2885 | case IB_LINKINITCMD_NOP: | ||
2886 | licmd = 0; | ||
2887 | break; | ||
2888 | |||
2889 | case IB_LINKINITCMD_POLL: | ||
2890 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL; | ||
2891 | break; | ||
2892 | |||
2893 | case IB_LINKINITCMD_SLEEP: | ||
2894 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP; | ||
2895 | break; | ||
2896 | |||
2897 | case IB_LINKINITCMD_DISABLE: | ||
2898 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE; | ||
2899 | break; | ||
2900 | |||
2901 | default: | ||
2902 | ret = -EINVAL; | ||
2903 | qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", | ||
2904 | val & 0xffff); | ||
2905 | goto bail; | ||
2906 | } | ||
2907 | qib_set_ib_6120_lstate(ppd, lcmd, licmd); | ||
2908 | goto bail; | ||
2909 | |||
2910 | case QIB_IB_CFG_HRTBT: | ||
2911 | ret = -EINVAL; | ||
2912 | break; | ||
2913 | |||
2914 | default: | ||
2915 | ret = -EINVAL; | ||
2916 | } | ||
2917 | bail: | ||
2918 | return ret; | ||
2919 | } | ||
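A hypothetical caller sketch (not from this patch): for QIB_IB_CFG_LSTATE, val packs the link command into its upper 16 bits and the link-init command into its lower 16, matching the two switch statements above:

    qib_6120_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
                        IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);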
2920 | |||
2921 | static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) | ||
2922 | { | ||
2923 | int ret = 0; | ||
2924 | if (!strncmp(what, "ibc", 3)) { | ||
2925 | ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); | ||
2926 | qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", | ||
2927 | ppd->dd->unit, ppd->port); | ||
2928 | } else if (!strncmp(what, "off", 3)) { | ||
2929 | ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); | ||
2930 | qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " | ||
2931 | "(normal)\n", ppd->dd->unit, ppd->port); | ||
2932 | } else | ||
2933 | ret = -EINVAL; | ||
2934 | if (!ret) { | ||
2935 | qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl); | ||
2936 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
2937 | } | ||
2938 | return ret; | ||
2939 | } | ||
2940 | |||
2941 | static void pma_6120_timer(unsigned long data) | ||
2942 | { | ||
2943 | struct qib_pportdata *ppd = (struct qib_pportdata *)data; | ||
2944 | struct qib_chip_specific *cs = ppd->dd->cspec; | ||
2945 | struct qib_ibport *ibp = &ppd->ibport_data; | ||
2946 | unsigned long flags; | ||
2947 | |||
2948 | spin_lock_irqsave(&ibp->lock, flags); | ||
2949 | if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) { | ||
2950 | cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; | ||
2951 | qib_snapshot_counters(ppd, &cs->sword, &cs->rword, | ||
2952 | &cs->spkts, &cs->rpkts, &cs->xmit_wait); | ||
2953 | mod_timer(&cs->pma_timer, | ||
2954 | jiffies + usecs_to_jiffies(ibp->pma_sample_interval)); | ||
2955 | } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { | ||
2956 | u64 ta, tb, tc, td, te; | ||
2957 | |||
2958 | cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; | ||
2959 | qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te); | ||
2960 | |||
2961 | cs->sword = ta - cs->sword; | ||
2962 | cs->rword = tb - cs->rword; | ||
2963 | cs->spkts = tc - cs->spkts; | ||
2964 | cs->rpkts = td - cs->rpkts; | ||
2965 | cs->xmit_wait = te - cs->xmit_wait; | ||
2966 | } | ||
2967 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
2968 | } | ||
2969 | |||
2970 | /* | ||
2971 | * Note that the caller has the ibp->lock held. | ||
2972 | */ | ||
2973 | static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv, | ||
2974 | u32 start) | ||
2975 | { | ||
2976 | struct qib_chip_specific *cs = ppd->dd->cspec; | ||
2977 | |||
2978 | if (start && intv) { | ||
2979 | cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED; | ||
2980 | mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start)); | ||
2981 | } else if (intv) { | ||
2982 | cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; | ||
2983 | qib_snapshot_counters(ppd, &cs->sword, &cs->rword, | ||
2984 | &cs->spkts, &cs->rpkts, &cs->xmit_wait); | ||
2985 | mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv)); | ||
2986 | } else { | ||
2987 | cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; | ||
2988 | cs->sword = 0; | ||
2989 | cs->rword = 0; | ||
2990 | cs->spkts = 0; | ||
2991 | cs->rpkts = 0; | ||
2992 | cs->xmit_wait = 0; | ||
2993 | } | ||
2994 | } | ||
2995 | |||
2996 | static u32 qib_6120_iblink_state(u64 ibcs) | ||
2997 | { | ||
2998 | u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState); | ||
2999 | |||
3000 | switch (state) { | ||
3001 | case IB_6120_L_STATE_INIT: | ||
3002 | state = IB_PORT_INIT; | ||
3003 | break; | ||
3004 | case IB_6120_L_STATE_ARM: | ||
3005 | state = IB_PORT_ARMED; | ||
3006 | break; | ||
3007 | case IB_6120_L_STATE_ACTIVE: | ||
3008 | /* fall through */ | ||
3009 | case IB_6120_L_STATE_ACT_DEFER: | ||
3010 | state = IB_PORT_ACTIVE; | ||
3011 | break; | ||
3012 | default: /* fall through */ | ||
3013 | case IB_6120_L_STATE_DOWN: | ||
3014 | state = IB_PORT_DOWN; | ||
3015 | break; | ||
3016 | } | ||
3017 | return state; | ||
3018 | } | ||
3019 | |||
3020 | /* returns the IBTA port state, rather than the IBC link training state */ | ||
3021 | static u8 qib_6120_phys_portstate(u64 ibcs) | ||
3022 | { | ||
3023 | u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState); | ||
3024 | return qib_6120_physportstate[state]; | ||
3025 | } | ||
3026 | |||
3027 | static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | ||
3028 | { | ||
3029 | unsigned long flags; | ||
3030 | |||
3031 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3032 | ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; | ||
3033 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3034 | |||
3035 | if (ibup) { | ||
3036 | if (ppd->dd->cspec->ibdeltainprog) { | ||
3037 | ppd->dd->cspec->ibdeltainprog = 0; | ||
3038 | ppd->dd->cspec->ibsymdelta += | ||
3039 | read_6120_creg32(ppd->dd, cr_ibsymbolerr) - | ||
3040 | ppd->dd->cspec->ibsymsnap; | ||
3041 | ppd->dd->cspec->iblnkerrdelta += | ||
3042 | read_6120_creg32(ppd->dd, cr_iblinkerrrecov) - | ||
3043 | ppd->dd->cspec->iblnkerrsnap; | ||
3044 | } | ||
3045 | qib_hol_init(ppd); | ||
3046 | } else { | ||
3047 | ppd->dd->cspec->lli_counter = 0; | ||
3048 | if (!ppd->dd->cspec->ibdeltainprog) { | ||
3049 | ppd->dd->cspec->ibdeltainprog = 1; | ||
3050 | ppd->dd->cspec->ibsymsnap = | ||
3051 | read_6120_creg32(ppd->dd, cr_ibsymbolerr); | ||
3052 | ppd->dd->cspec->iblnkerrsnap = | ||
3053 | read_6120_creg32(ppd->dd, cr_iblinkerrrecov); | ||
3054 | } | ||
3055 | qib_hol_down(ppd); | ||
3056 | } | ||
3057 | |||
3058 | qib_6120_setup_setextled(ppd, ibup); | ||
3059 | |||
3060 | return 0; | ||
3061 | } | ||
3062 | |||
3063 | /* Does read/modify/write to appropriate registers to | ||
3064 | * set output and direction bits selected by mask. | ||
3065 | * These are in their canonical positions (e.g. lsb of | ||
3066 | * dir will end up in D48 of extctrl on existing chips). | ||
3067 | * Returns contents of GP Inputs. | ||
3068 | */ | ||
3069 | static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) | ||
3070 | { | ||
3071 | u64 read_val, new_out; | ||
3072 | unsigned long flags; | ||
3073 | |||
3074 | if (mask) { | ||
3075 | /* some bits being written, lock access to GPIO */ | ||
3076 | dir &= mask; | ||
3077 | out &= mask; | ||
3078 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
3079 | dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); | ||
3080 | dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); | ||
3081 | new_out = (dd->cspec->gpio_out & ~mask) | out; | ||
3082 | |||
3083 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | ||
3084 | qib_write_kreg(dd, kr_gpio_out, new_out); | ||
3085 | dd->cspec->gpio_out = new_out; | ||
3086 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
3087 | } | ||
3088 | /* | ||
3089 | * It is unlikely that a read at this time would get valid | ||
3090 | * data on a pin whose direction line was set in the same | ||
3091 | * call to this function. We include the read here because | ||
3092 | * that allows us to potentially combine a change on one pin with | ||
3093 | * a read on another, and because the old code did something like | ||
3094 | * this. | ||
3095 | */ | ||
3096 | read_val = qib_read_kreg64(dd, kr_extstatus); | ||
3097 | return SYM_FIELD(read_val, EXTStatus, GPIOIn); | ||
3098 | } | ||
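Hypothetical caller sketches (not from this patch), following the out/dir/mask semantics documented above; pin is an arbitrary example:

    unsigned pin = 3;
    gpio_6120_mod(dd, 0, 1 << pin, 1 << pin);        /* drive pin low as output */
    gpio_6120_mod(dd, 1 << pin, 1 << pin, 1 << pin); /* drive pin high */
    u32 inputs = gpio_6120_mod(dd, 0, 0, 0);         /* mask == 0: just read GPIOIn */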
3099 | |||
3100 | /* | ||
3101 | * Read fundamental info we need to use the chip. These are | ||
3102 | * the registers that describe chip capabilities, and are | ||
3103 | * saved in shadow registers. | ||
3104 | */ | ||
3105 | static void get_6120_chip_params(struct qib_devdata *dd) | ||
3106 | { | ||
3107 | u64 val; | ||
3108 | u32 piobufs; | ||
3109 | int mtu; | ||
3110 | |||
3111 | dd->uregbase = qib_read_kreg32(dd, kr_userregbase); | ||
3112 | |||
3113 | dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); | ||
3114 | dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); | ||
3115 | dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); | ||
3116 | dd->palign = qib_read_kreg32(dd, kr_palign); | ||
3117 | dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); | ||
3118 | dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; | ||
3119 | |||
3120 | dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); | ||
3121 | |||
3122 | val = qib_read_kreg64(dd, kr_sendpiosize); | ||
3123 | dd->piosize2k = val & ~0U; | ||
3124 | dd->piosize4k = val >> 32; | ||
3125 | |||
3126 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | ||
3127 | if (mtu == -1) | ||
3128 | mtu = QIB_DEFAULT_MTU; | ||
3129 | dd->pport->ibmtu = (u32)mtu; | ||
3130 | |||
3131 | val = qib_read_kreg64(dd, kr_sendpiobufcnt); | ||
3132 | dd->piobcnt2k = val & ~0U; | ||
3133 | dd->piobcnt4k = val >> 32; | ||
3134 | /* these may be adjusted in init_chip_wc_pat() */ | ||
3135 | dd->pio2kbase = (u32 __iomem *) | ||
3136 | (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase); | ||
3137 | if (dd->piobcnt4k) { | ||
3138 | dd->pio4kbase = (u32 __iomem *) | ||
3139 | (((char __iomem *) dd->kregbase) + | ||
3140 | (dd->piobufbase >> 32)); | ||
3141 | /* | ||
3142 | * 4K buffers take 2 pages; we use roundup just to be | ||
3143 | * paranoid; we calculate it once here, rather than on | ||
3144 | * every buf allocate | ||
3145 | */ | ||
3146 | dd->align4k = ALIGN(dd->piosize4k, dd->palign); | ||
3147 | } | ||
3148 | |||
3149 | piobufs = dd->piobcnt4k + dd->piobcnt2k; | ||
3150 | |||
3151 | dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / | ||
3152 | (sizeof(u64) * BITS_PER_BYTE / 2); | ||
3153 | } | ||
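The final line above sizes the pioavail shadow: each 64-bit pioavail register carries 2 status bits per PIO buffer, i.e. 32 buffers per register. A worked example with made-up buffer counts:

    /*
     * With piobcnt2k = 64 and piobcnt4k = 2, piobufs = 66 and the
     * expression rounds up: ALIGN(66, 32) / 32 = 96 / 32 = 3 shadow
     * registers to DMA.
     */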
3154 | |||
3155 | /* | ||
3156 | * The chip base addresses in cspec and cpspec have to be set | ||
3157 | * after possible init_chip_wc_pat(), rather than in | ||
3158 | * get_6120_chip_params(), so split out as separate function | ||
3159 | */ | ||
3160 | static void set_6120_baseaddrs(struct qib_devdata *dd) | ||
3161 | { | ||
3162 | u32 cregbase; | ||
3163 | cregbase = qib_read_kreg32(dd, kr_counterregbase); | ||
3164 | dd->cspec->cregbase = (u64 __iomem *) | ||
3165 | ((char __iomem *) dd->kregbase + cregbase); | ||
3166 | |||
3167 | dd->egrtidbase = (u64 __iomem *) | ||
3168 | ((char __iomem *) dd->kregbase + dd->rcvegrbase); | ||
3169 | } | ||
3170 | |||
3171 | /* | ||
3172 | * Write the final few registers that depend on some of the | ||
3173 | * init setup. Done late in init, just before bringing up | ||
3174 | * the serdes. | ||
3175 | */ | ||
3176 | static int qib_late_6120_initreg(struct qib_devdata *dd) | ||
3177 | { | ||
3178 | int ret = 0; | ||
3179 | u64 val; | ||
3180 | |||
3181 | qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); | ||
3182 | qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); | ||
3183 | qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); | ||
3184 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | ||
3185 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | ||
3186 | if (val != dd->pioavailregs_phys) { | ||
3187 | qib_dev_err(dd, "Catastrophic software error, " | ||
3188 | "SendPIOAvailAddr written as %lx, " | ||
3189 | "read back as %llx\n", | ||
3190 | (unsigned long) dd->pioavailregs_phys, | ||
3191 | (unsigned long long) val); | ||
3192 | ret = -EINVAL; | ||
3193 | } | ||
3194 | return ret; | ||
3195 | } | ||
3196 | |||
3197 | static int init_6120_variables(struct qib_devdata *dd) | ||
3198 | { | ||
3199 | int ret = 0; | ||
3200 | struct qib_pportdata *ppd; | ||
3201 | u32 sbufs; | ||
3202 | |||
3203 | ppd = (struct qib_pportdata *)(dd + 1); | ||
3204 | dd->pport = ppd; | ||
3205 | dd->num_pports = 1; | ||
3206 | |||
3207 | dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports); | ||
3208 | ppd->cpspec = NULL; /* not used in this chip */ | ||
3209 | |||
3210 | spin_lock_init(&dd->cspec->kernel_tid_lock); | ||
3211 | spin_lock_init(&dd->cspec->user_tid_lock); | ||
3212 | spin_lock_init(&dd->cspec->rcvmod_lock); | ||
3213 | spin_lock_init(&dd->cspec->gpio_lock); | ||
3214 | |||
3215 | /* we haven't yet set QIB_PRESENT, so use read directly */ | ||
3216 | dd->revision = readq(&dd->kregbase[kr_revision]); | ||
3217 | |||
3218 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | ||
3219 | qib_dev_err(dd, "Revision register read failure, " | ||
3220 | "giving up initialization\n"); | ||
3221 | ret = -ENODEV; | ||
3222 | goto bail; | ||
3223 | } | ||
3224 | dd->flags |= QIB_PRESENT; /* now register routines work */ | ||
3225 | |||
3226 | dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, | ||
3227 | ChipRevMajor); | ||
3228 | dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, | ||
3229 | ChipRevMinor); | ||
3230 | |||
3231 | get_6120_chip_params(dd); | ||
3232 | pe_boardname(dd); /* fill in boardname */ | ||
3233 | |||
3234 | /* | ||
3235 | * GPIO bits for TWSI data and clock, | ||
3236 | * used for serial EEPROM. | ||
3237 | */ | ||
3238 | dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; | ||
3239 | dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; | ||
3240 | dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV; | ||
3241 | |||
3242 | if (qib_unordered_wc()) | ||
3243 | dd->flags |= QIB_PIO_FLUSH_WC; | ||
3244 | |||
3245 | /* | ||
3246 | * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. | ||
3247 | * 2 is Some Misc, 3 is reserved for future. | ||
3248 | */ | ||
3249 | dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr); | ||
3250 | |||
3251 | /* Ignore errors in PIO/PBC on systems with unordered write-combining */ | ||
3252 | if (qib_unordered_wc()) | ||
3253 | dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY; | ||
3254 | |||
3255 | dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr); | ||
3256 | |||
3257 | dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); | ||
3258 | |||
3259 | qib_init_pportdata(ppd, dd, 0, 1); | ||
3260 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | ||
3261 | ppd->link_speed_supported = QIB_IB_SDR; | ||
3262 | ppd->link_width_enabled = IB_WIDTH_4X; | ||
3263 | ppd->link_speed_enabled = ppd->link_speed_supported; | ||
3264 | /* these can't change for this chip, so set once */ | ||
3265 | ppd->link_width_active = ppd->link_width_enabled; | ||
3266 | ppd->link_speed_active = ppd->link_speed_enabled; | ||
3267 | ppd->vls_supported = IB_VL_VL0; | ||
3268 | ppd->vls_operational = ppd->vls_supported; | ||
3269 | |||
3270 | dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; | ||
3271 | dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; | ||
3272 | dd->rhf_offset = 0; | ||
3273 | |||
3274 | /* we always allocate at least 2048 bytes for eager buffers */ | ||
3275 | ret = ib_mtu_enum_to_int(qib_ibmtu); | ||
3276 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; | ||
3277 | |||
3278 | qib_6120_tidtemplate(dd); | ||
3279 | |||
3280 | /* | ||
3281 | * We can request a receive interrupt for 1 or | ||
3282 | * more packets from current offset. For now, we set this | ||
3283 | * up for a single packet. | ||
3284 | */ | ||
3285 | dd->rhdrhead_intr_off = 1ULL << 32; | ||
3286 | |||
3287 | /* setup the stats timer; the add_timer is done at end of init */ | ||
3288 | init_timer(&dd->stats_timer); | ||
3289 | dd->stats_timer.function = qib_get_6120_faststats; | ||
3290 | dd->stats_timer.data = (unsigned long) dd; | ||
3291 | |||
3292 | init_timer(&dd->cspec->pma_timer); | ||
3293 | dd->cspec->pma_timer.function = pma_6120_timer; | ||
3294 | dd->cspec->pma_timer.data = (unsigned long) ppd; | ||
3295 | |||
3296 | dd->ureg_align = qib_read_kreg32(dd, kr_palign); | ||
3297 | |||
3298 | dd->piosize2kmax_dwords = dd->piosize2k >> 2; | ||
3299 | qib_6120_config_ctxts(dd); | ||
3300 | qib_set_ctxtcnt(dd); | ||
3301 | |||
3302 | if (qib_wc_pat) { | ||
3303 | ret = init_chip_wc_pat(dd, 0); | ||
3304 | if (ret) | ||
3305 | goto bail; | ||
3306 | } | ||
3307 | set_6120_baseaddrs(dd); /* set chip access pointers now */ | ||
3308 | |||
3309 | ret = 0; | ||
3310 | if (qib_mini_init) | ||
3311 | goto bail; | ||
3312 | |||
3313 | qib_num_cfg_vls = 1; /* if any 6120's, only one VL */ | ||
3314 | |||
3315 | ret = qib_create_ctxts(dd); | ||
3316 | init_6120_cntrnames(dd); | ||
3317 | |||
3318 | /* use all of the 4KB buffers for the kernel; otherwise reserve 16 */ | ||
3319 | sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16; | ||
3320 | |||
3321 | dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs; | ||
3322 | dd->pbufsctxt = dd->lastctxt_piobuf / | ||
3323 | (dd->cfgctxts - dd->first_user_ctxt); | ||
3324 | |||
3325 | if (ret) | ||
3326 | goto bail; | ||
3327 | bail: | ||
3328 | return ret; | ||
3329 | } | ||
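A worked example of the send-buffer partitioning just above may help; the counts are purely hypothetical and only show how the arithmetic splits buffers between the kernel and the user contexts:

    /*
     * Hypothetical 6120 setup:
     *   piobcnt2k = 128, piobcnt4k = 8 -> sbufs = 8 (all 4K buffers to the kernel)
     *   lastctxt_piobuf = 128 + 8 - 8 = 128
     *   cfgctxts = 5, first_user_ctxt = 1 -> pbufsctxt = 128 / 4 = 32
     * so each of the four user contexts gets 32 of the 2K buffers, and the
     * kernel keeps the 8 4K buffers at the top of the range.
     */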
3330 | |||
3331 | /* | ||
3332 | * For this chip, we want to use the same buffer every time | ||
3333 | * when we are trying to bring the link up (they are always VL15 | ||
3334 | * packets). At that link state the packet should always go out immediately | ||
3335 | * (or at least be discarded at the tx interface if the link is down). | ||
3336 | * If it doesn't, and the buffer isn't available, that means some other | ||
3337 | * sender has gotten ahead of us, and is preventing our packet from going | ||
3338 | * out. In that case, we flush all packets, and try again. If that still | ||
3339 | * fails, we fail the request, and hope things work the next time around. | ||
3340 | * | ||
3341 | * We don't need very complicated heuristics on whether the packet had | ||
3342 | * time to go out or not, since even at SDR 1X, it goes out in very short | ||
3343 | * time periods, covered by the chip reads done here and as part of the | ||
3344 | * flush. | ||
3345 | */ | ||
3346 | static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum) | ||
3347 | { | ||
3348 | u32 __iomem *buf; | ||
3349 | u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1; | ||
3350 | |||
3351 | /* | ||
3352 | * always blip to get avail list updated, since it's almost | ||
3353 | * always needed, and is fairly cheap. | ||
3354 | */ | ||
3355 | sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
3356 | qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */ | ||
3357 | buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf); | ||
3358 | if (buf) | ||
3359 | goto done; | ||
3360 | |||
3361 | sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH | | ||
3362 | QIB_SENDCTRL_AVAIL_BLIP); | ||
3363 | ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */ | ||
3364 | qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */ | ||
3365 | buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf); | ||
3366 | done: | ||
3367 | return buf; | ||
3368 | } | ||
3369 | |||
3370 | static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc, | ||
3371 | u32 *pbufnum) | ||
3372 | { | ||
3373 | u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK; | ||
3374 | struct qib_devdata *dd = ppd->dd; | ||
3375 | u32 __iomem *buf; | ||
3376 | |||
3377 | if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) && | ||
3378 | !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE))) | ||
3379 | buf = get_6120_link_buf(ppd, pbufnum); | ||
3380 | else { | ||
3381 | |||
3382 | if ((plen + 1) > dd->piosize2kmax_dwords) | ||
3383 | first = dd->piobcnt2k; | ||
3384 | else | ||
3385 | first = 0; | ||
3386 | /* try 4k if all 2k busy, so same last for both sizes */ | ||
3387 | last = dd->piobcnt2k + dd->piobcnt4k - 1; | ||
3388 | buf = qib_getsendbuf_range(dd, pbufnum, first, last); | ||
3389 | } | ||
3390 | return buf; | ||
3391 | } | ||
3392 | |||
3393 | static int init_sdma_6120_regs(struct qib_pportdata *ppd) | ||
3394 | { | ||
3395 | return -ENODEV; | ||
3396 | } | ||
3397 | |||
3398 | static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd) | ||
3399 | { | ||
3400 | return 0; | ||
3401 | } | ||
3402 | |||
3403 | static int qib_sdma_6120_busy(struct qib_pportdata *ppd) | ||
3404 | { | ||
3405 | return 0; | ||
3406 | } | ||
3407 | |||
3408 | static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail) | ||
3409 | { | ||
3410 | } | ||
3411 | |||
3412 | static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) | ||
3413 | { | ||
3414 | } | ||
3415 | |||
3416 | static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) | ||
3417 | { | ||
3418 | } | ||
3419 | |||
3420 | /* | ||
3421 | * the pbc doesn't need a VL15 indicator, but we need it for link_buf. | ||
3422 | * The chip ignores the bit if set. | ||
3423 | */ | ||
3424 | static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen, | ||
3425 | u8 srate, u8 vl) | ||
3426 | { | ||
3427 | return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0; | ||
3428 | } | ||
3429 | |||
3430 | static void qib_6120_initvl15_bufs(struct qib_devdata *dd) | ||
3431 | { | ||
3432 | } | ||
3433 | |||
3434 | static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd) | ||
3435 | { | ||
3436 | rcd->rcvegrcnt = rcd->dd->rcvhdrcnt; | ||
3437 | rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt; | ||
3438 | } | ||
3439 | |||
3440 | static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start, | ||
3441 | u32 len, u32 avail, struct qib_ctxtdata *rcd) | ||
3442 | { | ||
3443 | } | ||
3444 | |||
3445 | static void writescratch(struct qib_devdata *dd, u32 val) | ||
3446 | { | ||
3447 | (void) qib_write_kreg(dd, kr_scratch, val); | ||
3448 | } | ||
3449 | |||
3450 | static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum) | ||
3451 | { | ||
3452 | return -ENXIO; | ||
3453 | } | ||
3454 | |||
3455 | /* Dummy function, as 6120 boards never disable EEPROM Write */ | ||
3456 | static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen) | ||
3457 | { | ||
3458 | return 1; | ||
3459 | } | ||
3460 | |||
3461 | /** | ||
3462 | * qib_init_iba6120_funcs - set up the chip-specific function pointers | ||
3463 | * @pdev: pci_dev of the qlogic_ib device | ||
3464 | * @ent: pci_device_id matching this chip | ||
3465 | * | ||
3466 | * This is global, and is called directly at init to set up the | ||
3467 | * chip-specific function pointers for later use. | ||
3468 | * | ||
3469 | * It also allocates/partially-inits the qib_devdata struct for | ||
3470 | * this device. | ||
3471 | */ | ||
3472 | struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, | ||
3473 | const struct pci_device_id *ent) | ||
3474 | { | ||
3475 | struct qib_devdata *dd; | ||
3476 | int ret; | ||
3477 | |||
3478 | #ifndef CONFIG_PCI_MSI | ||
3479 | qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " | ||
3480 | "work if CONFIG_PCI_MSI is not enabled\n", | ||
3481 | ent->device); | ||
3482 | dd = ERR_PTR(-ENODEV); | ||
3483 | goto bail; | ||
3484 | #endif | ||
3485 | |||
3486 | dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) + | ||
3487 | sizeof(struct qib_chip_specific)); | ||
3488 | if (IS_ERR(dd)) | ||
3489 | goto bail; | ||
3490 | |||
3491 | dd->f_bringup_serdes = qib_6120_bringup_serdes; | ||
3492 | dd->f_cleanup = qib_6120_setup_cleanup; | ||
3493 | dd->f_clear_tids = qib_6120_clear_tids; | ||
3494 | dd->f_free_irq = qib_6120_free_irq; | ||
3495 | dd->f_get_base_info = qib_6120_get_base_info; | ||
3496 | dd->f_get_msgheader = qib_6120_get_msgheader; | ||
3497 | dd->f_getsendbuf = qib_6120_getsendbuf; | ||
3498 | dd->f_gpio_mod = gpio_6120_mod; | ||
3499 | dd->f_eeprom_wen = qib_6120_eeprom_wen; | ||
3500 | dd->f_hdrqempty = qib_6120_hdrqempty; | ||
3501 | dd->f_ib_updown = qib_6120_ib_updown; | ||
3502 | dd->f_init_ctxt = qib_6120_init_ctxt; | ||
3503 | dd->f_initvl15_bufs = qib_6120_initvl15_bufs; | ||
3504 | dd->f_intr_fallback = qib_6120_nointr_fallback; | ||
3505 | dd->f_late_initreg = qib_late_6120_initreg; | ||
3506 | dd->f_setpbc_control = qib_6120_setpbc_control; | ||
3507 | dd->f_portcntr = qib_portcntr_6120; | ||
3508 | dd->f_put_tid = (dd->minrev >= 2) ? | ||
3509 | qib_6120_put_tid_2 : | ||
3510 | qib_6120_put_tid; | ||
3511 | dd->f_quiet_serdes = qib_6120_quiet_serdes; | ||
3512 | dd->f_rcvctrl = rcvctrl_6120_mod; | ||
3513 | dd->f_read_cntrs = qib_read_6120cntrs; | ||
3514 | dd->f_read_portcntrs = qib_read_6120portcntrs; | ||
3515 | dd->f_reset = qib_6120_setup_reset; | ||
3516 | dd->f_init_sdma_regs = init_sdma_6120_regs; | ||
3517 | dd->f_sdma_busy = qib_sdma_6120_busy; | ||
3518 | dd->f_sdma_gethead = qib_sdma_6120_gethead; | ||
3519 | dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl; | ||
3520 | dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt; | ||
3521 | dd->f_sdma_update_tail = qib_sdma_update_6120_tail; | ||
3522 | dd->f_sendctrl = sendctrl_6120_mod; | ||
3523 | dd->f_set_armlaunch = qib_set_6120_armlaunch; | ||
3524 | dd->f_set_cntr_sample = qib_set_cntr_6120_sample; | ||
3525 | dd->f_iblink_state = qib_6120_iblink_state; | ||
3526 | dd->f_ibphys_portstate = qib_6120_phys_portstate; | ||
3527 | dd->f_get_ib_cfg = qib_6120_get_ib_cfg; | ||
3528 | dd->f_set_ib_cfg = qib_6120_set_ib_cfg; | ||
3529 | dd->f_set_ib_loopback = qib_6120_set_loopback; | ||
3530 | dd->f_set_intr_state = qib_6120_set_intr_state; | ||
3531 | dd->f_setextled = qib_6120_setup_setextled; | ||
3532 | dd->f_txchk_change = qib_6120_txchk_change; | ||
3533 | dd->f_update_usrhead = qib_update_6120_usrhead; | ||
3534 | dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr; | ||
3535 | dd->f_xgxs_reset = qib_6120_xgxs_reset; | ||
3536 | dd->f_writescratch = writescratch; | ||
3537 | dd->f_tempsense_rd = qib_6120_tempsense_rd; | ||
3538 | /* | ||
3539 | * Do remaining pcie setup and save pcie values in dd. | ||
3540 | * Any error printing is already done by the init code. | ||
3541 | * On return, we have the chip mapped and accessible, | ||
3542 | * but chip registers are not set up until start of | ||
3543 | * init_6120_variables. | ||
3544 | */ | ||
3545 | ret = qib_pcie_ddinit(dd, pdev, ent); | ||
3546 | if (ret < 0) | ||
3547 | goto bail_free; | ||
3548 | |||
3549 | /* initialize chip-specific variables */ | ||
3550 | ret = init_6120_variables(dd); | ||
3551 | if (ret) | ||
3552 | goto bail_cleanup; | ||
3553 | |||
3554 | if (qib_mini_init) | ||
3555 | goto bail; | ||
3556 | |||
3557 | #ifndef CONFIG_PCI_MSI | ||
3558 | qib_dev_err(dd, "PCI_MSI not configured, NO interrupts\n"); | ||
3559 | #endif | ||
3560 | |||
3561 | if (qib_pcie_params(dd, 8, NULL, NULL)) | ||
3562 | qib_dev_err(dd, "Failed to setup PCIe or interrupts; " | ||
3563 | "continuing anyway\n"); | ||
3564 | dd->cspec->irq = pdev->irq; /* save IRQ */ | ||
3565 | |||
3566 | /* clear diagctrl register, in case diags were running and crashed */ | ||
3567 | qib_write_kreg(dd, kr_hwdiagctrl, 0); | ||
3568 | |||
3569 | if (qib_read_kreg64(dd, kr_hwerrstatus) & | ||
3570 | QLOGIC_IB_HWE_SERDESPLLFAILED) | ||
3571 | qib_write_kreg(dd, kr_hwerrclear, | ||
3572 | QLOGIC_IB_HWE_SERDESPLLFAILED); | ||
3573 | |||
3574 | /* setup interrupt handler (interrupt type handled above) */ | ||
3575 | qib_setup_6120_interrupt(dd); | ||
3576 | /* Note that qpn_mask is set by qib_6120_config_ctxts() first */ | ||
3577 | qib_6120_init_hwerrors(dd); | ||
3578 | |||
3579 | goto bail; | ||
3580 | |||
3581 | bail_cleanup: | ||
3582 | qib_pcie_ddcleanup(dd); | ||
3583 | bail_free: | ||
3584 | qib_free_devdata(dd); | ||
3585 | dd = ERR_PTR(ret); | ||
3586 | bail: | ||
3587 | return dd; | ||
3588 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c new file mode 100644 index 000000000000..6fd8d74e7392 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
@@ -0,0 +1,4618 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | /* | ||
35 | * This file contains all of the code that is specific to the | ||
36 | * QLogic_IB 7220 chip (except that specific to the SerDes) | ||
37 | */ | ||
38 | |||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/pci.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/io.h> | ||
43 | #include <rdma/ib_verbs.h> | ||
44 | |||
45 | #include "qib.h" | ||
46 | #include "qib_7220.h" | ||
47 | |||
48 | static void qib_setup_7220_setextled(struct qib_pportdata *, u32); | ||
49 | static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t); | ||
50 | static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op); | ||
51 | static u32 qib_7220_iblink_state(u64); | ||
52 | static u8 qib_7220_phys_portstate(u64); | ||
53 | static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16); | ||
54 | static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16); | ||
55 | |||
56 | /* | ||
57 | * This file contains almost all the chip-specific register information and | ||
58 | * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the | ||
59 | * exception of SerDes support, which is in qib_sd7220.c. | ||
60 | */ | ||
61 | |||
62 | /* Below uses machine-generated qib_chipnum_regs.h file */ | ||
63 | #define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64)) | ||
64 | |||
65 | /* Use defines to tie machine-generated names to lower-case names */ | ||
66 | #define kr_control KREG_IDX(Control) | ||
67 | #define kr_counterregbase KREG_IDX(CntrRegBase) | ||
68 | #define kr_errclear KREG_IDX(ErrClear) | ||
69 | #define kr_errmask KREG_IDX(ErrMask) | ||
70 | #define kr_errstatus KREG_IDX(ErrStatus) | ||
71 | #define kr_extctrl KREG_IDX(EXTCtrl) | ||
72 | #define kr_extstatus KREG_IDX(EXTStatus) | ||
73 | #define kr_gpio_clear KREG_IDX(GPIOClear) | ||
74 | #define kr_gpio_mask KREG_IDX(GPIOMask) | ||
75 | #define kr_gpio_out KREG_IDX(GPIOOut) | ||
76 | #define kr_gpio_status KREG_IDX(GPIOStatus) | ||
77 | #define kr_hrtbt_guid KREG_IDX(HRTBT_GUID) | ||
78 | #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) | ||
79 | #define kr_hwerrclear KREG_IDX(HwErrClear) | ||
80 | #define kr_hwerrmask KREG_IDX(HwErrMask) | ||
81 | #define kr_hwerrstatus KREG_IDX(HwErrStatus) | ||
82 | #define kr_ibcctrl KREG_IDX(IBCCtrl) | ||
83 | #define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl) | ||
84 | #define kr_ibcddrstatus KREG_IDX(IBCDDRStatus) | ||
85 | #define kr_ibcstatus KREG_IDX(IBCStatus) | ||
86 | #define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl) | ||
87 | #define kr_intclear KREG_IDX(IntClear) | ||
88 | #define kr_intmask KREG_IDX(IntMask) | ||
89 | #define kr_intstatus KREG_IDX(IntStatus) | ||
90 | #define kr_ncmodectrl KREG_IDX(IBNCModeCtrl) | ||
91 | #define kr_palign KREG_IDX(PageAlign) | ||
92 | #define kr_partitionkey KREG_IDX(RcvPartitionKey) | ||
93 | #define kr_portcnt KREG_IDX(PortCnt) | ||
94 | #define kr_rcvbthqp KREG_IDX(RcvBTHQP) | ||
95 | #define kr_rcvctrl KREG_IDX(RcvCtrl) | ||
96 | #define kr_rcvegrbase KREG_IDX(RcvEgrBase) | ||
97 | #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) | ||
98 | #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) | ||
99 | #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) | ||
100 | #define kr_rcvhdrsize KREG_IDX(RcvHdrSize) | ||
101 | #define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt) | ||
102 | #define kr_rcvtidbase KREG_IDX(RcvTIDBase) | ||
103 | #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) | ||
104 | #define kr_revision KREG_IDX(Revision) | ||
105 | #define kr_scratch KREG_IDX(Scratch) | ||
106 | #define kr_sendbuffererror KREG_IDX(SendBufErr0) | ||
107 | #define kr_sendctrl KREG_IDX(SendCtrl) | ||
108 | #define kr_senddmabase KREG_IDX(SendDmaBase) | ||
109 | #define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0) | ||
110 | #define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1) | ||
111 | #define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2) | ||
112 | #define kr_senddmahead KREG_IDX(SendDmaHead) | ||
113 | #define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr) | ||
114 | #define kr_senddmalengen KREG_IDX(SendDmaLenGen) | ||
115 | #define kr_senddmastatus KREG_IDX(SendDmaStatus) | ||
116 | #define kr_senddmatail KREG_IDX(SendDmaTail) | ||
117 | #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr) | ||
118 | #define kr_sendpiobufbase KREG_IDX(SendBufBase) | ||
119 | #define kr_sendpiobufcnt KREG_IDX(SendBufCnt) | ||
120 | #define kr_sendpiosize KREG_IDX(SendBufSize) | ||
121 | #define kr_sendregbase KREG_IDX(SendRegBase) | ||
122 | #define kr_userregbase KREG_IDX(UserRegBase) | ||
123 | #define kr_xgxs_cfg KREG_IDX(XGXSCfg) | ||
124 | |||
125 | /* These must only be written via qib_write_kreg_ctxt() */ | ||
126 | #define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0) | ||
127 | #define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) | ||
128 | |||
129 | |||
130 | #define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \ | ||
131 | QIB_7220_LBIntCnt_OFFS) / sizeof(u64)) | ||
132 | |||
133 | #define cr_badformat CREG_IDX(RxVersionErrCnt) | ||
134 | #define cr_erricrc CREG_IDX(RxICRCErrCnt) | ||
135 | #define cr_errlink CREG_IDX(RxLinkMalformCnt) | ||
136 | #define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt) | ||
137 | #define cr_errpkey CREG_IDX(RxPKeyMismatchCnt) | ||
138 | #define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt) | ||
139 | #define cr_err_rlen CREG_IDX(RxLenErrCnt) | ||
140 | #define cr_errslen CREG_IDX(TxLenErrCnt) | ||
141 | #define cr_errtidfull CREG_IDX(RxTIDFullErrCnt) | ||
142 | #define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt) | ||
143 | #define cr_errvcrc CREG_IDX(RxVCRCErrCnt) | ||
144 | #define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt) | ||
145 | #define cr_lbint CREG_IDX(LBIntCnt) | ||
146 | #define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) | ||
147 | #define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt) | ||
148 | #define cr_lbflowstall CREG_IDX(LBFlowStallCnt) | ||
149 | #define cr_pktrcv CREG_IDX(RxDataPktCnt) | ||
150 | #define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) | ||
151 | #define cr_pktsend CREG_IDX(TxDataPktCnt) | ||
152 | #define cr_pktsendflow CREG_IDX(TxFlowPktCnt) | ||
153 | #define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt) | ||
154 | #define cr_rcvebp CREG_IDX(RxEBPCnt) | ||
155 | #define cr_rcvovfl CREG_IDX(RxBufOvflCnt) | ||
156 | #define cr_senddropped CREG_IDX(TxDroppedPktCnt) | ||
157 | #define cr_sendstall CREG_IDX(TxFlowStallCnt) | ||
158 | #define cr_sendunderrun CREG_IDX(TxUnderrunCnt) | ||
159 | #define cr_wordrcv CREG_IDX(RxDwordCnt) | ||
160 | #define cr_wordsend CREG_IDX(TxDwordCnt) | ||
161 | #define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt) | ||
162 | #define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt) | ||
163 | #define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) | ||
164 | #define cr_iblinkdown CREG_IDX(IBLinkDownedCnt) | ||
165 | #define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt) | ||
166 | #define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt) | ||
167 | #define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt) | ||
168 | #define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt) | ||
169 | #define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt) | ||
170 | #define cr_rxvlerr CREG_IDX(RxVlErrCnt) | ||
171 | #define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt) | ||
172 | #define cr_psstat CREG_IDX(PSStat) | ||
173 | #define cr_psstart CREG_IDX(PSStart) | ||
174 | #define cr_psinterval CREG_IDX(PSInterval) | ||
175 | #define cr_psrcvdatacount CREG_IDX(PSRcvDataCount) | ||
176 | #define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount) | ||
177 | #define cr_psxmitdatacount CREG_IDX(PSXmitDataCount) | ||
178 | #define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount) | ||
179 | #define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount) | ||
180 | #define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt) | ||
181 | #define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt) | ||
182 | |||
183 | #define SYM_RMASK(regname, fldname) ((u64) \ | ||
184 | QIB_7220_##regname##_##fldname##_RMASK) | ||
185 | #define SYM_MASK(regname, fldname) ((u64) \ | ||
186 | QIB_7220_##regname##_##fldname##_RMASK << \ | ||
187 | QIB_7220_##regname##_##fldname##_LSB) | ||
188 | #define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB) | ||
189 | #define SYM_FIELD(value, regname, fldname) ((u64) \ | ||
190 | (((value) >> SYM_LSB(regname, fldname)) & \ | ||
191 | SYM_RMASK(regname, fldname))) | ||
192 | #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) | ||
193 | #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) | ||
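As an illustration of how these macros are meant to be used (the register and field names come from the machine-generated header; the local variables here are hypothetical):

    u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
    u32 speed = SYM_FIELD(ibcs, IBCStatus, LinkSpeedActive);
    /* SYM_FIELD shifts the value down by the field's LSB and masks it with
     * the right-justified field mask -- the same pattern the 6120 code
     * earlier in this patch uses to extract ChipRevMajor/ChipRevMinor. */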
194 | |||
195 | /* ibcctrl bits */ | ||
196 | #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 | ||
197 | /* cycle through TS1/TS2 till OK */ | ||
198 | #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 | ||
199 | /* wait for TS1, then go on */ | ||
200 | #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 | ||
201 | #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 | ||
202 | |||
203 | #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ | ||
204 | #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ | ||
205 | #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ | ||
206 | |||
207 | #define BLOB_7220_IBCHG 0x81 | ||
208 | |||
209 | /* | ||
210 | * We could have a single register get/put routine, that takes a group type, | ||
211 | * but this is somewhat clearer and cleaner. It also gives us some error | ||
212 | * checking. 64 bit register reads should always work, but are inefficient | ||
213 | * on opteron (the northbridge always generates 2 separate HT 32 bit reads), | ||
214 | * so we use kreg32 wherever possible. User register and counter register | ||
215 | * reads are always 32 bit reads, so only one form of those routines. | ||
216 | */ | ||
217 | |||
218 | /** | ||
219 | * qib_read_ureg32 - read 32-bit virtualized per-context register | ||
220 | * @dd: device | ||
221 | * @regno: register number | ||
222 | * @ctxt: context number | ||
223 | * | ||
224 | * Return the contents of a register that is virtualized to be per context. | ||
225 | * Returns -1 on errors (not distinguishable from valid contents at | ||
226 | * runtime; we may add a separate error variable at some point). | ||
227 | */ | ||
228 | static inline u32 qib_read_ureg32(const struct qib_devdata *dd, | ||
229 | enum qib_ureg regno, int ctxt) | ||
230 | { | ||
231 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
232 | return 0; | ||
233 | |||
234 | if (dd->userbase) | ||
235 | return readl(regno + (u64 __iomem *) | ||
236 | ((char __iomem *)dd->userbase + | ||
237 | dd->ureg_align * ctxt)); | ||
238 | else | ||
239 | return readl(regno + (u64 __iomem *) | ||
240 | (dd->uregbase + | ||
241 | (char __iomem *)dd->kregbase + | ||
242 | dd->ureg_align * ctxt)); | ||
243 | } | ||
244 | |||
245 | /** | ||
246 | * qib_write_ureg - write 32-bit virtualized per-context register | ||
247 | * @dd: device | ||
248 | * @regno: register number | ||
249 | * @value: value | ||
250 | * @ctxt: context | ||
251 | * | ||
252 | * Write the contents of a register that is virtualized to be per context. | ||
253 | */ | ||
254 | static inline void qib_write_ureg(const struct qib_devdata *dd, | ||
255 | enum qib_ureg regno, u64 value, int ctxt) | ||
256 | { | ||
257 | u64 __iomem *ubase; | ||
258 | |||
259 | if (dd->userbase) | ||
260 | ubase = (u64 __iomem *) | ||
261 | ((char __iomem *) dd->userbase + | ||
262 | dd->ureg_align * ctxt); | ||
263 | else | ||
264 | ubase = (u64 __iomem *) | ||
265 | (dd->uregbase + | ||
266 | (char __iomem *) dd->kregbase + | ||
267 | dd->ureg_align * ctxt); | ||
268 | |||
269 | if (dd->kregbase && (dd->flags & QIB_PRESENT)) | ||
270 | writeq(value, &ubase[regno]); | ||
271 | } | ||
272 | |||
273 | /** | ||
274 | * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register | ||
275 | * @dd: the qlogic_ib device | ||
276 | * @regno: the register number to write | ||
277 | * @ctxt: the context containing the register | ||
278 | * @value: the value to write | ||
279 | */ | ||
280 | static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, | ||
281 | const u16 regno, unsigned ctxt, | ||
282 | u64 value) | ||
283 | { | ||
284 | qib_write_kreg(dd, regno + ctxt, value); | ||
285 | } | ||
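Per-context registers such as RcvHdrAddr0 are laid out consecutively, so qib_write_kreg_ctxt() simply offsets the base register index by the context number. An illustrative call (the rcvhdrq_phys field name is assumed here, not shown in this hunk):

    /* point context 3's receive header queue at its DMA address */
    qib_write_kreg_ctxt(dd, kr_rcvhdraddr, 3, rcd->rcvhdrq_phys);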
286 | |||
287 | static inline void write_7220_creg(const struct qib_devdata *dd, | ||
288 | u16 regno, u64 value) | ||
289 | { | ||
290 | if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT)) | ||
291 | writeq(value, &dd->cspec->cregbase[regno]); | ||
292 | } | ||
293 | |||
294 | static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno) | ||
295 | { | ||
296 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | ||
297 | return 0; | ||
298 | return readq(&dd->cspec->cregbase[regno]); | ||
299 | } | ||
300 | |||
301 | static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno) | ||
302 | { | ||
303 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | ||
304 | return 0; | ||
305 | return readl(&dd->cspec->cregbase[regno]); | ||
306 | } | ||
307 | |||
308 | /* kr_revision bits */ | ||
309 | #define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1) | ||
310 | #define QLOGIC_IB_R_EMULATORREV_SHIFT 40 | ||
311 | |||
312 | /* kr_control bits */ | ||
313 | #define QLOGIC_IB_C_RESET (1U << 7) | ||
314 | |||
315 | /* kr_intstatus, kr_intclear, kr_intmask bits */ | ||
316 | #define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1) | ||
317 | #define QLOGIC_IB_I_RCVURG_SHIFT 32 | ||
318 | #define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1) | ||
319 | #define QLOGIC_IB_I_RCVAVAIL_SHIFT 0 | ||
320 | #define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27) | ||
321 | |||
322 | #define QLOGIC_IB_C_FREEZEMODE 0x00000002 | ||
323 | #define QLOGIC_IB_C_LINKENABLE 0x00000004 | ||
324 | |||
325 | #define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL | ||
326 | #define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL | ||
327 | #define QLOGIC_IB_I_ERROR 0x0000000080000000ULL | ||
328 | #define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL | ||
329 | #define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL | ||
330 | #define QLOGIC_IB_I_GPIO 0x0000000010000000ULL | ||
331 | |||
332 | /* variables for sanity checking interrupt and errors */ | ||
333 | #define QLOGIC_IB_I_BITSEXTANT \ | ||
334 | (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \ | ||
335 | (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \ | ||
336 | (QLOGIC_IB_I_RCVAVAIL_MASK << \ | ||
337 | QLOGIC_IB_I_RCVAVAIL_SHIFT) | \ | ||
338 | QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \ | ||
339 | QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \ | ||
340 | QLOGIC_IB_I_SERDESTRIMDONE) | ||
341 | |||
342 | #define IB_HWE_BITSEXTANT \ | ||
343 | (HWE_MASK(RXEMemParityErr) | \ | ||
344 | HWE_MASK(TXEMemParityErr) | \ | ||
345 | (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \ | ||
346 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \ | ||
347 | QLOGIC_IB_HWE_PCIE1PLLFAILED | \ | ||
348 | QLOGIC_IB_HWE_PCIE0PLLFAILED | \ | ||
349 | QLOGIC_IB_HWE_PCIEPOISONEDTLP | \ | ||
350 | QLOGIC_IB_HWE_PCIECPLTIMEOUT | \ | ||
351 | QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \ | ||
352 | QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \ | ||
353 | QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \ | ||
354 | HWE_MASK(PowerOnBISTFailed) | \ | ||
355 | QLOGIC_IB_HWE_COREPLL_FBSLIP | \ | ||
356 | QLOGIC_IB_HWE_COREPLL_RFSLIP | \ | ||
357 | QLOGIC_IB_HWE_SERDESPLLFAILED | \ | ||
358 | HWE_MASK(IBCBusToSPCParityErr) | \ | ||
359 | HWE_MASK(IBCBusFromSPCParityErr) | \ | ||
360 | QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \ | ||
361 | QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \ | ||
362 | QLOGIC_IB_HWE_SDMAMEMREADERR | \ | ||
363 | QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \ | ||
364 | QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \ | ||
365 | QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \ | ||
366 | QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \ | ||
367 | QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \ | ||
368 | QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \ | ||
369 | QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \ | ||
370 | QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \ | ||
371 | QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR) | ||
372 | |||
373 | #define IB_E_BITSEXTANT \ | ||
374 | (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \ | ||
375 | ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
376 | ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \ | ||
377 | ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \ | ||
378 | ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \ | ||
379 | ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \ | ||
380 | ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \ | ||
381 | ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \ | ||
382 | ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \ | ||
383 | ERR_MASK(SendSpecialTriggerErr) | \ | ||
384 | ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \ | ||
385 | ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \ | ||
386 | ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
387 | ERR_MASK(SendDroppedDataPktErr) | \ | ||
388 | ERR_MASK(SendPioArmLaunchErr) | \ | ||
389 | ERR_MASK(SendUnexpectedPktNumErr) | \ | ||
390 | ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \ | ||
391 | ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \ | ||
392 | ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \ | ||
393 | ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \ | ||
394 | ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \ | ||
395 | ERR_MASK(SDmaUnexpDataErr) | \ | ||
396 | ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \ | ||
397 | ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \ | ||
398 | ERR_MASK(SDmaDescAddrMisalignErr) | \ | ||
399 | ERR_MASK(InvalidEEPCmd)) | ||
400 | |||
401 | /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ | ||
402 | #define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL | ||
403 | #define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0 | ||
404 | #define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL | ||
405 | #define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL | ||
406 | #define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL | ||
407 | #define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL | ||
408 | #define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL | ||
409 | #define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL | ||
410 | #define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL | ||
411 | #define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL | ||
412 | #define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL | ||
413 | #define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL | ||
414 | /* specific to this chip */ | ||
415 | #define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL | ||
416 | #define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL | ||
417 | #define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL | ||
418 | #define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL | ||
419 | #define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL | ||
420 | #define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL | ||
421 | #define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL | ||
422 | #define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL | ||
423 | #define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL | ||
424 | #define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL | ||
425 | #define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL | ||
426 | #define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL | ||
427 | |||
428 | #define IBA7220_IBCC_LINKCMD_SHIFT 19 | ||
429 | |||
430 | /* kr_ibcddrctrl bits */ | ||
431 | #define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL | ||
432 | #define IBA7220_IBC_DLIDLMC_SHIFT 32 | ||
433 | |||
434 | #define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \ | ||
435 | SYM_RMASK(IBCDDRCtrl, HRTBT_ENB)) | ||
436 | #define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB) | ||
437 | |||
438 | #define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8) | ||
439 | #define IBA7220_IBC_LREV_MASK 1 | ||
440 | #define IBA7220_IBC_LREV_SHIFT 8 | ||
441 | #define IBA7220_IBC_RXPOL_MASK 1 | ||
442 | #define IBA7220_IBC_RXPOL_SHIFT 7 | ||
443 | #define IBA7220_IBC_WIDTH_SHIFT 5 | ||
444 | #define IBA7220_IBC_WIDTH_MASK 0x3 | ||
445 | #define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT) | ||
446 | #define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT) | ||
447 | #define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT) | ||
448 | #define IBA7220_IBC_SPEED_AUTONEG (1 << 1) | ||
449 | #define IBA7220_IBC_SPEED_SDR (1 << 2) | ||
450 | #define IBA7220_IBC_SPEED_DDR (1 << 3) | ||
451 | #define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1) | ||
452 | #define IBA7220_IBC_IBTA_1_2_MASK (1) | ||
453 | |||
454 | /* kr_ibcddrstatus */ | ||
455 | /* link latency shift is 0, don't bother defining */ | ||
456 | #define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff | ||
457 | |||
458 | /* kr_extstatus bits */ | ||
459 | #define QLOGIC_IB_EXTS_FREQSEL 0x2 | ||
460 | #define QLOGIC_IB_EXTS_SERDESSEL 0x4 | ||
461 | #define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000 | ||
462 | #define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000 | ||
463 | |||
464 | /* kr_xgxsconfig bits */ | ||
465 | #define QLOGIC_IB_XGXS_RESET 0x5ULL | ||
466 | #define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63) | ||
467 | |||
468 | /* kr_rcvpktledcnt */ | ||
469 | #define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */ | ||
470 | #define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */ | ||
471 | |||
472 | #define _QIB_GPIO_SDA_NUM 1 | ||
473 | #define _QIB_GPIO_SCL_NUM 0 | ||
474 | #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */ | ||
475 | #define QIB_TWSI_TEMP_DEV 0x98 | ||
476 | |||
477 | /* HW counter clock is at 4nsec */ | ||
478 | #define QIB_7220_PSXMITWAIT_CHECK_RATE 4000 | ||
479 | |||
480 | #define IBA7220_R_INTRAVAIL_SHIFT 17 | ||
481 | #define IBA7220_R_PKEY_DIS_SHIFT 34 | ||
482 | #define IBA7220_R_TAILUPD_SHIFT 35 | ||
483 | #define IBA7220_R_CTXTCFG_SHIFT 36 | ||
484 | |||
485 | #define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ | ||
486 | |||
487 | /* | ||
488 | * the size bits give us 2^N, in KB units. 0 marks as invalid, | ||
489 | * and 7 is reserved. We currently use only 2KB and 4KB | ||
490 | */ | ||
491 | #define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */ | ||
492 | #define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */ | ||
493 | #define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */ | ||
494 | #define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ | ||
495 | #define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */ | ||
496 | #define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */ | ||
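A sketch of how these TID fields might combine into an expected-TID entry; the real qib_7220_put_tid() is elsewhere in this patch, so the exact composition below is an assumption and the variable names are hypothetical:

    u64 tidword = (physaddr >> IBA7220_TID_PA_SHIFT) | IBA7220_TID_SZ_4K;
    /* the low 11 address bits are dropped, and bits 37..39 carry the 2^N KB
     * size selector (1 = 2KB, 2 = 4KB, 0 = invalid, 7 = reserved) */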
497 | |||
498 | #define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */ | ||
499 | |||
500 | /* packet rate matching delay multiplier */ | ||
501 | static u8 rate_to_delay[2][2] = { | ||
502 | /* 1x, 4x */ | ||
503 | { 8, 2 }, /* SDR */ | ||
504 | { 4, 1 } /* DDR */ | ||
505 | }; | ||
506 | |||
507 | static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = { | ||
508 | [IB_RATE_2_5_GBPS] = 8, | ||
509 | [IB_RATE_5_GBPS] = 4, | ||
510 | [IB_RATE_10_GBPS] = 2, | ||
511 | [IB_RATE_20_GBPS] = 1 | ||
512 | }; | ||
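The two tables express the same delay multipliers from different starting points: rate_to_delay[] is indexed by the local speed (SDR/DDR) and width (1X/4X), while ib_rate_to_delay[] is indexed by the IB static-rate enum. For example:

    /*
     *   rate_to_delay[1][1] == ib_rate_to_delay[IB_RATE_20_GBPS]  == 1  (DDR 4X)
     *   rate_to_delay[0][0] == ib_rate_to_delay[IB_RATE_2_5_GBPS] == 8  (SDR 1X)
     * i.e. a DDR 4X link uses the smallest packet-rate-matching multiplier.
     */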
513 | |||
514 | #define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive) | ||
515 | #define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive) | ||
516 | |||
517 | /* link training states, from IBC */ | ||
518 | #define IB_7220_LT_STATE_DISABLED 0x00 | ||
519 | #define IB_7220_LT_STATE_LINKUP 0x01 | ||
520 | #define IB_7220_LT_STATE_POLLACTIVE 0x02 | ||
521 | #define IB_7220_LT_STATE_POLLQUIET 0x03 | ||
522 | #define IB_7220_LT_STATE_SLEEPDELAY 0x04 | ||
523 | #define IB_7220_LT_STATE_SLEEPQUIET 0x05 | ||
524 | #define IB_7220_LT_STATE_CFGDEBOUNCE 0x08 | ||
525 | #define IB_7220_LT_STATE_CFGRCVFCFG 0x09 | ||
526 | #define IB_7220_LT_STATE_CFGWAITRMT 0x0a | ||
527 | #define IB_7220_LT_STATE_CFGIDLE 0x0b | ||
528 | #define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c | ||
529 | #define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e | ||
530 | #define IB_7220_LT_STATE_RECOVERIDLE 0x0f | ||
531 | |||
532 | /* link state machine states from IBC */ | ||
533 | #define IB_7220_L_STATE_DOWN 0x0 | ||
534 | #define IB_7220_L_STATE_INIT 0x1 | ||
535 | #define IB_7220_L_STATE_ARM 0x2 | ||
536 | #define IB_7220_L_STATE_ACTIVE 0x3 | ||
537 | #define IB_7220_L_STATE_ACT_DEFER 0x4 | ||
538 | |||
539 | static const u8 qib_7220_physportstate[0x20] = { | ||
540 | [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, | ||
541 | [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, | ||
542 | [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, | ||
543 | [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, | ||
544 | [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, | ||
545 | [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, | ||
546 | [IB_7220_LT_STATE_CFGDEBOUNCE] = | ||
547 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
548 | [IB_7220_LT_STATE_CFGRCVFCFG] = | ||
549 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
550 | [IB_7220_LT_STATE_CFGWAITRMT] = | ||
551 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
552 | [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
553 | [IB_7220_LT_STATE_RECOVERRETRAIN] = | ||
554 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
555 | [IB_7220_LT_STATE_RECOVERWAITRMT] = | ||
556 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
557 | [IB_7220_LT_STATE_RECOVERIDLE] = | ||
558 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
559 | [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
560 | [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
561 | [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
562 | [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
563 | [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
564 | [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
565 | [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
566 | [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN | ||
567 | }; | ||
568 | |||
569 | int qib_special_trigger; | ||
570 | module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO); | ||
571 | MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch"); | ||
572 | |||
573 | #define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr) | ||
574 | #define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr) | ||
575 | |||
576 | #define SYM_MASK_BIT(regname, fldname, bit) ((u64) \ | ||
577 | (1ULL << (SYM_LSB(regname, fldname) + (bit)))) | ||
578 | |||
579 | #define TXEMEMPARITYERR_PIOBUF \ | ||
580 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0) | ||
581 | #define TXEMEMPARITYERR_PIOPBC \ | ||
582 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1) | ||
583 | #define TXEMEMPARITYERR_PIOLAUNCHFIFO \ | ||
584 | SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2) | ||
585 | |||
586 | #define RXEMEMPARITYERR_RCVBUF \ | ||
587 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0) | ||
588 | #define RXEMEMPARITYERR_LOOKUPQ \ | ||
589 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1) | ||
590 | #define RXEMEMPARITYERR_EXPTID \ | ||
591 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2) | ||
592 | #define RXEMEMPARITYERR_EAGERTID \ | ||
593 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3) | ||
594 | #define RXEMEMPARITYERR_FLAGBUF \ | ||
595 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4) | ||
596 | #define RXEMEMPARITYERR_DATAINFO \ | ||
597 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5) | ||
598 | #define RXEMEMPARITYERR_HDRINFO \ | ||
599 | SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6) | ||
600 | |||
601 | /* 7220 specific hardware errors... */ | ||
602 | static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = { | ||
603 | /* generic hardware errors */ | ||
604 | QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"), | ||
605 | QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"), | ||
606 | |||
607 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF, | ||
608 | "TXE PIOBUF Memory Parity"), | ||
609 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC, | ||
610 | "TXE PIOPBC Memory Parity"), | ||
611 | QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO, | ||
612 | "TXE PIOLAUNCHFIFO Memory Parity"), | ||
613 | |||
614 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF, | ||
615 | "RXE RCVBUF Memory Parity"), | ||
616 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ, | ||
617 | "RXE LOOKUPQ Memory Parity"), | ||
618 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID, | ||
619 | "RXE EAGERTID Memory Parity"), | ||
620 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID, | ||
621 | "RXE EXPTID Memory Parity"), | ||
622 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF, | ||
623 | "RXE FLAGBUF Memory Parity"), | ||
624 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO, | ||
625 | "RXE DATAINFO Memory Parity"), | ||
626 | QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO, | ||
627 | "RXE HDRINFO Memory Parity"), | ||
628 | |||
629 | /* chip-specific hardware errors */ | ||
630 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP, | ||
631 | "PCIe Poisoned TLP"), | ||
632 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT, | ||
633 | "PCIe completion timeout"), | ||
634 | /* | ||
635 | * In practice, it's unlikely that we'll see PCIe PLL, or bus | ||
636 | * parity or memory parity error failures, because most likely we | ||
637 | * won't be able to talk to the core of the chip. Nonetheless, we | ||
638 | * might see them, if they are in parts of the PCIe core that aren't | ||
639 | * essential. | ||
640 | */ | ||
641 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED, | ||
642 | "PCIePLL1"), | ||
643 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED, | ||
644 | "PCIePLL0"), | ||
645 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH, | ||
646 | "PCIe XTLH core parity"), | ||
647 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM, | ||
648 | "PCIe ADM TX core parity"), | ||
649 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM, | ||
650 | "PCIe ADM RX core parity"), | ||
651 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED, | ||
652 | "SerDes PLL"), | ||
653 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR, | ||
654 | "PCIe cpl header queue"), | ||
655 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR, | ||
656 | "PCIe cpl data queue"), | ||
657 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR, | ||
658 | "Send DMA memory read"), | ||
659 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED, | ||
660 | "uC PLL clock not locked"), | ||
661 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT, | ||
662 | "PCIe serdes Q0 no clock"), | ||
663 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT, | ||
664 | "PCIe serdes Q1 no clock"), | ||
665 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT, | ||
666 | "PCIe serdes Q2 no clock"), | ||
667 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT, | ||
668 | "PCIe serdes Q3 no clock"), | ||
669 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR, | ||
670 | "DDS RXEQ memory parity"), | ||
671 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR, | ||
672 | "IB uC memory parity"), | ||
673 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR, | ||
674 | "PCIe uC oct0 memory parity"), | ||
675 | QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR, | ||
676 | "PCIe uC oct1 memory parity"), | ||
677 | }; | ||
678 | |||
679 | #define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID) | ||
680 | |||
681 | #define QLOGIC_IB_E_PKTERRS (\ | ||
682 | ERR_MASK(SendPktLenErr) | \ | ||
683 | ERR_MASK(SendDroppedDataPktErr) | \ | ||
684 | ERR_MASK(RcvVCRCErr) | \ | ||
685 | ERR_MASK(RcvICRCErr) | \ | ||
686 | ERR_MASK(RcvShortPktLenErr) | \ | ||
687 | ERR_MASK(RcvEBPErr)) | ||
688 | |||
689 | /* Convenience for decoding Send DMA errors */ | ||
690 | #define QLOGIC_IB_E_SDMAERRS ( \ | ||
691 | ERR_MASK(SDmaGenMismatchErr) | \ | ||
692 | ERR_MASK(SDmaOutOfBoundErr) | \ | ||
693 | ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \ | ||
694 | ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \ | ||
695 | ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \ | ||
696 | ERR_MASK(SDmaUnexpDataErr) | \ | ||
697 | ERR_MASK(SDmaDescAddrMisalignErr) | \ | ||
698 | ERR_MASK(SDmaDisabledErr) | \ | ||
699 | ERR_MASK(SendBufMisuseErr)) | ||
700 | |||
701 | /* These are all rcv-related errors which we want to count for stats */ | ||
702 | #define E_SUM_PKTERRS \ | ||
703 | (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \ | ||
704 | ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \ | ||
705 | ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \ | ||
706 | ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
707 | ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \ | ||
708 | ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr)) | ||
709 | |||
710 | /* These are all send-related errors which we want to count for stats */ | ||
711 | #define E_SUM_ERRS \ | ||
712 | (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \ | ||
713 | ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
714 | ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \ | ||
715 | ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ | ||
716 | ERR_MASK(InvalidAddrErr)) | ||
717 | |||
718 | /* | ||
719 | * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors, | ||
720 | * and must not ignore errors unrelated to freeze and cancelling buffers. | ||
721 | * Armlaunch can't be ignored because more may occur while we are still | ||
722 | * cleaning up, and those need to be cancelled as they happen. | ||
723 | */ | ||
724 | #define E_SPKT_ERRS_IGNORE \ | ||
725 | (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
726 | ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \ | ||
727 | ERR_MASK(SendPktLenErr)) | ||
728 | |||
729 | /* | ||
730 | * these are errors that can occur when the link changes state while | ||
731 | * a packet is being sent or received. This doesn't cover things | ||
732 | * like EBP or VCRC that can result from the link changing state during | ||
733 | * a send, so that we receive a "known bad" packet. | ||
734 | */ | ||
735 | #define E_SUM_LINK_PKTERRS \ | ||
736 | (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \ | ||
737 | ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \ | ||
738 | ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \ | ||
739 | ERR_MASK(RcvUnexpectedCharErr)) | ||
740 | |||
741 | static void autoneg_7220_work(struct work_struct *); | ||
742 | static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *); | ||
743 | |||
744 | /* | ||
745 | * Called when we might have an error that is specific to a particular | ||
746 | * PIO buffer, and may need to cancel that buffer so it can be re-used; | ||
747 | * we don't need to force an update of pioavail here. | ||
748 | */ | ||
749 | static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd) | ||
750 | { | ||
751 | unsigned long sbuf[3]; | ||
752 | struct qib_devdata *dd = ppd->dd; | ||
753 | |||
754 | /* | ||
755 | * It's possible that sendbuffererror could have bits set; might | ||
756 | * have already done this as a result of hardware error handling. | ||
757 | */ | ||
758 | /* read these before writing errorclear */ | ||
759 | sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); | ||
760 | sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); | ||
761 | sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2); | ||
762 | |||
763 | if (sbuf[0] || sbuf[1] || sbuf[2]) | ||
764 | qib_disarm_piobufs_set(dd, sbuf, | ||
765 | dd->piobcnt2k + dd->piobcnt4k); | ||
766 | } | ||
767 | |||
768 | static void qib_7220_txe_recover(struct qib_devdata *dd) | ||
769 | { | ||
770 | qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n"); | ||
771 | qib_disarm_7220_senderrbufs(dd->pport); | ||
772 | } | ||
773 | |||
774 | /* | ||
775 | * This is called with interrupts disabled and sdma_lock held. | ||
776 | */ | ||
777 | static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) | ||
778 | { | ||
779 | struct qib_devdata *dd = ppd->dd; | ||
780 | u64 set_sendctrl = 0; | ||
781 | u64 clr_sendctrl = 0; | ||
782 | |||
783 | if (op & QIB_SDMA_SENDCTRL_OP_ENABLE) | ||
784 | set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable); | ||
785 | else | ||
786 | clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable); | ||
787 | |||
788 | if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE) | ||
789 | set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable); | ||
790 | else | ||
791 | clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable); | ||
792 | |||
793 | if (op & QIB_SDMA_SENDCTRL_OP_HALT) | ||
794 | set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt); | ||
795 | else | ||
796 | clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt); | ||
797 | |||
798 | spin_lock(&dd->sendctrl_lock); | ||
799 | |||
800 | dd->sendctrl |= set_sendctrl; | ||
801 | dd->sendctrl &= ~clr_sendctrl; | ||
802 | |||
803 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
804 | qib_write_kreg(dd, kr_scratch, 0); | ||
805 | |||
806 | spin_unlock(&dd->sendctrl_lock); | ||
807 | } | ||
808 | |||
809 | static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd, | ||
810 | u64 err, char *buf, size_t blen) | ||
811 | { | ||
812 | static const struct { | ||
813 | u64 err; | ||
814 | const char *msg; | ||
815 | } errs[] = { | ||
816 | { ERR_MASK(SDmaGenMismatchErr), | ||
817 | "SDmaGenMismatch" }, | ||
818 | { ERR_MASK(SDmaOutOfBoundErr), | ||
819 | "SDmaOutOfBound" }, | ||
820 | { ERR_MASK(SDmaTailOutOfBoundErr), | ||
821 | "SDmaTailOutOfBound" }, | ||
822 | { ERR_MASK(SDmaBaseErr), | ||
823 | "SDmaBase" }, | ||
824 | { ERR_MASK(SDma1stDescErr), | ||
825 | "SDma1stDesc" }, | ||
826 | { ERR_MASK(SDmaRpyTagErr), | ||
827 | "SDmaRpyTag" }, | ||
828 | { ERR_MASK(SDmaDwEnErr), | ||
829 | "SDmaDwEn" }, | ||
830 | { ERR_MASK(SDmaMissingDwErr), | ||
831 | "SDmaMissingDw" }, | ||
832 | { ERR_MASK(SDmaUnexpDataErr), | ||
833 | "SDmaUnexpData" }, | ||
834 | { ERR_MASK(SDmaDescAddrMisalignErr), | ||
835 | "SDmaDescAddrMisalign" }, | ||
836 | { ERR_MASK(SendBufMisuseErr), | ||
837 | "SendBufMisuse" }, | ||
838 | { ERR_MASK(SDmaDisabledErr), | ||
839 | "SDmaDisabled" }, | ||
840 | }; | ||
841 | int i; | ||
842 | size_t bidx = 0; | ||
843 | |||
844 | for (i = 0; i < ARRAY_SIZE(errs); i++) { | ||
845 | if (err & errs[i].err) | ||
846 | bidx += scnprintf(buf + bidx, blen - bidx, | ||
847 | "%s ", errs[i].msg); | ||
848 | } | ||
849 | } | ||
850 | |||
851 | /* | ||
852 | * This is called as part of link down clean up so disarm and flush | ||
853 | * all send buffers so that SMP packets can be sent. | ||
854 | */ | ||
855 | static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd) | ||
856 | { | ||
857 | /* This will trigger the Abort interrupt */ | ||
858 | sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH | | ||
859 | QIB_SENDCTRL_AVAIL_BLIP); | ||
860 | ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */ | ||
861 | } | ||
862 | |||
863 | static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd) | ||
864 | { | ||
865 | /* | ||
866 | * Set SendDmaLenGen, then clear and set | ||
867 | * the MSB of the generation count, to enable generation checking | ||
868 | * and load the internal generation counter. | ||
869 | */ | ||
870 | qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt); | ||
871 | qib_write_kreg(ppd->dd, kr_senddmalengen, | ||
872 | ppd->sdma_descq_cnt | | ||
873 | (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB)); | ||
874 | } | ||
875 | |||
876 | static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd) | ||
877 | { | ||
878 | qib_sdma_7220_setlengen(ppd); | ||
879 | qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */ | ||
880 | ppd->sdma_head_dma[0] = 0; | ||
881 | } | ||
882 | |||
883 | #define DISABLES_SDMA ( \ | ||
884 | ERR_MASK(SDmaDisabledErr) | \ | ||
885 | ERR_MASK(SDmaBaseErr) | \ | ||
886 | ERR_MASK(SDmaTailOutOfBoundErr) | \ | ||
887 | ERR_MASK(SDmaOutOfBoundErr) | \ | ||
888 | ERR_MASK(SDma1stDescErr) | \ | ||
889 | ERR_MASK(SDmaRpyTagErr) | \ | ||
890 | ERR_MASK(SDmaGenMismatchErr) | \ | ||
891 | ERR_MASK(SDmaDescAddrMisalignErr) | \ | ||
892 | ERR_MASK(SDmaMissingDwErr) | \ | ||
893 | ERR_MASK(SDmaDwEnErr)) | ||
894 | |||
895 | static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs) | ||
896 | { | ||
897 | unsigned long flags; | ||
898 | struct qib_devdata *dd = ppd->dd; | ||
899 | char *msg; | ||
900 | |||
901 | errs &= QLOGIC_IB_E_SDMAERRS; | ||
902 | |||
903 | msg = dd->cspec->sdmamsgbuf; | ||
904 | qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); | ||
905 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
906 | |||
907 | if (errs & ERR_MASK(SendBufMisuseErr)) { | ||
908 | unsigned long sbuf[3]; | ||
909 | |||
910 | sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror); | ||
911 | sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1); | ||
912 | sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2); | ||
913 | |||
914 | qib_dev_err(ppd->dd, | ||
915 | "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n", | ||
916 | ppd->dd->unit, ppd->port, sbuf[2], sbuf[1], | ||
917 | sbuf[0]); | ||
918 | } | ||
919 | |||
920 | if (errs & ERR_MASK(SDmaUnexpDataErr)) | ||
921 | qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit, | ||
922 | ppd->port); | ||
923 | |||
924 | switch (ppd->sdma_state.current_state) { | ||
925 | case qib_sdma_state_s00_hw_down: | ||
926 | /* not expecting any interrupts */ | ||
927 | break; | ||
928 | |||
929 | case qib_sdma_state_s10_hw_start_up_wait: | ||
930 | /* handled in intr path */ | ||
931 | break; | ||
932 | |||
933 | case qib_sdma_state_s20_idle: | ||
934 | /* not expecting any interrupts */ | ||
935 | break; | ||
936 | |||
937 | case qib_sdma_state_s30_sw_clean_up_wait: | ||
938 | /* not expecting any interrupts */ | ||
939 | break; | ||
940 | |||
941 | case qib_sdma_state_s40_hw_clean_up_wait: | ||
942 | if (errs & ERR_MASK(SDmaDisabledErr)) | ||
943 | __qib_sdma_process_event(ppd, | ||
944 | qib_sdma_event_e50_hw_cleaned); | ||
945 | break; | ||
946 | |||
947 | case qib_sdma_state_s50_hw_halt_wait: | ||
948 | /* handled in intr path */ | ||
949 | break; | ||
950 | |||
951 | case qib_sdma_state_s99_running: | ||
952 | if (errs & DISABLES_SDMA) | ||
953 | __qib_sdma_process_event(ppd, | ||
954 | qib_sdma_event_e7220_err_halted); | ||
955 | break; | ||
956 | } | ||
957 | |||
958 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
959 | } | ||
960 | |||
961 | /* | ||
962 | * Decode the error status into strings, deciding whether to always | ||
963 | * print * it or not depending on "normal packet errors" vs everything | ||
964 | * else. Return 1 if "real" errors, otherwise 0 if only packet | ||
965 | * errors, so caller can decide what to print with the string. | ||
966 | */ | ||
967 | static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen, | ||
968 | u64 err) | ||
969 | { | ||
970 | int iserr = 1; | ||
971 | |||
972 | *buf = '\0'; | ||
973 | if (err & QLOGIC_IB_E_PKTERRS) { | ||
974 | if (!(err & ~QLOGIC_IB_E_PKTERRS)) | ||
975 | iserr = 0; | ||
976 | if ((err & ERR_MASK(RcvICRCErr)) && | ||
977 | !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr)))) | ||
978 | strlcat(buf, "CRC ", blen); | ||
979 | if (!iserr) | ||
980 | goto done; | ||
981 | } | ||
982 | if (err & ERR_MASK(RcvHdrLenErr)) | ||
983 | strlcat(buf, "rhdrlen ", blen); | ||
984 | if (err & ERR_MASK(RcvBadTidErr)) | ||
985 | strlcat(buf, "rbadtid ", blen); | ||
986 | if (err & ERR_MASK(RcvBadVersionErr)) | ||
987 | strlcat(buf, "rbadversion ", blen); | ||
988 | if (err & ERR_MASK(RcvHdrErr)) | ||
989 | strlcat(buf, "rhdr ", blen); | ||
990 | if (err & ERR_MASK(SendSpecialTriggerErr)) | ||
991 | strlcat(buf, "sendspecialtrigger ", blen); | ||
992 | if (err & ERR_MASK(RcvLongPktLenErr)) | ||
993 | strlcat(buf, "rlongpktlen ", blen); | ||
994 | if (err & ERR_MASK(RcvMaxPktLenErr)) | ||
995 | strlcat(buf, "rmaxpktlen ", blen); | ||
996 | if (err & ERR_MASK(RcvMinPktLenErr)) | ||
997 | strlcat(buf, "rminpktlen ", blen); | ||
998 | if (err & ERR_MASK(SendMinPktLenErr)) | ||
999 | strlcat(buf, "sminpktlen ", blen); | ||
1000 | if (err & ERR_MASK(RcvFormatErr)) | ||
1001 | strlcat(buf, "rformaterr ", blen); | ||
1002 | if (err & ERR_MASK(RcvUnsupportedVLErr)) | ||
1003 | strlcat(buf, "runsupvl ", blen); | ||
1004 | if (err & ERR_MASK(RcvUnexpectedCharErr)) | ||
1005 | strlcat(buf, "runexpchar ", blen); | ||
1006 | if (err & ERR_MASK(RcvIBFlowErr)) | ||
1007 | strlcat(buf, "ribflow ", blen); | ||
1008 | if (err & ERR_MASK(SendUnderRunErr)) | ||
1009 | strlcat(buf, "sunderrun ", blen); | ||
1010 | if (err & ERR_MASK(SendPioArmLaunchErr)) | ||
1011 | strlcat(buf, "spioarmlaunch ", blen); | ||
1012 | if (err & ERR_MASK(SendUnexpectedPktNumErr)) | ||
1013 | strlcat(buf, "sunexperrpktnum ", blen); | ||
1014 | if (err & ERR_MASK(SendDroppedSmpPktErr)) | ||
1015 | strlcat(buf, "sdroppedsmppkt ", blen); | ||
1016 | if (err & ERR_MASK(SendMaxPktLenErr)) | ||
1017 | strlcat(buf, "smaxpktlen ", blen); | ||
1018 | if (err & ERR_MASK(SendUnsupportedVLErr)) | ||
1019 | strlcat(buf, "sunsupVL ", blen); | ||
1020 | if (err & ERR_MASK(InvalidAddrErr)) | ||
1021 | strlcat(buf, "invalidaddr ", blen); | ||
1022 | if (err & ERR_MASK(RcvEgrFullErr)) | ||
1023 | strlcat(buf, "rcvegrfull ", blen); | ||
1024 | if (err & ERR_MASK(RcvHdrFullErr)) | ||
1025 | strlcat(buf, "rcvhdrfull ", blen); | ||
1026 | if (err & ERR_MASK(IBStatusChanged)) | ||
1027 | strlcat(buf, "ibcstatuschg ", blen); | ||
1028 | if (err & ERR_MASK(RcvIBLostLinkErr)) | ||
1029 | strlcat(buf, "riblostlink ", blen); | ||
1030 | if (err & ERR_MASK(HardwareErr)) | ||
1031 | strlcat(buf, "hardware ", blen); | ||
1032 | if (err & ERR_MASK(ResetNegated)) | ||
1033 | strlcat(buf, "reset ", blen); | ||
1034 | if (err & QLOGIC_IB_E_SDMAERRS) | ||
1035 | qib_decode_7220_sdma_errs(dd->pport, err, buf, blen); | ||
1036 | if (err & ERR_MASK(InvalidEEPCmd)) | ||
1037 | strlcat(buf, "invalideepromcmd ", blen); | ||
1038 | done: | ||
1039 | return iserr; | ||
1040 | } | ||
1041 | |||
1042 | static void reenable_7220_chase(unsigned long opaque) | ||
1043 | { | ||
1044 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | ||
1045 | ppd->cpspec->chase_timer.expires = 0; | ||
1046 | qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | ||
1047 | QLOGIC_IB_IBCC_LINKINITCMD_POLL); | ||
1048 | } | ||
1049 | |||
1050 | static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst) | ||
1051 | { | ||
1052 | u8 ibclt; | ||
1053 | u64 tnow; | ||
1054 | |||
1055 | ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState); | ||
1056 | |||
1057 | /* | ||
1058 | * Detect and handle the state chase issue, where we can | ||
1059 | * get stuck if we are unlucky on timing on both sides of | ||
1060 | * the link. If we are, we disable, set a timer, and | ||
1061 | * then re-enable. | ||
1062 | */ | ||
1063 | switch (ibclt) { | ||
1064 | case IB_7220_LT_STATE_CFGRCVFCFG: | ||
1065 | case IB_7220_LT_STATE_CFGWAITRMT: | ||
1066 | case IB_7220_LT_STATE_TXREVLANES: | ||
1067 | case IB_7220_LT_STATE_CFGENH: | ||
1068 | tnow = get_jiffies_64(); | ||
1069 | if (ppd->cpspec->chase_end && | ||
1070 | time_after64(tnow, ppd->cpspec->chase_end)) { | ||
1071 | ppd->cpspec->chase_end = 0; | ||
1072 | qib_set_ib_7220_lstate(ppd, | ||
1073 | QLOGIC_IB_IBCC_LINKCMD_DOWN, | ||
1074 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
1075 | ppd->cpspec->chase_timer.expires = jiffies + | ||
1076 | QIB_CHASE_DIS_TIME; | ||
1077 | add_timer(&ppd->cpspec->chase_timer); | ||
1078 | } else if (!ppd->cpspec->chase_end) | ||
1079 | ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME; | ||
1080 | break; | ||
1081 | |||
1082 | default: | ||
1083 | ppd->cpspec->chase_end = 0; | ||
1084 | break; | ||
1085 | } | ||
1086 | } | ||
1087 | |||
1088 | static void handle_7220_errors(struct qib_devdata *dd, u64 errs) | ||
1089 | { | ||
1090 | char *msg; | ||
1091 | u64 ignore_this_time = 0; | ||
1092 | u64 iserr = 0; | ||
1093 | int log_idx; | ||
1094 | struct qib_pportdata *ppd = dd->pport; | ||
1095 | u64 mask; | ||
1096 | |||
1097 | /* don't report errors that are masked */ | ||
1098 | errs &= dd->cspec->errormask; | ||
1099 | msg = dd->cspec->emsgbuf; | ||
1100 | |||
1101 | /* do these first, they are most important */ | ||
1102 | if (errs & ERR_MASK(HardwareErr)) | ||
1103 | qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); | ||
1104 | else | ||
1105 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | ||
1106 | if (errs & dd->eep_st_masks[log_idx].errs_to_log) | ||
1107 | qib_inc_eeprom_err(dd, log_idx, 1); | ||
1108 | |||
1109 | if (errs & QLOGIC_IB_E_SDMAERRS) | ||
1110 | sdma_7220_errors(ppd, errs); | ||
1111 | |||
1112 | if (errs & ~IB_E_BITSEXTANT) | ||
1113 | qib_dev_err(dd, "error interrupt with unknown errors " | ||
1114 | "%llx set\n", (unsigned long long) | ||
1115 | (errs & ~IB_E_BITSEXTANT)); | ||
1116 | |||
1117 | if (errs & E_SUM_ERRS) { | ||
1118 | qib_disarm_7220_senderrbufs(ppd); | ||
1119 | if ((errs & E_SUM_LINK_PKTERRS) && | ||
1120 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
1121 | /* | ||
1122 | * This can happen when trying to bring the link | ||
1123 | * up, but the IB link changes state at the "wrong" | ||
1124 | * time. The IB logic then complains that the packet | ||
1125 | * isn't valid. We don't want to confuse people, so | ||
1126 | * we just don't print them, except at debug | ||
1127 | */ | ||
1128 | ignore_this_time = errs & E_SUM_LINK_PKTERRS; | ||
1129 | } | ||
1130 | } else if ((errs & E_SUM_LINK_PKTERRS) && | ||
1131 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
1132 | /* | ||
1133 | * This can happen when SMA is trying to bring the link | ||
1134 | * up, but the IB link changes state at the "wrong" time. | ||
1135 | * The IB logic then complains that the packet isn't | ||
1136 | * valid. We don't want to confuse people, so we just | ||
1137 | * don't print them, except at debug | ||
1138 | */ | ||
1139 | ignore_this_time = errs & E_SUM_LINK_PKTERRS; | ||
1140 | } | ||
1141 | |||
1142 | qib_write_kreg(dd, kr_errclear, errs); | ||
1143 | |||
1144 | errs &= ~ignore_this_time; | ||
1145 | if (!errs) | ||
1146 | goto done; | ||
1147 | |||
1148 | /* | ||
1149 | * The ones we mask off are handled specially below | ||
1150 | * or above. Also mask SDMADISABLED by default as it | ||
1151 | * is too chatty. | ||
1152 | */ | ||
1153 | mask = ERR_MASK(IBStatusChanged) | | ||
1154 | ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | | ||
1155 | ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); | ||
1156 | |||
1157 | qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); | ||
1158 | |||
1159 | if (errs & E_SUM_PKTERRS) | ||
1160 | qib_stats.sps_rcverrs++; | ||
1161 | if (errs & E_SUM_ERRS) | ||
1162 | qib_stats.sps_txerrs++; | ||
1163 | iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS | | ||
1164 | ERR_MASK(SDmaDisabledErr)); | ||
1165 | |||
1166 | if (errs & ERR_MASK(IBStatusChanged)) { | ||
1167 | u64 ibcs; | ||
1168 | |||
1169 | ibcs = qib_read_kreg64(dd, kr_ibcstatus); | ||
1170 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
1171 | handle_7220_chase(ppd, ibcs); | ||
1172 | |||
1173 | /* Update our picture of width and speed from chip */ | ||
1174 | ppd->link_width_active = | ||
1175 | ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ? | ||
1176 | IB_WIDTH_4X : IB_WIDTH_1X; | ||
1177 | ppd->link_speed_active = | ||
1178 | ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ? | ||
1179 | QIB_IB_DDR : QIB_IB_SDR; | ||
1180 | |||
1181 | /* | ||
1182 | * Since going into a recovery state causes the link state | ||
1183 | * to go down and since recovery is transitory, it is better | ||
1184 | * if we "miss" ever seeing the link training state go into | ||
1185 | * recovery (i.e., ignore this transition for link state | ||
1186 | * special handling purposes) without updating lastibcstat. | ||
1187 | */ | ||
1188 | if (qib_7220_phys_portstate(ibcs) != | ||
1189 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER) | ||
1190 | qib_handle_e_ibstatuschanged(ppd, ibcs); | ||
1191 | } | ||
1192 | |||
1193 | if (errs & ERR_MASK(ResetNegated)) { | ||
1194 | qib_dev_err(dd, "Got reset, requires re-init " | ||
1195 | "(unload and reload driver)\n"); | ||
1196 | dd->flags &= ~QIB_INITTED; /* needs re-init */ | ||
1197 | /* mark as having had error */ | ||
1198 | *dd->devstatusp |= QIB_STATUS_HWERROR; | ||
1199 | *dd->pport->statusp &= ~QIB_STATUS_IB_CONF; | ||
1200 | } | ||
1201 | |||
1202 | if (*msg && iserr) | ||
1203 | qib_dev_porterr(dd, ppd->port, "%s error\n", msg); | ||
1204 | |||
1205 | if (ppd->state_wanted & ppd->lflags) | ||
1206 | wake_up_interruptible(&ppd->state_wait); | ||
1207 | |||
1208 | /* | ||
1209 | * If there were hdrq or egrfull errors, wake up any processes | ||
1210 | * waiting in poll. We used to try to check which contexts had | ||
1211 | * the overflow, but given the cost of that and the chip reads | ||
1212 | * to support it, it's better to just wake everybody up if we | ||
1213 | * get an overflow; waiters can poll again if it's not them. | ||
1214 | */ | ||
1215 | if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) { | ||
1216 | qib_handle_urcv(dd, ~0U); | ||
1217 | if (errs & ERR_MASK(RcvEgrFullErr)) | ||
1218 | qib_stats.sps_buffull++; | ||
1219 | else | ||
1220 | qib_stats.sps_hdrfull++; | ||
1221 | } | ||
1222 | done: | ||
1223 | return; | ||
1224 | } | ||
1225 | |||
1226 | /* enable/disable chip from delivering interrupts */ | ||
1227 | static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable) | ||
1228 | { | ||
1229 | if (enable) { | ||
1230 | if (dd->flags & QIB_BADINTR) | ||
1231 | return; | ||
1232 | qib_write_kreg(dd, kr_intmask, ~0ULL); | ||
1233 | /* force re-interrupt of any pending interrupts. */ | ||
1234 | qib_write_kreg(dd, kr_intclear, 0ULL); | ||
1235 | } else | ||
1236 | qib_write_kreg(dd, kr_intmask, 0ULL); | ||
1237 | } | ||
1238 | |||
1239 | /* | ||
1240 | * Try to clean up as much as possible for anything that might have gone | ||
1241 | * wrong while in freeze mode, such as pio buffers being written by user | ||
1242 | * processes (causing armlaunch), send errors due to going into freeze mode, | ||
1243 | * etc., and try to avoid causing extra interrupts while doing so. | ||
1244 | * Forcibly update the in-memory pioavail register copies after cleanup | ||
1245 | * because the chip won't do it while in freeze mode (the register values | ||
1246 | * themselves are kept correct). | ||
1247 | * Make sure that we don't lose any important interrupts by using the chip | ||
1248 | * feature that says that writing 0 to a bit in *clear that is set in | ||
1249 | * *status will cause an interrupt to be generated again (if allowed by | ||
1250 | * the *mask value). | ||
1251 | * This is in chip-specific code because of all of the register accesses, | ||
1252 | * even though the details are similar on most chips. | ||
1253 | */ | ||
1254 | static void qib_7220_clear_freeze(struct qib_devdata *dd) | ||
1255 | { | ||
1256 | /* disable error interrupts, to avoid confusion */ | ||
1257 | qib_write_kreg(dd, kr_errmask, 0ULL); | ||
1258 | |||
1259 | /* also disable interrupts; errormask is sometimes overwritten */ | ||
1260 | qib_7220_set_intr_state(dd, 0); | ||
1261 | |||
1262 | qib_cancel_sends(dd->pport); | ||
1263 | |||
1264 | /* clear the freeze, and be sure chip saw it */ | ||
1265 | qib_write_kreg(dd, kr_control, dd->control); | ||
1266 | qib_read_kreg32(dd, kr_scratch); | ||
1267 | |||
1268 | /* force in-memory update now we are out of freeze */ | ||
1269 | qib_force_pio_avail_update(dd); | ||
1270 | |||
1271 | /* | ||
1272 | * force new interrupt if any hwerr, error or interrupt bits are | ||
1273 | * still set, and clear "safe" send packet errors related to freeze | ||
1274 | * and cancelling sends. Re-enable error interrupts before possible | ||
1275 | * force of re-interrupt on pending interrupts. | ||
1276 | */ | ||
1277 | qib_write_kreg(dd, kr_hwerrclear, 0ULL); | ||
1278 | qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); | ||
1279 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | ||
1280 | qib_7220_set_intr_state(dd, 1); | ||
1281 | } | ||
1282 | |||
1283 | /** | ||
1284 | * qib_7220_handle_hwerrors - display hardware errors. | ||
1285 | * @dd: the qlogic_ib device | ||
1286 | * @msg: the output buffer | ||
1287 | * @msgl: the size of the output buffer | ||
1288 | * | ||
1289 | * Most hardware errors are catastrophic, but for right now, | ||
1290 | * we'll print them and continue. We reuse the same message | ||
1291 | * buffer as handle_7220_errors() to avoid excessive stack | ||
1292 | * usage. | ||
1293 | */ | ||
1294 | static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, | ||
1295 | size_t msgl) | ||
1296 | { | ||
1297 | u64 hwerrs; | ||
1298 | u32 bits, ctrl; | ||
1299 | int isfatal = 0; | ||
1300 | char *bitsmsg; | ||
1301 | int log_idx; | ||
1302 | |||
1303 | hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); | ||
1304 | if (!hwerrs) | ||
1305 | goto bail; | ||
1306 | if (hwerrs == ~0ULL) { | ||
1307 | qib_dev_err(dd, "Read of hardware error status failed " | ||
1308 | "(all bits set); ignoring\n"); | ||
1309 | goto bail; | ||
1310 | } | ||
1311 | qib_stats.sps_hwerrs++; | ||
1312 | |||
1313 | /* | ||
1314 | * Always clear the error status register, except MEMBISTFAIL, | ||
1315 | * regardless of whether we continue or stop using the chip. | ||
1316 | * We want that set so we know it failed, even across driver reload. | ||
1317 | * We'll still ignore it in the hwerrmask. We do this partly for | ||
1318 | * diagnostics, but also for support. | ||
1319 | */ | ||
1320 | qib_write_kreg(dd, kr_hwerrclear, | ||
1321 | hwerrs & ~HWE_MASK(PowerOnBISTFailed)); | ||
1322 | |||
1323 | hwerrs &= dd->cspec->hwerrmask; | ||
1324 | |||
1325 | /* We log some errors to EEPROM, check if we have any of those. */ | ||
1326 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | ||
1327 | if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log) | ||
1328 | qib_inc_eeprom_err(dd, log_idx, 1); | ||
1329 | if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC | | ||
1330 | RXE_PARITY)) | ||
1331 | qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " | ||
1332 | "(cleared)\n", (unsigned long long) hwerrs); | ||
1333 | |||
1334 | if (hwerrs & ~IB_HWE_BITSEXTANT) | ||
1335 | qib_dev_err(dd, "hwerror interrupt with unknown errors " | ||
1336 | "%llx set\n", (unsigned long long) | ||
1337 | (hwerrs & ~IB_HWE_BITSEXTANT)); | ||
1338 | |||
1339 | if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) | ||
1340 | qib_sd7220_clr_ibpar(dd); | ||
1341 | |||
1342 | ctrl = qib_read_kreg32(dd, kr_control); | ||
1343 | if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) { | ||
1344 | /* | ||
1345 | * Parity errors in send memory are recoverable by h/w; | ||
1346 | * just do housekeeping, exit freeze mode and continue. | ||
1347 | */ | ||
1348 | if (hwerrs & (TXEMEMPARITYERR_PIOBUF | | ||
1349 | TXEMEMPARITYERR_PIOPBC)) { | ||
1350 | qib_7220_txe_recover(dd); | ||
1351 | hwerrs &= ~(TXEMEMPARITYERR_PIOBUF | | ||
1352 | TXEMEMPARITYERR_PIOPBC); | ||
1353 | } | ||
1354 | if (hwerrs) | ||
1355 | isfatal = 1; | ||
1356 | else | ||
1357 | qib_7220_clear_freeze(dd); | ||
1358 | } | ||
1359 | |||
1360 | *msg = '\0'; | ||
1361 | |||
1362 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | ||
1363 | isfatal = 1; | ||
1364 | strlcat(msg, "[Memory BIST test failed, " | ||
1365 | "InfiniPath hardware unusable]", msgl); | ||
1366 | /* ignore from now on, so disable until driver reloaded */ | ||
1367 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | ||
1368 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1369 | } | ||
1370 | |||
1371 | qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs, | ||
1372 | ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl); | ||
1373 | |||
1374 | bitsmsg = dd->cspec->bitsmsgbuf; | ||
1375 | if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << | ||
1376 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) { | ||
1377 | bits = (u32) ((hwerrs >> | ||
1378 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & | ||
1379 | QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); | ||
1380 | snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, | ||
1381 | "[PCIe Mem Parity Errs %x] ", bits); | ||
1382 | strlcat(msg, bitsmsg, msgl); | ||
1383 | } | ||
1384 | |||
1385 | #define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \ | ||
1386 | QLOGIC_IB_HWE_COREPLL_RFSLIP) | ||
1387 | |||
1388 | if (hwerrs & _QIB_PLL_FAIL) { | ||
1389 | isfatal = 1; | ||
1390 | snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, | ||
1391 | "[PLL failed (%llx), InfiniPath hardware unusable]", | ||
1392 | (unsigned long long) hwerrs & _QIB_PLL_FAIL); | ||
1393 | strlcat(msg, bitsmsg, msgl); | ||
1394 | /* ignore from now on, so disable until driver reloaded */ | ||
1395 | dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL); | ||
1396 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1397 | } | ||
1398 | |||
1399 | if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) { | ||
1400 | /* | ||
1401 | * If it occurs, it is left masked since the external | ||
1402 | * interface is unused. | ||
1403 | */ | ||
1404 | dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED; | ||
1405 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1406 | } | ||
1407 | |||
1408 | qib_dev_err(dd, "%s hardware error\n", msg); | ||
1409 | |||
1410 | if (isfatal && !dd->diag_client) { | ||
1411 | qib_dev_err(dd, "Fatal Hardware Error, no longer" | ||
1412 | " usable, SN %.16s\n", dd->serial); | ||
1413 | /* | ||
1414 | * For /sys status file and user programs to print; if no | ||
1415 | * trailing brace is copied, we'll know it was truncated. | ||
1416 | */ | ||
1417 | if (dd->freezemsg) | ||
1418 | snprintf(dd->freezemsg, dd->freezelen, | ||
1419 | "{%s}", msg); | ||
1420 | qib_disable_after_error(dd); | ||
1421 | } | ||
1422 | bail:; | ||
1423 | } | ||
1424 | |||
1425 | /** | ||
1426 | * qib_7220_init_hwerrors - enable hardware errors | ||
1427 | * @dd: the qlogic_ib device | ||
1428 | * | ||
1429 | * now that we have finished initializing everything that might reasonably | ||
1430 | * cause a hardware error, and cleared those error bits as they occur, | ||
1431 | * we can enable hardware errors in the mask (potentially enabling | ||
1432 | * freeze mode), and enable hardware errors as errors (along with | ||
1433 | * everything else) in errormask | ||
1434 | */ | ||
1435 | static void qib_7220_init_hwerrors(struct qib_devdata *dd) | ||
1436 | { | ||
1437 | u64 val; | ||
1438 | u64 extsval; | ||
1439 | |||
1440 | extsval = qib_read_kreg64(dd, kr_extstatus); | ||
1441 | |||
1442 | if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST | | ||
1443 | QLOGIC_IB_EXTS_MEMBIST_DISABLED))) | ||
1444 | qib_dev_err(dd, "MemBIST did not complete!\n"); | ||
1445 | if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED) | ||
1446 | qib_devinfo(dd->pcidev, "MemBIST is disabled.\n"); | ||
1447 | |||
1448 | val = ~0ULL; /* default to all hwerrors becoming interrupts */ | ||
1449 | |||
1450 | val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR; | ||
1451 | dd->cspec->hwerrmask = val; | ||
1452 | |||
1453 | qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed)); | ||
1454 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
1455 | |||
1456 | /* clear all */ | ||
1457 | qib_write_kreg(dd, kr_errclear, ~0ULL); | ||
1458 | /* enable errors that are masked, at least this first time. */ | ||
1459 | qib_write_kreg(dd, kr_errmask, ~0ULL); | ||
1460 | dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); | ||
1461 | /* clear any interrupts up to this point (ints still not enabled) */ | ||
1462 | qib_write_kreg(dd, kr_intclear, ~0ULL); | ||
1463 | } | ||
1464 | |||
1465 | /* | ||
1466 | * Disable and enable the armlaunch error. Used for PIO bandwidth testing | ||
1467 | * on chips that are count-based, rather than trigger-based. There is no | ||
1468 | * reference counting, but that's also fine, given the intended use. | ||
1469 | * Only chip-specific because it's all register accesses | ||
1470 | */ | ||
1471 | static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable) | ||
1472 | { | ||
1473 | if (enable) { | ||
1474 | qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr)); | ||
1475 | dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr); | ||
1476 | } else | ||
1477 | dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr); | ||
1478 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | ||
1479 | } | ||
1480 | |||
1481 | /* | ||
1482 | * Formerly took parameter <which> in pre-shifted, | ||
1483 | * pre-merged form with LinkCmd and LinkInitCmd | ||
1484 | * together, and assuming the zero was NOP. | ||
1485 | */ | ||
1486 | static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd, | ||
1487 | u16 linitcmd) | ||
1488 | { | ||
1489 | u64 mod_wd; | ||
1490 | struct qib_devdata *dd = ppd->dd; | ||
1491 | unsigned long flags; | ||
1492 | |||
1493 | if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { | ||
1494 | /* | ||
1495 | * If we are told to disable, note that so link-recovery | ||
1496 | * code does not attempt to bring us back up. | ||
1497 | */ | ||
1498 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
1499 | ppd->lflags |= QIBL_IB_LINK_DISABLED; | ||
1500 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
1501 | } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { | ||
1502 | /* | ||
1503 | * Any other linkinitcmd will lead to LINKDOWN and then | ||
1504 | * to INIT (if all is well), so clear flag to let | ||
1505 | * link-recovery code attempt to bring us back up. | ||
1506 | */ | ||
1507 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
1508 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | ||
1509 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
1510 | } | ||
1511 | |||
1512 | mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) | | ||
1513 | (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
1514 | |||
1515 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd); | ||
1516 | /* write to chip to prevent back-to-back writes of ibc reg */ | ||
1517 | qib_write_kreg(dd, kr_scratch, 0); | ||
1518 | } | ||
1519 | |||
1520 | /* | ||
1521 | * All detailed interaction with the SerDes has been moved to qib_sd7220.c | ||
1522 | * | ||
1523 | * The portion of IBA7220-specific bringup_serdes() that actually deals with | ||
1524 | * registers and memory within the SerDes itself is qib_sd7220_init(). | ||
1525 | */ | ||
1526 | |||
1527 | /** | ||
1528 | * qib_7220_bringup_serdes - bring up the serdes | ||
1529 | * @ppd: physical port on the qlogic_ib device | ||
1530 | */ | ||
1531 | static int qib_7220_bringup_serdes(struct qib_pportdata *ppd) | ||
1532 | { | ||
1533 | struct qib_devdata *dd = ppd->dd; | ||
1534 | u64 val, prev_val, guid, ibc; | ||
1535 | int ret = 0; | ||
1536 | |||
1537 | /* Put IBC in reset, sends disabled */ | ||
1538 | dd->control &= ~QLOGIC_IB_C_LINKENABLE; | ||
1539 | qib_write_kreg(dd, kr_control, 0ULL); | ||
1540 | |||
1541 | if (qib_compat_ddr_negotiate) { | ||
1542 | ppd->cpspec->ibdeltainprog = 1; | ||
1543 | ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr); | ||
1544 | ppd->cpspec->iblnkerrsnap = | ||
1545 | read_7220_creg32(dd, cr_iblinkerrrecov); | ||
1546 | } | ||
1547 | |||
1548 | /* flowcontrolwatermark is in units of KBytes */ | ||
1549 | ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark); | ||
1550 | /* | ||
1551 | * How often flowctrl is sent. More or less in usecs; balance against | ||
1552 | * watermark value, so that in theory senders always get a flow | ||
1553 | * control update in time to not let the IB link go idle. | ||
1554 | */ | ||
1555 | ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod); | ||
1556 | /* max error tolerance */ | ||
1557 | ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold); | ||
1558 | /* use "real" buffer space for */ | ||
1559 | ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale); | ||
1560 | /* IB credit flow control. */ | ||
1561 | ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold); | ||
1562 | /* | ||
1563 | * set initial max size pkt IBC will send, including ICRC; it's the | ||
1564 | * PIO buffer size in dwords, less 1; also see qib_set_mtu() | ||
1565 | */ | ||
1566 | ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen); | ||
1567 | ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */ | ||
1568 | |||
1569 | /* initially come up waiting for TS1, without sending anything. */ | ||
1570 | val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | ||
1571 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
1572 | qib_write_kreg(dd, kr_ibcctrl, val); | ||
1573 | |||
1574 | if (!ppd->cpspec->ibcddrctrl) { | ||
1575 | /* not on re-init after reset */ | ||
1576 | ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl); | ||
1577 | |||
1578 | if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR)) | ||
1579 | ppd->cpspec->ibcddrctrl |= | ||
1580 | IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
1581 | IBA7220_IBC_IBTA_1_2_MASK; | ||
1582 | else | ||
1583 | ppd->cpspec->ibcddrctrl |= | ||
1584 | ppd->link_speed_enabled == QIB_IB_DDR ? | ||
1585 | IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; | ||
1586 | if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) == | ||
1587 | (IB_WIDTH_1X | IB_WIDTH_4X)) | ||
1588 | ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG; | ||
1589 | else | ||
1590 | ppd->cpspec->ibcddrctrl |= | ||
1591 | ppd->link_width_enabled == IB_WIDTH_4X ? | ||
1592 | IBA7220_IBC_WIDTH_4X_ONLY : | ||
1593 | IBA7220_IBC_WIDTH_1X_ONLY; | ||
1594 | |||
1595 | /* always enable these on driver reload, not sticky */ | ||
1596 | ppd->cpspec->ibcddrctrl |= | ||
1597 | IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT; | ||
1598 | ppd->cpspec->ibcddrctrl |= | ||
1599 | IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; | ||
1600 | |||
1601 | /* enable automatic lane reversal detection for receive */ | ||
1602 | ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED; | ||
1603 | } else | ||
1604 | /* write to chip to prevent back-to-back writes of ibc reg */ | ||
1605 | qib_write_kreg(dd, kr_scratch, 0); | ||
1606 | |||
1607 | qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); | ||
1608 | qib_write_kreg(dd, kr_scratch, 0); | ||
1609 | |||
1610 | qib_write_kreg(dd, kr_ncmodectrl, 0Ull); | ||
1611 | qib_write_kreg(dd, kr_scratch, 0); | ||
1612 | |||
1613 | ret = qib_sd7220_init(dd); | ||
1614 | |||
1615 | val = qib_read_kreg64(dd, kr_xgxs_cfg); | ||
1616 | prev_val = val; | ||
1617 | val |= QLOGIC_IB_XGXS_FC_SAFE; | ||
1618 | if (val != prev_val) { | ||
1619 | qib_write_kreg(dd, kr_xgxs_cfg, val); | ||
1620 | qib_read_kreg32(dd, kr_scratch); | ||
1621 | } | ||
1622 | if (val & QLOGIC_IB_XGXS_RESET) | ||
1623 | val &= ~QLOGIC_IB_XGXS_RESET; | ||
1624 | if (val != prev_val) | ||
1625 | qib_write_kreg(dd, kr_xgxs_cfg, val); | ||
1626 | |||
1627 | /* first time through, set port guid */ | ||
1628 | if (!ppd->guid) | ||
1629 | ppd->guid = dd->base_guid; | ||
1630 | guid = be64_to_cpu(ppd->guid); | ||
1631 | |||
1632 | qib_write_kreg(dd, kr_hrtbt_guid, guid); | ||
1633 | if (!ret) { | ||
1634 | dd->control |= QLOGIC_IB_C_LINKENABLE; | ||
1635 | qib_write_kreg(dd, kr_control, dd->control); | ||
1636 | } else | ||
1637 | /* write to chip to prevent back-to-back writes of ibc reg */ | ||
1638 | qib_write_kreg(dd, kr_scratch, 0); | ||
1639 | return ret; | ||
1640 | } | ||
1641 | |||
1642 | /** | ||
1643 | * qib_7220_quiet_serdes - set serdes to txidle | ||
1644 | * @ppd: physical port of the qlogic_ib device | ||
1645 | * Called when the driver is being unloaded. | ||
1646 | */ | ||
1647 | static void qib_7220_quiet_serdes(struct qib_pportdata *ppd) | ||
1648 | { | ||
1649 | u64 val; | ||
1650 | struct qib_devdata *dd = ppd->dd; | ||
1651 | unsigned long flags; | ||
1652 | |||
1653 | /* disable IBC */ | ||
1654 | dd->control &= ~QLOGIC_IB_C_LINKENABLE; | ||
1655 | qib_write_kreg(dd, kr_control, | ||
1656 | dd->control | QLOGIC_IB_C_FREEZEMODE); | ||
1657 | |||
1658 | ppd->cpspec->chase_end = 0; | ||
1659 | if (ppd->cpspec->chase_timer.data) /* if initted */ | ||
1660 | del_timer_sync(&ppd->cpspec->chase_timer); | ||
1661 | |||
1662 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || | ||
1663 | ppd->cpspec->ibdeltainprog) { | ||
1664 | u64 diagc; | ||
1665 | |||
1666 | /* enable counter writes */ | ||
1667 | diagc = qib_read_kreg64(dd, kr_hwdiagctrl); | ||
1668 | qib_write_kreg(dd, kr_hwdiagctrl, | ||
1669 | diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable)); | ||
1670 | |||
1671 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) { | ||
1672 | val = read_7220_creg32(dd, cr_ibsymbolerr); | ||
1673 | if (ppd->cpspec->ibdeltainprog) | ||
1674 | val -= val - ppd->cpspec->ibsymsnap; | ||
1675 | val -= ppd->cpspec->ibsymdelta; | ||
1676 | write_7220_creg(dd, cr_ibsymbolerr, val); | ||
1677 | } | ||
1678 | if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) { | ||
1679 | val = read_7220_creg32(dd, cr_iblinkerrrecov); | ||
1680 | if (ppd->cpspec->ibdeltainprog) | ||
1681 | val -= val - ppd->cpspec->iblnkerrsnap; | ||
1682 | val -= ppd->cpspec->iblnkerrdelta; | ||
1683 | write_7220_creg(dd, cr_iblinkerrrecov, val); | ||
1684 | } | ||
1685 | |||
1686 | /* and disable counter writes */ | ||
1687 | qib_write_kreg(dd, kr_hwdiagctrl, diagc); | ||
1688 | } | ||
1689 | qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
1690 | |||
1691 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
1692 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | ||
1693 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
1694 | wake_up(&ppd->cpspec->autoneg_wait); | ||
1695 | cancel_delayed_work(&ppd->cpspec->autoneg_work); | ||
1696 | flush_scheduled_work(); | ||
1697 | |||
1698 | shutdown_7220_relock_poll(ppd->dd); | ||
1699 | val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg); | ||
1700 | val |= QLOGIC_IB_XGXS_RESET; | ||
1701 | qib_write_kreg(ppd->dd, kr_xgxs_cfg, val); | ||
1702 | } | ||
1703 | |||
1704 | /** | ||
1705 | * qib_setup_7220_setextled - set the state of the two external LEDs | ||
1706 | * @dd: the qlogic_ib device | ||
1707 | * @on: whether the link is up or not | ||
1708 | * | ||
1709 | * The exact combo of LEDs if on is true is determined by looking | ||
1710 | * at the ibcstatus. | ||
1711 | * | ||
1712 | * These LEDs indicate the physical and logical state of the IB link. | ||
1713 | * For this chip (at least with recommended board pinouts), LED1 | ||
1714 | * is Yellow (logical state) and LED2 is Green (physical state), | ||
1715 | * | ||
1716 | * Note: We try to match the Mellanox HCA LED behavior as best | ||
1717 | * we can. Green indicates physical link state is OK (something is | ||
1718 | * plugged in, and we can train). | ||
1719 | * Amber indicates the link is logically up (ACTIVE). | ||
1720 | * Mellanox further blinks the amber LED to indicate data packet | ||
1721 | * activity, but we have no hardware support for that, so it would | ||
1722 | * require waking up every 10-20 msecs and checking the counters | ||
1723 | * on the chip, and then turning the LED off if appropriate. That's | ||
1724 | * visible overhead, so not something we will do. | ||
1725 | * | ||
1726 | */ | ||
1727 | static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on) | ||
1728 | { | ||
1729 | struct qib_devdata *dd = ppd->dd; | ||
1730 | u64 extctl, ledblink = 0, val, lst, ltst; | ||
1731 | unsigned long flags; | ||
1732 | |||
1733 | /* | ||
1734 | * The diags use the LED to indicate diag info, so we leave | ||
1735 | * the external LED alone when the diags are running. | ||
1736 | */ | ||
1737 | if (dd->diag_client) | ||
1738 | return; | ||
1739 | |||
1740 | if (ppd->led_override) { | ||
1741 | ltst = (ppd->led_override & QIB_LED_PHYS) ? | ||
1742 | IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED, | ||
1743 | lst = (ppd->led_override & QIB_LED_LOG) ? | ||
1744 | IB_PORT_ACTIVE : IB_PORT_DOWN; | ||
1745 | } else if (on) { | ||
1746 | val = qib_read_kreg64(dd, kr_ibcstatus); | ||
1747 | ltst = qib_7220_phys_portstate(val); | ||
1748 | lst = qib_7220_iblink_state(val); | ||
1749 | } else { | ||
1750 | ltst = 0; | ||
1751 | lst = 0; | ||
1752 | } | ||
1753 | |||
1754 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
1755 | extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) | | ||
1756 | SYM_MASK(EXTCtrl, LEDPriPortYellowOn)); | ||
1757 | if (ltst == IB_PHYSPORTSTATE_LINKUP) { | ||
1758 | extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn); | ||
1759 | /* | ||
1760 | * counts are in chip clock (4ns) periods. | ||
1761 | * This is about 1/16 sec (66.6 ms) on, | ||
1762 | * 3/16 sec (187.5 ms) off, with packets rcvd | ||
1763 | */ | ||
1764 | ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT) | ||
1765 | | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT); | ||
1766 | } | ||
1767 | if (lst == IB_PORT_ACTIVE) | ||
1768 | extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn); | ||
1769 | dd->cspec->extctrl = extctl; | ||
1770 | qib_write_kreg(dd, kr_extctrl, extctl); | ||
1771 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
1772 | |||
1773 | if (ledblink) /* blink the LED on packet receive */ | ||
1774 | qib_write_kreg(dd, kr_rcvpktledcnt, ledblink); | ||
1775 | } | ||
1776 | |||
1777 | static void qib_7220_free_irq(struct qib_devdata *dd) | ||
1778 | { | ||
1779 | if (dd->cspec->irq) { | ||
1780 | free_irq(dd->cspec->irq, dd); | ||
1781 | dd->cspec->irq = 0; | ||
1782 | } | ||
1783 | qib_nomsi(dd); | ||
1784 | } | ||
1785 | |||
1786 | /* | ||
1787 | * qib_setup_7220_cleanup - clean up any chip-specific stuff | ||
1788 | * @dd: the qlogic_ib device | ||
1789 | * | ||
1790 | * This is called during driver unload. | ||
1791 | * | ||
1792 | */ | ||
1793 | static void qib_setup_7220_cleanup(struct qib_devdata *dd) | ||
1794 | { | ||
1795 | qib_7220_free_irq(dd); | ||
1796 | kfree(dd->cspec->cntrs); | ||
1797 | kfree(dd->cspec->portcntrs); | ||
1798 | } | ||
1799 | |||
1800 | /* | ||
1801 | * This is only called for SDmaInt. | ||
1802 | * SDmaDisabled is handled on the error path. | ||
1803 | */ | ||
1804 | static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat) | ||
1805 | { | ||
1806 | unsigned long flags; | ||
1807 | |||
1808 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
1809 | |||
1810 | switch (ppd->sdma_state.current_state) { | ||
1811 | case qib_sdma_state_s00_hw_down: | ||
1812 | break; | ||
1813 | |||
1814 | case qib_sdma_state_s10_hw_start_up_wait: | ||
1815 | __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started); | ||
1816 | break; | ||
1817 | |||
1818 | case qib_sdma_state_s20_idle: | ||
1819 | break; | ||
1820 | |||
1821 | case qib_sdma_state_s30_sw_clean_up_wait: | ||
1822 | break; | ||
1823 | |||
1824 | case qib_sdma_state_s40_hw_clean_up_wait: | ||
1825 | break; | ||
1826 | |||
1827 | case qib_sdma_state_s50_hw_halt_wait: | ||
1828 | __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted); | ||
1829 | break; | ||
1830 | |||
1831 | case qib_sdma_state_s99_running: | ||
1832 | /* too chatty to print here */ | ||
1833 | __qib_sdma_intr(ppd); | ||
1834 | break; | ||
1835 | } | ||
1836 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
1837 | } | ||
1838 | |||
1839 | static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint) | ||
1840 | { | ||
1841 | unsigned long flags; | ||
1842 | |||
1843 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
1844 | if (needint) { | ||
1845 | if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) | ||
1846 | goto done; | ||
1847 | /* | ||
1848 | * blip the availupd off, next write will be on, so | ||
1849 | * we ensure an avail update, regardless of threshold or | ||
1850 | * buffers becoming free, whenever we want an interrupt | ||
1851 | */ | ||
1852 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl & | ||
1853 | ~SYM_MASK(SendCtrl, SendBufAvailUpd)); | ||
1854 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
1855 | dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); | ||
1856 | } else | ||
1857 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); | ||
1858 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
1859 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
1860 | done: | ||
1861 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
1862 | } | ||
1863 | |||
1864 | /* | ||
1865 | * Handle errors and unusual events first, separate function | ||
1866 | * to improve cache hits for fast path interrupt handling. | ||
1867 | */ | ||
1868 | static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat) | ||
1869 | { | ||
1870 | if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT)) | ||
1871 | qib_dev_err(dd, | ||
1872 | "interrupt with unknown interrupts %Lx set\n", | ||
1873 | istat & ~QLOGIC_IB_I_BITSEXTANT); | ||
1874 | |||
1875 | if (istat & QLOGIC_IB_I_GPIO) { | ||
1876 | u32 gpiostatus; | ||
1877 | |||
1878 | /* | ||
1879 | * Boards for this chip currently don't use GPIO interrupts, | ||
1880 | * so clear by writing GPIOstatus to GPIOclear, and complain | ||
1881 | * to alert the developer. To avoid endless repeats, clear | ||
1882 | * the bits in the mask, since there is some kind of | ||
1883 | * programming error or chip problem. | ||
1884 | */ | ||
1885 | gpiostatus = qib_read_kreg32(dd, kr_gpio_status); | ||
1886 | /* | ||
1887 | * In theory, writing GPIOstatus to GPIOclear could | ||
1888 | * have a bad side-effect on some diagnostic that wanted | ||
1889 | * to poll for a status-change, but the various shadows | ||
1890 | * make that problematic at best. Diags will just suppress | ||
1891 | * all GPIO interrupts during such tests. | ||
1892 | */ | ||
1893 | qib_write_kreg(dd, kr_gpio_clear, gpiostatus); | ||
1894 | |||
1895 | if (gpiostatus) { | ||
1896 | const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); | ||
1897 | u32 gpio_irq = mask & gpiostatus; | ||
1898 | |||
1899 | /* | ||
1900 | * A bit set in status and (chip) Mask register | ||
1901 | * would cause an interrupt. Since we are not | ||
1902 | * expecting any, report it. Also check that the | ||
1903 | * chip reflects our shadow, report issues, | ||
1904 | * and refresh from the shadow. | ||
1905 | */ | ||
1906 | /* | ||
1907 | * Clear any troublemakers, and update chip | ||
1908 | * from shadow | ||
1909 | */ | ||
1910 | dd->cspec->gpio_mask &= ~gpio_irq; | ||
1911 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | ||
1912 | } | ||
1913 | } | ||
1914 | |||
1915 | if (istat & QLOGIC_IB_I_ERROR) { | ||
1916 | u64 estat; | ||
1917 | |||
1918 | qib_stats.sps_errints++; | ||
1919 | estat = qib_read_kreg64(dd, kr_errstatus); | ||
1920 | if (!estat) | ||
1921 | qib_devinfo(dd->pcidev, "error interrupt (%Lx), " | ||
1922 | "but no error bits set!\n", istat); | ||
1923 | else | ||
1924 | handle_7220_errors(dd, estat); | ||
1925 | } | ||
1926 | } | ||
1927 | |||
1928 | static irqreturn_t qib_7220intr(int irq, void *data) | ||
1929 | { | ||
1930 | struct qib_devdata *dd = data; | ||
1931 | irqreturn_t ret; | ||
1932 | u64 istat; | ||
1933 | u64 ctxtrbits; | ||
1934 | u64 rmask; | ||
1935 | unsigned i; | ||
1936 | |||
1937 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { | ||
1938 | /* | ||
1939 | * This return value is not great, but we do not want the | ||
1940 | * interrupt core code to remove our interrupt handler | ||
1941 | * because we don't appear to be handling an interrupt | ||
1942 | * during a chip reset. | ||
1943 | */ | ||
1944 | ret = IRQ_HANDLED; | ||
1945 | goto bail; | ||
1946 | } | ||
1947 | |||
1948 | istat = qib_read_kreg64(dd, kr_intstatus); | ||
1949 | |||
1950 | if (unlikely(!istat)) { | ||
1951 | ret = IRQ_NONE; /* not our interrupt, or already handled */ | ||
1952 | goto bail; | ||
1953 | } | ||
1954 | if (unlikely(istat == -1)) { | ||
1955 | qib_bad_intrstatus(dd); | ||
1956 | /* don't know if it was our interrupt or not */ | ||
1957 | ret = IRQ_NONE; | ||
1958 | goto bail; | ||
1959 | } | ||
1960 | |||
1961 | qib_stats.sps_ints++; | ||
1962 | if (dd->int_counter != (u32) -1) | ||
1963 | dd->int_counter++; | ||
1964 | |||
1965 | if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | | ||
1966 | QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) | ||
1967 | unlikely_7220_intr(dd, istat); | ||
1968 | |||
1969 | /* | ||
1970 | * Clear the interrupt bits we found set, relatively early, so we | ||
1971 | * "know" know the chip will have seen this by the time we process | ||
1972 | * the queue, and will re-interrupt if necessary. The processor | ||
1973 | * itself won't take the interrupt again until we return. | ||
1974 | */ | ||
1975 | qib_write_kreg(dd, kr_intclear, istat); | ||
1976 | |||
1977 | /* | ||
1978 | * Handle kernel receive queues before checking for pio buffers | ||
1979 | * available since receives can overflow; piobuf waiters can afford | ||
1980 | * a few extra cycles, since they were waiting anyway. | ||
1981 | */ | ||
1982 | ctxtrbits = istat & | ||
1983 | ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1984 | (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT)); | ||
1985 | if (ctxtrbits) { | ||
1986 | rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1987 | (1ULL << QLOGIC_IB_I_RCVURG_SHIFT); | ||
1988 | for (i = 0; i < dd->first_user_ctxt; i++) { | ||
1989 | if (ctxtrbits & rmask) { | ||
1990 | ctxtrbits &= ~rmask; | ||
1991 | qib_kreceive(dd->rcd[i], NULL, NULL); | ||
1992 | } | ||
1993 | rmask <<= 1; | ||
1994 | } | ||
1995 | if (ctxtrbits) { | ||
1996 | ctxtrbits = | ||
1997 | (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) | | ||
1998 | (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT); | ||
1999 | qib_handle_urcv(dd, ctxtrbits); | ||
2000 | } | ||
2001 | } | ||
2002 | |||
2003 | /* only call for SDmaInt */ | ||
2004 | if (istat & QLOGIC_IB_I_SDMAINT) | ||
2005 | sdma_7220_intr(dd->pport, istat); | ||
2006 | |||
2007 | if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) | ||
2008 | qib_ib_piobufavail(dd); | ||
2009 | |||
2010 | ret = IRQ_HANDLED; | ||
2011 | bail: | ||
2012 | return ret; | ||
2013 | } | ||
2014 | |||
2015 | /* | ||
2016 | * Set up our chip-specific interrupt handler. | ||
2017 | * The interrupt type has already been set up, so | ||
2018 | * we just need to do the registration and error checking. | ||
2019 | * If we are using MSI interrupts, we may fall back to | ||
2020 | * INTx later, if the interrupt handler doesn't get called | ||
2021 | * within 1/2 second (see verify_interrupt()). | ||
2022 | */ | ||
2023 | static void qib_setup_7220_interrupt(struct qib_devdata *dd) | ||
2024 | { | ||
2025 | if (!dd->cspec->irq) | ||
2026 | qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " | ||
2027 | "work\n"); | ||
2028 | else { | ||
2029 | int ret = request_irq(dd->cspec->irq, qib_7220intr, | ||
2030 | dd->msi_lo ? 0 : IRQF_SHARED, | ||
2031 | QIB_DRV_NAME, dd); | ||
2032 | |||
2033 | if (ret) | ||
2034 | qib_dev_err(dd, "Couldn't setup %s interrupt " | ||
2035 | "(irq=%d): %d\n", dd->msi_lo ? | ||
2036 | "MSI" : "INTx", dd->cspec->irq, ret); | ||
2037 | } | ||
2038 | } | ||
2039 | |||
2040 | /** | ||
2041 | * qib_7220_boardname - fill in the board name | ||
2042 | * @dd: the qlogic_ib device | ||
2043 | * | ||
2044 | * info is based on the board revision register | ||
2045 | */ | ||
2046 | static void qib_7220_boardname(struct qib_devdata *dd) | ||
2047 | { | ||
2048 | char *n; | ||
2049 | u32 boardid, namelen; | ||
2050 | |||
2051 | boardid = SYM_FIELD(dd->revision, Revision, | ||
2052 | BoardID); | ||
2053 | |||
2054 | switch (boardid) { | ||
2055 | case 1: | ||
2056 | n = "InfiniPath_QLE7240"; | ||
2057 | break; | ||
2058 | case 2: | ||
2059 | n = "InfiniPath_QLE7280"; | ||
2060 | break; | ||
2061 | default: | ||
2062 | qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid); | ||
2063 | n = "Unknown_InfiniPath_7220"; | ||
2064 | break; | ||
2065 | } | ||
2066 | |||
2067 | namelen = strlen(n) + 1; | ||
2068 | dd->boardname = kmalloc(namelen, GFP_KERNEL); | ||
2069 | if (!dd->boardname) | ||
2070 | qib_dev_err(dd, "Failed allocation for board name: %s\n", n); | ||
2071 | else | ||
2072 | snprintf(dd->boardname, namelen, "%s", n); | ||
2073 | |||
2074 | if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2) | ||
2075 | qib_dev_err(dd, "Unsupported InfiniPath hardware " | ||
2076 | "revision %u.%u!\n", | ||
2077 | dd->majrev, dd->minrev); | ||
2078 | |||
2079 | snprintf(dd->boardversion, sizeof(dd->boardversion), | ||
2080 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", | ||
2081 | QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, | ||
2082 | (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), | ||
2083 | dd->majrev, dd->minrev, | ||
2084 | (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); | ||
2085 | } | ||
2086 | |||
2087 | /* | ||
2088 | * This routine sleeps, so it can only be called from user context, not | ||
2089 | * from interrupt context. | ||
2090 | */ | ||
2091 | static int qib_setup_7220_reset(struct qib_devdata *dd) | ||
2092 | { | ||
2093 | u64 val; | ||
2094 | int i; | ||
2095 | int ret; | ||
2096 | u16 cmdval; | ||
2097 | u8 int_line, clinesz; | ||
2098 | unsigned long flags; | ||
2099 | |||
2100 | qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); | ||
2101 | |||
2102 | /* Use dev_err so it shows up in logs, etc. */ | ||
2103 | qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); | ||
2104 | |||
2105 | /* no interrupts till re-initted */ | ||
2106 | qib_7220_set_intr_state(dd, 0); | ||
2107 | |||
2108 | dd->pport->cpspec->ibdeltainprog = 0; | ||
2109 | dd->pport->cpspec->ibsymdelta = 0; | ||
2110 | dd->pport->cpspec->iblnkerrdelta = 0; | ||
2111 | |||
2112 | /* | ||
2113 | * Keep chip from being accessed until we are ready. Use | ||
2114 | * writeq() directly, to allow the write even though QIB_PRESENT | ||
2115 | * isn't set. | ||
2116 | */ | ||
2117 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT); | ||
2118 | dd->int_counter = 0; /* so we check interrupts work again */ | ||
2119 | val = dd->control | QLOGIC_IB_C_RESET; | ||
2120 | writeq(val, &dd->kregbase[kr_control]); | ||
2121 | mb(); /* prevent compiler reordering around actual reset */ | ||
2122 | |||
2123 | for (i = 1; i <= 5; i++) { | ||
2124 | /* | ||
2125 | * Allow MBIST, etc. to complete; longer on each retry. | ||
2126 | * We sometimes get machine checks from bus timeout if no | ||
2127 | * response, so for now, make it *really* long. | ||
2128 | */ | ||
2129 | msleep(1000 + (1 + i) * 2000); | ||
2130 | |||
2131 | qib_pcie_reenable(dd, cmdval, int_line, clinesz); | ||
2132 | |||
2133 | /* | ||
2134 | * Use readq directly, so we don't need to mark it as PRESENT | ||
2135 | * until we get a successful indication that all is well. | ||
2136 | */ | ||
2137 | val = readq(&dd->kregbase[kr_revision]); | ||
2138 | if (val == dd->revision) { | ||
2139 | dd->flags |= QIB_PRESENT; /* it's back */ | ||
2140 | ret = qib_reinit_intr(dd); | ||
2141 | goto bail; | ||
2142 | } | ||
2143 | } | ||
2144 | ret = 0; /* failed */ | ||
2145 | |||
2146 | bail: | ||
2147 | if (ret) { | ||
2148 | if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL)) | ||
2149 | qib_dev_err(dd, "Reset failed to setup PCIe or " | ||
2150 | "interrupts; continuing anyway\n"); | ||
2151 | |||
2152 | /* hold IBC in reset, no sends, etc till later */ | ||
2153 | qib_write_kreg(dd, kr_control, 0ULL); | ||
2154 | |||
2155 | /* clear the reset error, init error/hwerror mask */ | ||
2156 | qib_7220_init_hwerrors(dd); | ||
2157 | |||
2158 | /* do setup similar to speed or link-width changes */ | ||
2159 | if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) | ||
2160 | dd->cspec->presets_needed = 1; | ||
2161 | spin_lock_irqsave(&dd->pport->lflags_lock, flags); | ||
2162 | dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY; | ||
2163 | dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
2164 | spin_unlock_irqrestore(&dd->pport->lflags_lock, flags); | ||
2165 | } | ||
2166 | |||
2167 | return ret; | ||
2168 | } | ||
2169 | |||
2170 | /** | ||
2171 | * qib_7220_put_tid - write a TID to the chip | ||
2172 | * @dd: the qlogic_ib device | ||
2173 | * @tidptr: pointer to the expected TID (in chip) to update | ||
2174 | * @type: 0 for eager, 1 for expected | ||
2175 | * @pa: physical address of in memory buffer; tidinvalid if freeing | ||
2176 | */ | ||
2177 | static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | ||
2178 | u32 type, unsigned long pa) | ||
2179 | { | ||
2180 | if (pa != dd->tidinvalid) { | ||
2181 | u64 chippa = pa >> IBA7220_TID_PA_SHIFT; | ||
2182 | |||
2183 | /* paranoia checks */ | ||
2184 | if (pa != (chippa << IBA7220_TID_PA_SHIFT)) { | ||
2185 | qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", | ||
2186 | pa); | ||
2187 | return; | ||
2188 | } | ||
2189 | if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) { | ||
2190 | qib_dev_err(dd, "Physical page address 0x%lx " | ||
2191 | "larger than supported\n", pa); | ||
2192 | return; | ||
2193 | } | ||
2194 | |||
2195 | if (type == RCVHQ_RCV_TYPE_EAGER) | ||
2196 | chippa |= dd->tidtemplate; | ||
2197 | else /* for now, always full 4KB page */ | ||
2198 | chippa |= IBA7220_TID_SZ_4K; | ||
2199 | pa = chippa; | ||
2200 | } | ||
2201 | writeq(pa, tidptr); | ||
2202 | mmiowb(); | ||
2203 | } | ||
2204 | |||
2205 | /** | ||
2206 | * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager | ||
2207 | * @dd: the qlogic_ib device | ||
2208 | * @ctxt: the ctxt | ||
2209 | * | ||
2210 | * clear all TID entries for a ctxt, expected and eager. | ||
2211 | * Used from qib_close(). On this chip, TIDs are only 32 bits, | ||
2212 | * not 64, but they are still on 64 bit boundaries, so tidbase | ||
2213 | * is declared as u64 * for the pointer math, even though we write 32 bits | ||
2214 | */ | ||
2215 | static void qib_7220_clear_tids(struct qib_devdata *dd, | ||
2216 | struct qib_ctxtdata *rcd) | ||
2217 | { | ||
2218 | u64 __iomem *tidbase; | ||
2219 | unsigned long tidinv; | ||
2220 | u32 ctxt; | ||
2221 | int i; | ||
2222 | |||
2223 | if (!dd->kregbase || !rcd) | ||
2224 | return; | ||
2225 | |||
2226 | ctxt = rcd->ctxt; | ||
2227 | |||
2228 | tidinv = dd->tidinvalid; | ||
2229 | tidbase = (u64 __iomem *) | ||
2230 | ((char __iomem *)(dd->kregbase) + | ||
2231 | dd->rcvtidbase + | ||
2232 | ctxt * dd->rcvtidcnt * sizeof(*tidbase)); | ||
2233 | |||
2234 | for (i = 0; i < dd->rcvtidcnt; i++) | ||
2235 | qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, | ||
2236 | tidinv); | ||
2237 | |||
2238 | tidbase = (u64 __iomem *) | ||
2239 | ((char __iomem *)(dd->kregbase) + | ||
2240 | dd->rcvegrbase + | ||
2241 | rcd->rcvegr_tid_base * sizeof(*tidbase)); | ||
2242 | |||
2243 | for (i = 0; i < rcd->rcvegrcnt; i++) | ||
2244 | qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, | ||
2245 | tidinv); | ||
2246 | } | ||
2247 | |||
2248 | /** | ||
2249 | * qib_7220_tidtemplate - setup constants for TID updates | ||
2250 | * @dd: the qlogic_ib device | ||
2251 | * | ||
2252 | * We set up stuff that we use a lot, to avoid calculating it each time | ||
2253 | */ | ||
2254 | static void qib_7220_tidtemplate(struct qib_devdata *dd) | ||
2255 | { | ||
2256 | if (dd->rcvegrbufsize == 2048) | ||
2257 | dd->tidtemplate = IBA7220_TID_SZ_2K; | ||
2258 | else if (dd->rcvegrbufsize == 4096) | ||
2259 | dd->tidtemplate = IBA7220_TID_SZ_4K; | ||
2260 | dd->tidinvalid = 0; | ||
2261 | } | ||
2262 | |||
2263 | /** | ||
2264 | * qib_7220_get_base_info - set chip-specific flags for user code | ||
2265 | * @rcd: the qlogic_ib ctxt | ||
2266 | * @kinfo: qib_base_info pointer | ||
2267 | * | ||
2268 | * We set the PCIE flag because the lower bandwidth on PCIe vs | ||
2269 | * HyperTransport can affect some user packet algorithms. | ||
2270 | */ | ||
2271 | static int qib_7220_get_base_info(struct qib_ctxtdata *rcd, | ||
2272 | struct qib_base_info *kinfo) | ||
2273 | { | ||
2274 | kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE | | ||
2275 | QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA; | ||
2276 | |||
2277 | if (rcd->dd->flags & QIB_USE_SPCL_TRIG) | ||
2278 | kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER; | ||
2279 | |||
2280 | return 0; | ||
2281 | } | ||
2282 | |||
2283 | static struct qib_message_header * | ||
2284 | qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) | ||
2285 | { | ||
2286 | u32 offset = qib_hdrget_offset(rhf_addr); | ||
2287 | |||
2288 | return (struct qib_message_header *) | ||
2289 | (rhf_addr - dd->rhf_offset + offset); | ||
2290 | } | ||
2291 | |||
2292 | static void qib_7220_config_ctxts(struct qib_devdata *dd) | ||
2293 | { | ||
2294 | unsigned long flags; | ||
2295 | u32 nchipctxts; | ||
2296 | |||
2297 | nchipctxts = qib_read_kreg32(dd, kr_portcnt); | ||
2298 | dd->cspec->numctxts = nchipctxts; | ||
2299 | if (qib_n_krcv_queues > 1) { | ||
2300 | dd->qpn_mask = 0x3f; | ||
2301 | dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; | ||
2302 | if (dd->first_user_ctxt > nchipctxts) | ||
2303 | dd->first_user_ctxt = nchipctxts; | ||
2304 | } else | ||
2305 | dd->first_user_ctxt = dd->num_pports; | ||
2306 | dd->n_krcv_queues = dd->first_user_ctxt; | ||
2307 | |||
2308 | if (!qib_cfgctxts) { | ||
2309 | int nctxts = dd->first_user_ctxt + num_online_cpus(); | ||
2310 | |||
2311 | if (nctxts <= 5) | ||
2312 | dd->ctxtcnt = 5; | ||
2313 | else if (nctxts <= 9) | ||
2314 | dd->ctxtcnt = 9; | ||
2315 | else if (nctxts <= nchipctxts) | ||
2316 | dd->ctxtcnt = nchipctxts; | ||
2317 | } else if (qib_cfgctxts <= nchipctxts) | ||
2318 | dd->ctxtcnt = qib_cfgctxts; | ||
2319 | if (!dd->ctxtcnt) /* none of the above, set to max */ | ||
2320 | dd->ctxtcnt = nchipctxts; | ||
2321 | |||
2322 | /* | ||
2323 | * Chip can be configured for 5, 9, or 17 ctxts, and choice | ||
2324 | * affects number of eager TIDs per ctxt (1K, 2K, 4K). | ||
2325 | * Lock to be paranoid about later motion, etc. | ||
2326 | */ | ||
2327 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
2328 | if (dd->ctxtcnt > 9) | ||
2329 | dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT; | ||
2330 | else if (dd->ctxtcnt > 5) | ||
2331 | dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT; | ||
2332 | /* else configure for default 5 receive ctxts */ | ||
2333 | if (dd->qpn_mask) | ||
2334 | dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB; | ||
2335 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | ||
2336 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
2337 | |||
2338 | /* kr_rcvegrcnt changes based on the number of contexts enabled */ | ||
2339 | dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); | ||
2340 | dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT); | ||
2341 | } | ||
2342 | |||
2343 | static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which) | ||
2344 | { | ||
2345 | int lsb, ret = 0; | ||
2346 | u64 maskr; /* right-justified mask */ | ||
2347 | |||
2348 | switch (which) { | ||
2349 | case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ | ||
2350 | ret = ppd->link_width_enabled; | ||
2351 | goto done; | ||
2352 | |||
2353 | case QIB_IB_CFG_LWID: /* Get currently active Link-width */ | ||
2354 | ret = ppd->link_width_active; | ||
2355 | goto done; | ||
2356 | |||
2357 | case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ | ||
2358 | ret = ppd->link_speed_enabled; | ||
2359 | goto done; | ||
2360 | |||
2361 | case QIB_IB_CFG_SPD: /* Get current Link spd */ | ||
2362 | ret = ppd->link_speed_active; | ||
2363 | goto done; | ||
2364 | |||
2365 | case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ | ||
2366 | lsb = IBA7220_IBC_RXPOL_SHIFT; | ||
2367 | maskr = IBA7220_IBC_RXPOL_MASK; | ||
2368 | break; | ||
2369 | |||
2370 | case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ | ||
2371 | lsb = IBA7220_IBC_LREV_SHIFT; | ||
2372 | maskr = IBA7220_IBC_LREV_MASK; | ||
2373 | break; | ||
2374 | |||
2375 | case QIB_IB_CFG_LINKLATENCY: | ||
2376 | ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus) | ||
2377 | & IBA7220_DDRSTAT_LINKLAT_MASK; | ||
2378 | goto done; | ||
2379 | |||
2380 | case QIB_IB_CFG_OP_VLS: | ||
2381 | ret = ppd->vls_operational; | ||
2382 | goto done; | ||
2383 | |||
2384 | case QIB_IB_CFG_VL_HIGH_CAP: | ||
2385 | ret = 0; | ||
2386 | goto done; | ||
2387 | |||
2388 | case QIB_IB_CFG_VL_LOW_CAP: | ||
2389 | ret = 0; | ||
2390 | goto done; | ||
2391 | |||
2392 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | ||
2393 | ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, | ||
2394 | OverrunThreshold); | ||
2395 | goto done; | ||
2396 | |||
2397 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | ||
2398 | ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, | ||
2399 | PhyerrThreshold); | ||
2400 | goto done; | ||
2401 | |||
2402 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | ||
2403 | /* will only take effect when the link state changes */ | ||
2404 | ret = (ppd->cpspec->ibcctrl & | ||
2405 | SYM_MASK(IBCCtrl, LinkDownDefaultState)) ? | ||
2406 | IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; | ||
2407 | goto done; | ||
2408 | |||
2409 | case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ | ||
2410 | lsb = IBA7220_IBC_HRTBT_SHIFT; | ||
2411 | maskr = IBA7220_IBC_HRTBT_MASK; | ||
2412 | break; | ||
2413 | |||
2414 | case QIB_IB_CFG_PMA_TICKS: | ||
2415 | /* | ||
2416 | * 0x00 = 10x link transfer rate or 4 nsec for 2.5 Gb/s | ||
2417 | * Since the clock is always 250MHz, the value is 1 or 0. | ||
2418 | */ | ||
2419 | ret = (ppd->link_speed_active == QIB_IB_DDR); | ||
2420 | goto done; | ||
2421 | |||
2422 | default: | ||
2423 | ret = -EINVAL; | ||
2424 | goto done; | ||
2425 | } | ||
2426 | ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr); | ||
2427 | done: | ||
2428 | return ret; | ||
2429 | } | ||
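
/*
 * Editorial sketch, not part of the original patch: an illustrative caller
 * of the accessor above.  Masked items come back right-justified from the
 * ibcddrctrl shadow copy; unknown items return -EINVAL.  The helper name
 * is hypothetical.
 */
static int example_read_heartbeat(struct qib_pportdata *ppd)
{
        return qib_7220_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
}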
2430 | |||
2431 | static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) | ||
2432 | { | ||
2433 | struct qib_devdata *dd = ppd->dd; | ||
2434 | u64 maskr; /* right-justified mask */ | ||
2435 | int lsb, ret = 0, setforce = 0; | ||
2436 | u16 lcmd, licmd; | ||
2437 | unsigned long flags; | ||
2438 | |||
2439 | switch (which) { | ||
2440 | case QIB_IB_CFG_LIDLMC: | ||
2441 | /* | ||
2442 | * Set LID and LMC. Combined to avoid possible hazard; | ||
2443 | * caller puts LMC in 16 MSbits, DLID in 16 LSbits of val. | ||
2444 | */ | ||
2445 | lsb = IBA7220_IBC_DLIDLMC_SHIFT; | ||
2446 | maskr = IBA7220_IBC_DLIDLMC_MASK; | ||
2447 | break; | ||
2448 | |||
2449 | case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */ | ||
2450 | /* | ||
2451 | * As with speed, only write the actual register if | ||
2452 | * the link is currently down, otherwise takes effect | ||
2453 | * on next link change. | ||
2454 | */ | ||
2455 | ppd->link_width_enabled = val; | ||
2456 | if (!(ppd->lflags & QIBL_LINKDOWN)) | ||
2457 | goto bail; | ||
2458 | /* | ||
2459 | * We set the QIBL_IB_FORCE_NOTIFY bit so updown | ||
2460 | * will get called because we want to update | ||
2461 | * link_width_active, and the change may not take | ||
2462 | * effect for some time (if we are in POLL), so this | ||
2463 | * flag will force the updown routine to be called | ||
2464 | * on the next ibstatuschange down interrupt, even | ||
2465 | * if it's not a down->up transition. | ||
2466 | */ | ||
2467 | val--; /* convert from IB to chip */ | ||
2468 | maskr = IBA7220_IBC_WIDTH_MASK; | ||
2469 | lsb = IBA7220_IBC_WIDTH_SHIFT; | ||
2470 | setforce = 1; | ||
2471 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2472 | ppd->lflags |= QIBL_IB_FORCE_NOTIFY; | ||
2473 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2474 | break; | ||
2475 | |||
2476 | case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */ | ||
2477 | /* | ||
2478 | * If we turn off IB1.2, need to preset SerDes defaults, | ||
2479 | * but not right now. Set a flag for the next time | ||
2480 | * we command the link down. As with width, only write the | ||
2481 | * actual register if the link is currently down, otherwise | ||
2482 | * takes effect on next link change. Since setting is being | ||
2483 | * explicitly requested (via MAD or sysfs), clear autoneg | ||
2484 | * failure status if speed autoneg is enabled. | ||
2485 | */ | ||
2486 | ppd->link_speed_enabled = val; | ||
2487 | if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) && | ||
2488 | !(val & (val - 1))) | ||
2489 | dd->cspec->presets_needed = 1; | ||
2490 | if (!(ppd->lflags & QIBL_LINKDOWN)) | ||
2491 | goto bail; | ||
2492 | /* | ||
2493 | * We set the QIBL_IB_FORCE_NOTIFY bit so updown | ||
2494 | * will get called because we want to update | ||
2495 | * link_speed_active, and the change may not take | ||
2496 | * effect for some time (if we are in POLL), so this | ||
2497 | * flag will force the updown routine to be called | ||
2498 | * on the next ibstatuschange down interrupt, even | ||
2499 | * if it's not a down->up transition. | ||
2500 | */ | ||
2501 | if (val == (QIB_IB_SDR | QIB_IB_DDR)) { | ||
2502 | val = IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
2503 | IBA7220_IBC_IBTA_1_2_MASK; | ||
2504 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2505 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
2506 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2507 | } else | ||
2508 | val = val == QIB_IB_DDR ? | ||
2509 | IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; | ||
2510 | maskr = IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
2511 | IBA7220_IBC_IBTA_1_2_MASK; | ||
2512 | /* IBTA 1.2 mode + speed bits are contiguous */ | ||
2513 | lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE); | ||
2514 | setforce = 1; | ||
2515 | break; | ||
2516 | |||
2517 | case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ | ||
2518 | lsb = IBA7220_IBC_RXPOL_SHIFT; | ||
2519 | maskr = IBA7220_IBC_RXPOL_MASK; | ||
2520 | break; | ||
2521 | |||
2522 | case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ | ||
2523 | lsb = IBA7220_IBC_LREV_SHIFT; | ||
2524 | maskr = IBA7220_IBC_LREV_MASK; | ||
2525 | break; | ||
2526 | |||
2527 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | ||
2528 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, | ||
2529 | OverrunThreshold); | ||
2530 | if (maskr != val) { | ||
2531 | ppd->cpspec->ibcctrl &= | ||
2532 | ~SYM_MASK(IBCCtrl, OverrunThreshold); | ||
2533 | ppd->cpspec->ibcctrl |= (u64) val << | ||
2534 | SYM_LSB(IBCCtrl, OverrunThreshold); | ||
2535 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2536 | qib_write_kreg(dd, kr_scratch, 0); | ||
2537 | } | ||
2538 | goto bail; | ||
2539 | |||
2540 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | ||
2541 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl, | ||
2542 | PhyerrThreshold); | ||
2543 | if (maskr != val) { | ||
2544 | ppd->cpspec->ibcctrl &= | ||
2545 | ~SYM_MASK(IBCCtrl, PhyerrThreshold); | ||
2546 | ppd->cpspec->ibcctrl |= (u64) val << | ||
2547 | SYM_LSB(IBCCtrl, PhyerrThreshold); | ||
2548 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2549 | qib_write_kreg(dd, kr_scratch, 0); | ||
2550 | } | ||
2551 | goto bail; | ||
2552 | |||
2553 | case QIB_IB_CFG_PKEYS: /* update pkeys */ | ||
2554 | maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | | ||
2555 | ((u64) ppd->pkeys[2] << 32) | | ||
2556 | ((u64) ppd->pkeys[3] << 48); | ||
2557 | qib_write_kreg(dd, kr_partitionkey, maskr); | ||
2558 | goto bail; | ||
2559 | |||
2560 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | ||
2561 | /* will only take effect when the link state changes */ | ||
2562 | if (val == IB_LINKINITCMD_POLL) | ||
2563 | ppd->cpspec->ibcctrl &= | ||
2564 | ~SYM_MASK(IBCCtrl, LinkDownDefaultState); | ||
2565 | else /* SLEEP */ | ||
2566 | ppd->cpspec->ibcctrl |= | ||
2567 | SYM_MASK(IBCCtrl, LinkDownDefaultState); | ||
2568 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2569 | qib_write_kreg(dd, kr_scratch, 0); | ||
2570 | goto bail; | ||
2571 | |||
2572 | case QIB_IB_CFG_MTU: /* update the MTU in IBC */ | ||
2573 | /* | ||
2574 | * Update our housekeeping variables, and set IBC max | ||
2575 | * size, same as init code; max IBC is max we allow in | ||
2576 | * buffer, less the qword pbc, plus 1 for ICRC, in dwords. | ||
2577 | * Set even if it's unchanged, print debug message only | ||
2578 | * on changes. | ||
2579 | */ | ||
2580 | val = (ppd->ibmaxlen >> 2) + 1; | ||
2581 | ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen); | ||
2582 | ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen); | ||
2583 | qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2584 | qib_write_kreg(dd, kr_scratch, 0); | ||
2585 | goto bail; | ||
2586 | |||
2587 | case QIB_IB_CFG_LSTATE: /* set the IB link state */ | ||
2588 | switch (val & 0xffff0000) { | ||
2589 | case IB_LINKCMD_DOWN: | ||
2590 | lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN; | ||
2591 | if (!ppd->cpspec->ibdeltainprog && | ||
2592 | qib_compat_ddr_negotiate) { | ||
2593 | ppd->cpspec->ibdeltainprog = 1; | ||
2594 | ppd->cpspec->ibsymsnap = | ||
2595 | read_7220_creg32(dd, cr_ibsymbolerr); | ||
2596 | ppd->cpspec->iblnkerrsnap = | ||
2597 | read_7220_creg32(dd, cr_iblinkerrrecov); | ||
2598 | } | ||
2599 | break; | ||
2600 | |||
2601 | case IB_LINKCMD_ARMED: | ||
2602 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED; | ||
2603 | break; | ||
2604 | |||
2605 | case IB_LINKCMD_ACTIVE: | ||
2606 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE; | ||
2607 | break; | ||
2608 | |||
2609 | default: | ||
2610 | ret = -EINVAL; | ||
2611 | qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); | ||
2612 | goto bail; | ||
2613 | } | ||
2614 | switch (val & 0xffff) { | ||
2615 | case IB_LINKINITCMD_NOP: | ||
2616 | licmd = 0; | ||
2617 | break; | ||
2618 | |||
2619 | case IB_LINKINITCMD_POLL: | ||
2620 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL; | ||
2621 | break; | ||
2622 | |||
2623 | case IB_LINKINITCMD_SLEEP: | ||
2624 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP; | ||
2625 | break; | ||
2626 | |||
2627 | case IB_LINKINITCMD_DISABLE: | ||
2628 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE; | ||
2629 | ppd->cpspec->chase_end = 0; | ||
2630 | /* | ||
2631 | * stop state chase counter and timer, if running. | ||
2632 | * wait for pending timer, but don't clear .data (ppd)! | ||
2633 | */ | ||
2634 | if (ppd->cpspec->chase_timer.expires) { | ||
2635 | del_timer_sync(&ppd->cpspec->chase_timer); | ||
2636 | ppd->cpspec->chase_timer.expires = 0; | ||
2637 | } | ||
2638 | break; | ||
2639 | |||
2640 | default: | ||
2641 | ret = -EINVAL; | ||
2642 | qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", | ||
2643 | val & 0xffff); | ||
2644 | goto bail; | ||
2645 | } | ||
2646 | qib_set_ib_7220_lstate(ppd, lcmd, licmd); | ||
2647 | goto bail; | ||
2648 | |||
2649 | case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ | ||
2650 | if (val > IBA7220_IBC_HRTBT_MASK) { | ||
2651 | ret = -EINVAL; | ||
2652 | goto bail; | ||
2653 | } | ||
2654 | lsb = IBA7220_IBC_HRTBT_SHIFT; | ||
2655 | maskr = IBA7220_IBC_HRTBT_MASK; | ||
2656 | break; | ||
2657 | |||
2658 | default: | ||
2659 | ret = -EINVAL; | ||
2660 | goto bail; | ||
2661 | } | ||
2662 | ppd->cpspec->ibcddrctrl &= ~(maskr << lsb); | ||
2663 | ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb); | ||
2664 | qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); | ||
2665 | qib_write_kreg(dd, kr_scratch, 0); | ||
2666 | if (setforce) { | ||
2667 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2668 | ppd->lflags |= QIBL_IB_FORCE_NOTIFY; | ||
2669 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2670 | } | ||
2671 | bail: | ||
2672 | return ret; | ||
2673 | } | ||
2674 | |||
2675 | static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) | ||
2676 | { | ||
2677 | int ret = 0; | ||
2678 | u64 val, ddr; | ||
2679 | |||
2680 | if (!strncmp(what, "ibc", 3)) { | ||
2681 | ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); | ||
2682 | val = 0; /* disable heart beat, so link will come up */ | ||
2683 | qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", | ||
2684 | ppd->dd->unit, ppd->port); | ||
2685 | } else if (!strncmp(what, "off", 3)) { | ||
2686 | ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback); | ||
2687 | /* enable heart beat again */ | ||
2688 | val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT; | ||
2689 | qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " | ||
2690 | "(normal)\n", ppd->dd->unit, ppd->port); | ||
2691 | } else | ||
2692 | ret = -EINVAL; | ||
2693 | if (!ret) { | ||
2694 | qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl); | ||
2695 | ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK | ||
2696 | << IBA7220_IBC_HRTBT_SHIFT); | ||
2697 | ppd->cpspec->ibcddrctrl = ddr | val; | ||
2698 | qib_write_kreg(ppd->dd, kr_ibcddrctrl, | ||
2699 | ppd->cpspec->ibcddrctrl); | ||
2700 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
2701 | } | ||
2702 | return ret; | ||
2703 | } | ||
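
/*
 * Editorial sketch, not part of the original patch: loopback control is
 * string-driven (typically from a diag path); anything other than "ibc" or
 * "off" is rejected with -EINVAL, and the heartbeat setting is adjusted as
 * a side effect.  The helper name is hypothetical.
 */
static int example_ibc_loopback(struct qib_pportdata *ppd, int enable)
{
        return qib_7220_set_loopback(ppd, enable ? "ibc" : "off");
}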
2704 | |||
2705 | static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, | ||
2706 | u32 updegr, u32 egrhd) | ||
2707 | { | ||
2708 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | ||
2709 | if (updegr) | ||
2710 | qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); | ||
2711 | } | ||
2712 | |||
2713 | static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd) | ||
2714 | { | ||
2715 | u32 head, tail; | ||
2716 | |||
2717 | head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); | ||
2718 | if (rcd->rcvhdrtail_kvaddr) | ||
2719 | tail = qib_get_rcvhdrtail(rcd); | ||
2720 | else | ||
2721 | tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); | ||
2722 | return head == tail; | ||
2723 | } | ||
2724 | |||
2725 | /* | ||
2726 | * Modify the RCVCTRL register in a chip-specific way. This | ||
2727 | * is a function because bit positions and (future) register | ||
2728 | * location are chip-specific, but the needed operations are | ||
2729 | * generic. <op> is a bit-mask because we often want to | ||
2730 | * do multiple modifications. | ||
2731 | */ | ||
2732 | static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op, | ||
2733 | int ctxt) | ||
2734 | { | ||
2735 | struct qib_devdata *dd = ppd->dd; | ||
2736 | u64 mask, val; | ||
2737 | unsigned long flags; | ||
2738 | |||
2739 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
2740 | if (op & QIB_RCVCTRL_TAILUPD_ENB) | ||
2741 | dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT); | ||
2742 | if (op & QIB_RCVCTRL_TAILUPD_DIS) | ||
2743 | dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT); | ||
2744 | if (op & QIB_RCVCTRL_PKEY_ENB) | ||
2745 | dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT); | ||
2746 | if (op & QIB_RCVCTRL_PKEY_DIS) | ||
2747 | dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT); | ||
2748 | if (ctxt < 0) | ||
2749 | mask = (1ULL << dd->ctxtcnt) - 1; | ||
2750 | else | ||
2751 | mask = (1ULL << ctxt); | ||
2752 | if (op & QIB_RCVCTRL_CTXT_ENB) { | ||
2753 | /* always done for specific ctxt */ | ||
2754 | dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable)); | ||
2755 | if (!(dd->flags & QIB_NODMA_RTAIL)) | ||
2756 | dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT; | ||
2757 | /* Write these registers before the context is enabled. */ | ||
2758 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, | ||
2759 | dd->rcd[ctxt]->rcvhdrqtailaddr_phys); | ||
2760 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, | ||
2761 | dd->rcd[ctxt]->rcvhdrq_phys); | ||
2762 | dd->rcd[ctxt]->seq_cnt = 1; | ||
2763 | } | ||
2764 | if (op & QIB_RCVCTRL_CTXT_DIS) | ||
2765 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable)); | ||
2766 | if (op & QIB_RCVCTRL_INTRAVAIL_ENB) | ||
2767 | dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT); | ||
2768 | if (op & QIB_RCVCTRL_INTRAVAIL_DIS) | ||
2769 | dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT); | ||
2770 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | ||
2771 | if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) { | ||
2772 | /* arm rcv interrupt */ | ||
2773 | val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) | | ||
2774 | dd->rhdrhead_intr_off; | ||
2775 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | ||
2776 | } | ||
2777 | if (op & QIB_RCVCTRL_CTXT_ENB) { | ||
2778 | /* | ||
2779 | * Init the context registers also; if we were | ||
2780 | * disabled, tail and head should both be zero | ||
2781 | * already from the enable, but since we don't | ||
2782 | * know, we have to do it explicitly. | ||
2783 | */ | ||
2784 | val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); | ||
2785 | qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); | ||
2786 | |||
2787 | val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); | ||
2788 | dd->rcd[ctxt]->head = val; | ||
2789 | /* If kctxt, interrupt on next receive. */ | ||
2790 | if (ctxt < dd->first_user_ctxt) | ||
2791 | val |= dd->rhdrhead_intr_off; | ||
2792 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | ||
2793 | } | ||
2794 | if (op & QIB_RCVCTRL_CTXT_DIS) { | ||
2795 | if (ctxt >= 0) { | ||
2796 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0); | ||
2797 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0); | ||
2798 | } else { | ||
2799 | unsigned i; | ||
2800 | |||
2801 | for (i = 0; i < dd->cfgctxts; i++) { | ||
2802 | qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, | ||
2803 | i, 0); | ||
2804 | qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0); | ||
2805 | } | ||
2806 | } | ||
2807 | } | ||
2808 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
2809 | } | ||
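
/*
 * Editorial sketch, not part of the original patch: because <op> is a
 * bit-mask, independent modifications can be combined under one grab of
 * rcvmod_lock, e.g. enabling a context and arming its rcvhdrhead interrupt
 * in a single call.  The helper name is hypothetical.
 */
static void example_bring_up_ctxt(struct qib_pportdata *ppd, int ctxt)
{
        rcvctrl_7220_mod(ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB,
                         ctxt);
}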
2810 | |||
2811 | /* | ||
2812 | * Modify the SENDCTRL register in a chip-specific way. This | ||
2813 | * is a function because there may be multiple such registers with | ||
2814 | * slightly different layouts. To start, we assume the | ||
2815 | * "canonical" register layout of the first chips. | ||
2816 | * Chip requires no back-to-back sendctrl writes, so write | ||
2817 | * scratch register after writing sendctrl. | ||
2818 | */ | ||
2819 | static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op) | ||
2820 | { | ||
2821 | struct qib_devdata *dd = ppd->dd; | ||
2822 | u64 tmp_dd_sendctrl; | ||
2823 | unsigned long flags; | ||
2824 | |||
2825 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
2826 | |||
2827 | /* First the ones that are "sticky", saved in shadow */ | ||
2828 | if (op & QIB_SENDCTRL_CLEAR) | ||
2829 | dd->sendctrl = 0; | ||
2830 | if (op & QIB_SENDCTRL_SEND_DIS) | ||
2831 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable); | ||
2832 | else if (op & QIB_SENDCTRL_SEND_ENB) { | ||
2833 | dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable); | ||
2834 | if (dd->flags & QIB_USE_SPCL_TRIG) | ||
2835 | dd->sendctrl |= SYM_MASK(SendCtrl, | ||
2836 | SSpecialTriggerEn); | ||
2837 | } | ||
2838 | if (op & QIB_SENDCTRL_AVAIL_DIS) | ||
2839 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
2840 | else if (op & QIB_SENDCTRL_AVAIL_ENB) | ||
2841 | dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
2842 | |||
2843 | if (op & QIB_SENDCTRL_DISARM_ALL) { | ||
2844 | u32 i, last; | ||
2845 | |||
2846 | tmp_dd_sendctrl = dd->sendctrl; | ||
2847 | /* | ||
2848 | * disarm any that are not yet launched, disabling sends | ||
2849 | * and updates until done. | ||
2850 | */ | ||
2851 | last = dd->piobcnt2k + dd->piobcnt4k; | ||
2852 | tmp_dd_sendctrl &= | ||
2853 | ~(SYM_MASK(SendCtrl, SPioEnable) | | ||
2854 | SYM_MASK(SendCtrl, SendBufAvailUpd)); | ||
2855 | for (i = 0; i < last; i++) { | ||
2856 | qib_write_kreg(dd, kr_sendctrl, | ||
2857 | tmp_dd_sendctrl | | ||
2858 | SYM_MASK(SendCtrl, Disarm) | i); | ||
2859 | qib_write_kreg(dd, kr_scratch, 0); | ||
2860 | } | ||
2861 | } | ||
2862 | |||
2863 | tmp_dd_sendctrl = dd->sendctrl; | ||
2864 | |||
2865 | if (op & QIB_SENDCTRL_FLUSH) | ||
2866 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort); | ||
2867 | if (op & QIB_SENDCTRL_DISARM) | ||
2868 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | | ||
2869 | ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) << | ||
2870 | SYM_LSB(SendCtrl, DisarmPIOBuf)); | ||
2871 | if ((op & QIB_SENDCTRL_AVAIL_BLIP) && | ||
2872 | (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) | ||
2873 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
2874 | |||
2875 | qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); | ||
2876 | qib_write_kreg(dd, kr_scratch, 0); | ||
2877 | |||
2878 | if (op & QIB_SENDCTRL_AVAIL_BLIP) { | ||
2879 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
2880 | qib_write_kreg(dd, kr_scratch, 0); | ||
2881 | } | ||
2882 | |||
2883 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
2884 | |||
2885 | if (op & QIB_SENDCTRL_FLUSH) { | ||
2886 | u32 v; | ||
2887 | /* | ||
2888 | * ensure writes have hit chip, then do a few | ||
2889 | * more reads, to allow DMA of pioavail registers | ||
2890 | * to occur, so in-memory copy is in sync with | ||
2891 | * the chip. Not always safe to sleep. | ||
2892 | */ | ||
2893 | v = qib_read_kreg32(dd, kr_scratch); | ||
2894 | qib_write_kreg(dd, kr_scratch, v); | ||
2895 | v = qib_read_kreg32(dd, kr_scratch); | ||
2896 | qib_write_kreg(dd, kr_scratch, v); | ||
2897 | qib_read_kreg32(dd, kr_scratch); | ||
2898 | } | ||
2899 | } | ||
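
/*
 * Editorial sketch, not part of the original patch: the same combining of
 * operations applies to sendctrl; a teardown path can disarm all PIO
 * buffers and stop buffer-available updates with one call, and the routine
 * itself handles the required scratch write after each sendctrl write.
 * The helper name is hypothetical.
 */
static void example_quiesce_sends(struct qib_pportdata *ppd)
{
        sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_AVAIL_DIS);
}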
2900 | |||
2901 | /** | ||
2902 | * qib_portcntr_7220 - read a per-port counter | ||
2903 | * @ppd: the qlogic_ib per-port data | ||
2904 | * @reg: the counter to snapshot | ||
2905 | */ | ||
2906 | static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg) | ||
2907 | { | ||
2908 | u64 ret = 0ULL; | ||
2909 | struct qib_devdata *dd = ppd->dd; | ||
2910 | u16 creg; | ||
2911 | /* 0xffff for unimplemented or synthesized counters */ | ||
2912 | static const u16 xlator[] = { | ||
2913 | [QIBPORTCNTR_PKTSEND] = cr_pktsend, | ||
2914 | [QIBPORTCNTR_WORDSEND] = cr_wordsend, | ||
2915 | [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount, | ||
2916 | [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount, | ||
2917 | [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount, | ||
2918 | [QIBPORTCNTR_SENDSTALL] = cr_sendstall, | ||
2919 | [QIBPORTCNTR_PKTRCV] = cr_pktrcv, | ||
2920 | [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount, | ||
2921 | [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount, | ||
2922 | [QIBPORTCNTR_RCVEBP] = cr_rcvebp, | ||
2923 | [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl, | ||
2924 | [QIBPORTCNTR_WORDRCV] = cr_wordrcv, | ||
2925 | [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt, | ||
2926 | [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr, | ||
2927 | [QIBPORTCNTR_RXVLERR] = cr_rxvlerr, | ||
2928 | [QIBPORTCNTR_ERRICRC] = cr_erricrc, | ||
2929 | [QIBPORTCNTR_ERRVCRC] = cr_errvcrc, | ||
2930 | [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc, | ||
2931 | [QIBPORTCNTR_BADFORMAT] = cr_badformat, | ||
2932 | [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen, | ||
2933 | [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr, | ||
2934 | [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen, | ||
2935 | [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl, | ||
2936 | [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl, | ||
2937 | [QIBPORTCNTR_ERRLINK] = cr_errlink, | ||
2938 | [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown, | ||
2939 | [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov, | ||
2940 | [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr, | ||
2941 | [QIBPORTCNTR_PSINTERVAL] = cr_psinterval, | ||
2942 | [QIBPORTCNTR_PSSTART] = cr_psstart, | ||
2943 | [QIBPORTCNTR_PSSTAT] = cr_psstat, | ||
2944 | [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt, | ||
2945 | [QIBPORTCNTR_ERRPKEY] = cr_errpkey, | ||
2946 | [QIBPORTCNTR_KHDROVFL] = 0xffff, | ||
2947 | }; | ||
2948 | |||
2949 | if (reg >= ARRAY_SIZE(xlator)) { | ||
2950 | qib_devinfo(ppd->dd->pcidev, | ||
2951 | "Unimplemented portcounter %u\n", reg); | ||
2952 | goto done; | ||
2953 | } | ||
2954 | creg = xlator[reg]; | ||
2955 | |||
2956 | if (reg == QIBPORTCNTR_KHDROVFL) { | ||
2957 | int i; | ||
2958 | |||
2959 | /* sum over all kernel contexts */ | ||
2960 | for (i = 0; i < dd->first_user_ctxt; i++) | ||
2961 | ret += read_7220_creg32(dd, cr_portovfl + i); | ||
2962 | } | ||
2963 | if (creg == 0xffff) | ||
2964 | goto done; | ||
2965 | |||
2966 | /* | ||
2967 | * only fast incrementing counters are 64-bit; use 32-bit reads to | ||
2968 | * avoid two independent reads when on Opteron | ||
2969 | */ | ||
2970 | if ((creg == cr_wordsend || creg == cr_wordrcv || | ||
2971 | creg == cr_pktsend || creg == cr_pktrcv)) | ||
2972 | ret = read_7220_creg(dd, creg); | ||
2973 | else | ||
2974 | ret = read_7220_creg32(dd, creg); | ||
2975 | if (creg == cr_ibsymbolerr) { | ||
2976 | if (dd->pport->cpspec->ibdeltainprog) | ||
2977 | ret -= ret - ppd->cpspec->ibsymsnap; | ||
2978 | ret -= dd->pport->cpspec->ibsymdelta; | ||
2979 | } else if (creg == cr_iblinkerrrecov) { | ||
2980 | if (dd->pport->cpspec->ibdeltainprog) | ||
2981 | ret -= ret - ppd->cpspec->iblnkerrsnap; | ||
2982 | ret -= dd->pport->cpspec->iblnkerrdelta; | ||
2983 | } | ||
2984 | done: | ||
2985 | return ret; | ||
2986 | } | ||
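
/*
 * Editorial sketch, not part of the original patch: callers use the
 * chip-independent QIBPORTCNTR_* names; xlator[] maps them to 7220 chip
 * registers, and synthesized counters such as KHDROVFL are summed over the
 * kernel contexts.  The helper name is hypothetical.
 */
static u64 example_symbol_errors(struct qib_pportdata *ppd)
{
        return qib_portcntr_7220(ppd, QIBPORTCNTR_IBSYMBOLERR);
}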
2987 | |||
2988 | /* | ||
2989 | * Device counter names (not port-specific), one line per stat, | ||
2990 | * single string. Used by utilities like ipathstats to print the stats | ||
2991 | * in a way which works for different versions of drivers, without changing | ||
2992 | * the utility. Names need to be 12 chars or less (w/o newline), for proper | ||
2993 | * display by utility. | ||
2994 | * Non-error counters are first. | ||
2995 | * Start of "error" conters is indicated by a leading "E " on the first | ||
2996 | * "error" counter, and doesn't count in label length. | ||
2997 | * The EgrOvfl list needs to be last so we truncate them at the configured | ||
2998 | * context count for the device. | ||
2999 | * cntr7220indices contains the corresponding register indices. | ||
3000 | */ | ||
3001 | static const char cntr7220names[] = | ||
3002 | "Interrupts\n" | ||
3003 | "HostBusStall\n" | ||
3004 | "E RxTIDFull\n" | ||
3005 | "RxTIDInvalid\n" | ||
3006 | "Ctxt0EgrOvfl\n" | ||
3007 | "Ctxt1EgrOvfl\n" | ||
3008 | "Ctxt2EgrOvfl\n" | ||
3009 | "Ctxt3EgrOvfl\n" | ||
3010 | "Ctxt4EgrOvfl\n" | ||
3011 | "Ctxt5EgrOvfl\n" | ||
3012 | "Ctxt6EgrOvfl\n" | ||
3013 | "Ctxt7EgrOvfl\n" | ||
3014 | "Ctxt8EgrOvfl\n" | ||
3015 | "Ctxt9EgrOvfl\n" | ||
3016 | "Ctx10EgrOvfl\n" | ||
3017 | "Ctx11EgrOvfl\n" | ||
3018 | "Ctx12EgrOvfl\n" | ||
3019 | "Ctx13EgrOvfl\n" | ||
3020 | "Ctx14EgrOvfl\n" | ||
3021 | "Ctx15EgrOvfl\n" | ||
3022 | "Ctx16EgrOvfl\n"; | ||
3023 | |||
3024 | static const size_t cntr7220indices[] = { | ||
3025 | cr_lbint, | ||
3026 | cr_lbflowstall, | ||
3027 | cr_errtidfull, | ||
3028 | cr_errtidvalid, | ||
3029 | cr_portovfl + 0, | ||
3030 | cr_portovfl + 1, | ||
3031 | cr_portovfl + 2, | ||
3032 | cr_portovfl + 3, | ||
3033 | cr_portovfl + 4, | ||
3034 | cr_portovfl + 5, | ||
3035 | cr_portovfl + 6, | ||
3036 | cr_portovfl + 7, | ||
3037 | cr_portovfl + 8, | ||
3038 | cr_portovfl + 9, | ||
3039 | cr_portovfl + 10, | ||
3040 | cr_portovfl + 11, | ||
3041 | cr_portovfl + 12, | ||
3042 | cr_portovfl + 13, | ||
3043 | cr_portovfl + 14, | ||
3044 | cr_portovfl + 15, | ||
3045 | cr_portovfl + 16, | ||
3046 | }; | ||
3047 | |||
3048 | /* | ||
3049 | * same as cntr7220names and cntr7220indices, but for port-specific counters. | ||
3050 | * portcntr7220indices is somewhat complicated by some registers needing | ||
3051 | * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG | ||
3052 | */ | ||
3053 | static const char portcntr7220names[] = | ||
3054 | "TxPkt\n" | ||
3055 | "TxFlowPkt\n" | ||
3056 | "TxWords\n" | ||
3057 | "RxPkt\n" | ||
3058 | "RxFlowPkt\n" | ||
3059 | "RxWords\n" | ||
3060 | "TxFlowStall\n" | ||
3061 | "TxDmaDesc\n" /* 7220 and 7322-only */ | ||
3062 | "E RxDlidFltr\n" /* 7220 and 7322-only */ | ||
3063 | "IBStatusChng\n" | ||
3064 | "IBLinkDown\n" | ||
3065 | "IBLnkRecov\n" | ||
3066 | "IBRxLinkErr\n" | ||
3067 | "IBSymbolErr\n" | ||
3068 | "RxLLIErr\n" | ||
3069 | "RxBadFormat\n" | ||
3070 | "RxBadLen\n" | ||
3071 | "RxBufOvrfl\n" | ||
3072 | "RxEBP\n" | ||
3073 | "RxFlowCtlErr\n" | ||
3074 | "RxICRCerr\n" | ||
3075 | "RxLPCRCerr\n" | ||
3076 | "RxVCRCerr\n" | ||
3077 | "RxInvalLen\n" | ||
3078 | "RxInvalPKey\n" | ||
3079 | "RxPktDropped\n" | ||
3080 | "TxBadLength\n" | ||
3081 | "TxDropped\n" | ||
3082 | "TxInvalLen\n" | ||
3083 | "TxUnderrun\n" | ||
3084 | "TxUnsupVL\n" | ||
3085 | "RxLclPhyErr\n" /* 7220 and 7322-only */ | ||
3086 | "RxVL15Drop\n" /* 7220 and 7322-only */ | ||
3087 | "RxVlErr\n" /* 7220 and 7322-only */ | ||
3088 | "XcessBufOvfl\n" /* 7220 and 7322-only */ | ||
3089 | ; | ||
3090 | |||
3091 | #define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */ | ||
3092 | static const size_t portcntr7220indices[] = { | ||
3093 | QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, | ||
3094 | cr_pktsendflow, | ||
3095 | QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, | ||
3096 | QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, | ||
3097 | cr_pktrcvflowctrl, | ||
3098 | QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, | ||
3099 | QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, | ||
3100 | cr_txsdmadesc, | ||
3101 | cr_rxdlidfltr, | ||
3102 | cr_ibstatuschange, | ||
3103 | QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, | ||
3104 | QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, | ||
3105 | QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, | ||
3106 | QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, | ||
3107 | QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, | ||
3108 | QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, | ||
3109 | QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, | ||
3110 | QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, | ||
3111 | QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, | ||
3112 | cr_rcvflowctrl_err, | ||
3113 | QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, | ||
3114 | QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, | ||
3115 | QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, | ||
3116 | QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, | ||
3117 | QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, | ||
3118 | QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, | ||
3119 | cr_invalidslen, | ||
3120 | cr_senddropped, | ||
3121 | cr_errslen, | ||
3122 | cr_sendunderrun, | ||
3123 | cr_txunsupvl, | ||
3124 | QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG, | ||
3125 | QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG, | ||
3126 | QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG, | ||
3127 | QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG, | ||
3128 | }; | ||
3129 | |||
3130 | /* do all the setup to make the counter reads efficient later */ | ||
3131 | static void init_7220_cntrnames(struct qib_devdata *dd) | ||
3132 | { | ||
3133 | int i, j = 0; | ||
3134 | char *s; | ||
3135 | |||
3136 | for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts; | ||
3137 | i++) { | ||
3138 | /* we always have at least one counter before the egrovfl */ | ||
3139 | if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12)) | ||
3140 | j = 1; | ||
3141 | s = strchr(s + 1, '\n'); | ||
3142 | if (s && j) | ||
3143 | j++; | ||
3144 | } | ||
3145 | dd->cspec->ncntrs = i; | ||
3146 | if (!s) | ||
3147 | /* full list; size is without terminating null */ | ||
3148 | dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1; | ||
3149 | else | ||
3150 | dd->cspec->cntrnamelen = 1 + s - cntr7220names; | ||
3151 | dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs | ||
3152 | * sizeof(u64), GFP_KERNEL); | ||
3153 | if (!dd->cspec->cntrs) | ||
3154 | qib_dev_err(dd, "Failed allocation for counters\n"); | ||
3155 | |||
3156 | for (i = 0, s = (char *)portcntr7220names; s; i++) | ||
3157 | s = strchr(s + 1, '\n'); | ||
3158 | dd->cspec->nportcntrs = i - 1; | ||
3159 | dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1; | ||
3160 | dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs | ||
3161 | * sizeof(u64), GFP_KERNEL); | ||
3162 | if (!dd->cspec->portcntrs) | ||
3163 | qib_dev_err(dd, "Failed allocation for portcounters\n"); | ||
3164 | } | ||
3165 | |||
3166 | static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep, | ||
3167 | u64 **cntrp) | ||
3168 | { | ||
3169 | u32 ret; | ||
3170 | |||
3171 | if (!dd->cspec->cntrs) { | ||
3172 | ret = 0; | ||
3173 | goto done; | ||
3174 | } | ||
3175 | |||
3176 | if (namep) { | ||
3177 | *namep = (char *)cntr7220names; | ||
3178 | ret = dd->cspec->cntrnamelen; | ||
3179 | if (pos >= ret) | ||
3180 | ret = 0; /* final read after getting everything */ | ||
3181 | } else { | ||
3182 | u64 *cntr = dd->cspec->cntrs; | ||
3183 | int i; | ||
3184 | |||
3185 | ret = dd->cspec->ncntrs * sizeof(u64); | ||
3186 | if (!cntr || pos >= ret) { | ||
3187 | /* everything read, or couldn't get memory */ | ||
3188 | ret = 0; | ||
3189 | goto done; | ||
3190 | } | ||
3191 | |||
3192 | *cntrp = cntr; | ||
3193 | for (i = 0; i < dd->cspec->ncntrs; i++) | ||
3194 | *cntr++ = read_7220_creg32(dd, cntr7220indices[i]); | ||
3195 | } | ||
3196 | done: | ||
3197 | return ret; | ||
3198 | } | ||
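
/*
 * Editorial sketch, not part of the original patch: the counter read
 * interface is two-phase.  A call with namep set returns the
 * newline-separated label string; a call with cntrp set fills the matching
 * u64 values, so a consumer pairs the i-th label with the i-th value.
 * The helper name is hypothetical.
 */
static void example_snapshot_devcntrs(struct qib_devdata *dd)
{
        char *names;
        u64 *vals;
        u32 name_bytes = qib_read_7220cntrs(dd, 0, &names, NULL);
        u32 val_bytes = qib_read_7220cntrs(dd, 0, NULL, &vals);

        /* name_bytes of labels, val_bytes / sizeof(u64) counter values */
        (void)name_bytes;
        (void)val_bytes;
}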
3199 | |||
3200 | static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, | ||
3201 | char **namep, u64 **cntrp) | ||
3202 | { | ||
3203 | u32 ret; | ||
3204 | |||
3205 | if (!dd->cspec->portcntrs) { | ||
3206 | ret = 0; | ||
3207 | goto done; | ||
3208 | } | ||
3209 | if (namep) { | ||
3210 | *namep = (char *)portcntr7220names; | ||
3211 | ret = dd->cspec->portcntrnamelen; | ||
3212 | if (pos >= ret) | ||
3213 | ret = 0; /* final read after getting everything */ | ||
3214 | } else { | ||
3215 | u64 *cntr = dd->cspec->portcntrs; | ||
3216 | struct qib_pportdata *ppd = &dd->pport[port]; | ||
3217 | int i; | ||
3218 | |||
3219 | ret = dd->cspec->nportcntrs * sizeof(u64); | ||
3220 | if (!cntr || pos >= ret) { | ||
3221 | /* everything read, or couldn't get memory */ | ||
3222 | ret = 0; | ||
3223 | goto done; | ||
3224 | } | ||
3225 | *cntrp = cntr; | ||
3226 | for (i = 0; i < dd->cspec->nportcntrs; i++) { | ||
3227 | if (portcntr7220indices[i] & _PORT_VIRT_FLAG) | ||
3228 | *cntr++ = qib_portcntr_7220(ppd, | ||
3229 | portcntr7220indices[i] & | ||
3230 | ~_PORT_VIRT_FLAG); | ||
3231 | else | ||
3232 | *cntr++ = read_7220_creg32(dd, | ||
3233 | portcntr7220indices[i]); | ||
3234 | } | ||
3235 | } | ||
3236 | done: | ||
3237 | return ret; | ||
3238 | } | ||
3239 | |||
3240 | /** | ||
3241 | * qib_get_7220_faststats - get word counters from chip before they overflow | ||
3242 | * @opaque: contains a pointer to the qlogic_ib device qib_devdata | ||
3243 | * | ||
3244 | * This needs more work; in particular, a decision on whether we really | ||
3245 | * need traffic_wds done the way it is. | ||
3246 | * Called from add_timer. | ||
3247 | */ | ||
3248 | static void qib_get_7220_faststats(unsigned long opaque) | ||
3249 | { | ||
3250 | struct qib_devdata *dd = (struct qib_devdata *) opaque; | ||
3251 | struct qib_pportdata *ppd = dd->pport; | ||
3252 | unsigned long flags; | ||
3253 | u64 traffic_wds; | ||
3254 | |||
3255 | /* | ||
3256 | * don't access the chip while running diags, or memory diags can | ||
3257 | * fail | ||
3258 | */ | ||
3259 | if (!(dd->flags & QIB_INITTED) || dd->diag_client) | ||
3260 | /* but re-arm the timer, for diags case; won't hurt other */ | ||
3261 | goto done; | ||
3262 | |||
3263 | /* | ||
3264 | * We now try to maintain an activity timer, based on traffic | ||
3265 | * exceeding a threshold, so we need to check the word-counts | ||
3266 | * even if they are 64-bit. | ||
3267 | */ | ||
3268 | traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) + | ||
3269 | qib_portcntr_7220(ppd, cr_wordrcv); | ||
3270 | spin_lock_irqsave(&dd->eep_st_lock, flags); | ||
3271 | traffic_wds -= dd->traffic_wds; | ||
3272 | dd->traffic_wds += traffic_wds; | ||
3273 | if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) | ||
3274 | atomic_add(5, &dd->active_time); /* S/B #define */ | ||
3275 | spin_unlock_irqrestore(&dd->eep_st_lock, flags); | ||
3276 | done: | ||
3277 | mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); | ||
3278 | } | ||
3279 | |||
3280 | /* | ||
3281 | * If we are using MSI, try to fallback to INTx. | ||
3282 | */ | ||
3283 | static int qib_7220_intr_fallback(struct qib_devdata *dd) | ||
3284 | { | ||
3285 | if (!dd->msi_lo) | ||
3286 | return 0; | ||
3287 | |||
3288 | qib_devinfo(dd->pcidev, "MSI interrupt not detected," | ||
3289 | " trying INTx interrupts\n"); | ||
3290 | qib_7220_free_irq(dd); | ||
3291 | qib_enable_intx(dd->pcidev); | ||
3292 | /* | ||
3293 | * Some newer kernels require free_irq before disable_msi, | ||
3294 | * and irq can be changed during disable and INTx enable | ||
3295 | * and we therefore need to use the pcidev->irq value, | ||
3296 | * not our saved MSI value. | ||
3297 | */ | ||
3298 | dd->cspec->irq = dd->pcidev->irq; | ||
3299 | qib_setup_7220_interrupt(dd); | ||
3300 | return 1; | ||
3301 | } | ||
3302 | |||
3303 | /* | ||
3304 | * Reset the XGXS (between serdes and IBC). Slightly less intrusive | ||
3305 | * than resetting the IBC or external link state, and useful in some | ||
3306 | * cases to cause some retraining. To do this right, we reset IBC | ||
3307 | * as well. | ||
3308 | */ | ||
3309 | static void qib_7220_xgxs_reset(struct qib_pportdata *ppd) | ||
3310 | { | ||
3311 | u64 val, prev_val; | ||
3312 | struct qib_devdata *dd = ppd->dd; | ||
3313 | |||
3314 | prev_val = qib_read_kreg64(dd, kr_xgxs_cfg); | ||
3315 | val = prev_val | QLOGIC_IB_XGXS_RESET; | ||
3316 | prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */ | ||
3317 | qib_write_kreg(dd, kr_control, | ||
3318 | dd->control & ~QLOGIC_IB_C_LINKENABLE); | ||
3319 | qib_write_kreg(dd, kr_xgxs_cfg, val); | ||
3320 | qib_read_kreg32(dd, kr_scratch); | ||
3321 | qib_write_kreg(dd, kr_xgxs_cfg, prev_val); | ||
3322 | qib_write_kreg(dd, kr_control, dd->control); | ||
3323 | } | ||
3324 | |||
3325 | /* | ||
3326 | * For this chip, we want to use the same buffer every time | ||
3327 | * when we are trying to bring the link up (they are always VL15 | ||
3328 | * packets). At that link state the packet should always go out immediately | ||
3329 | * (or at least be discarded at the tx interface if the link is down). | ||
3330 | * If it doesn't, and the buffer isn't available, that means some other | ||
3331 | * sender has gotten ahead of us, and is preventing our packet from going | ||
3332 | * out. In that case, we flush all packets, and try again. If that still | ||
3333 | * fails, we fail the request, and hope things work the next time around. | ||
3334 | * | ||
3335 | * We don't need very complicated heuristics on whether the packet had | ||
3336 | * time to go out or not, since even at SDR 1X, it goes out in very short | ||
3337 | * time periods, covered by the chip reads done here and as part of the | ||
3338 | * flush. | ||
3339 | */ | ||
3340 | static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum) | ||
3341 | { | ||
3342 | u32 __iomem *buf; | ||
3343 | u32 lbuf = ppd->dd->cspec->lastbuf_for_pio; | ||
3344 | int do_cleanup; | ||
3345 | unsigned long flags; | ||
3346 | |||
3347 | /* | ||
3348 | * always blip to get avail list updated, since it's almost | ||
3349 | * always needed, and is fairly cheap. | ||
3350 | */ | ||
3351 | sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
3352 | qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */ | ||
3353 | buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf); | ||
3354 | if (buf) | ||
3355 | goto done; | ||
3356 | |||
3357 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
3358 | if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle && | ||
3359 | ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) { | ||
3360 | __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down); | ||
3361 | do_cleanup = 0; | ||
3362 | } else { | ||
3363 | do_cleanup = 1; | ||
3364 | qib_7220_sdma_hw_clean_up(ppd); | ||
3365 | } | ||
3366 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
3367 | |||
3368 | if (do_cleanup) { | ||
3369 | qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */ | ||
3370 | buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf); | ||
3371 | } | ||
3372 | done: | ||
3373 | return buf; | ||
3374 | } | ||
3375 | |||
3376 | /* | ||
3377 | * This code for non-IBTA-compliant IB speed negotiation is only known to | ||
3378 | * work for the SDR to DDR transition, and only between an HCA and a switch | ||
3379 | * with recent firmware. It is based on observed heuristics, rather than | ||
3380 | * actual knowledge of the non-compliant speed negotiation. | ||
3381 | * It has a number of hard-coded fields, since the hope is to rewrite this | ||
3382 | * when a spec is available on how the negotiation is intended to work. | ||
3383 | */ | ||
3384 | static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr, | ||
3385 | u32 dcnt, u32 *data) | ||
3386 | { | ||
3387 | int i; | ||
3388 | u64 pbc; | ||
3389 | u32 __iomem *piobuf; | ||
3390 | u32 pnum; | ||
3391 | struct qib_devdata *dd = ppd->dd; | ||
3392 | |||
3393 | i = 0; | ||
3394 | pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */ | ||
3395 | pbc |= PBC_7220_VL15_SEND; | ||
3396 | while (!(piobuf = get_7220_link_buf(ppd, &pnum))) { | ||
3397 | if (i++ > 5) | ||
3398 | return; | ||
3399 | udelay(2); | ||
3400 | } | ||
3401 | sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum)); | ||
3402 | writeq(pbc, piobuf); | ||
3403 | qib_flush_wc(); | ||
3404 | qib_pio_copy(piobuf + 2, hdr, 7); | ||
3405 | qib_pio_copy(piobuf + 9, data, dcnt); | ||
3406 | if (dd->flags & QIB_USE_SPCL_TRIG) { | ||
3407 | u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023; | ||
3408 | |||
3409 | qib_flush_wc(); | ||
3410 | __raw_writel(0xaebecede, piobuf + spcl_off); | ||
3411 | } | ||
3412 | qib_flush_wc(); | ||
3413 | qib_sendbuf_done(dd, pnum); | ||
3414 | } | ||
3415 | |||
3416 | /* | ||
3417 | * _start packet gets sent twice at start, _done gets sent twice at end | ||
3418 | */ | ||
3419 | static void autoneg_7220_send(struct qib_pportdata *ppd, int which) | ||
3420 | { | ||
3421 | struct qib_devdata *dd = ppd->dd; | ||
3422 | static u32 swapped; | ||
3423 | u32 dw, i, hcnt, dcnt, *data; | ||
3424 | static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba }; | ||
3425 | static u32 madpayload_start[0x40] = { | ||
3426 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | ||
3427 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | ||
3428 | 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */ | ||
3429 | }; | ||
3430 | static u32 madpayload_done[0x40] = { | ||
3431 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | ||
3432 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | ||
3433 | 0x40000001, 0x1388, 0x15e, /* rest 0's */ | ||
3434 | }; | ||
3435 | |||
3436 | dcnt = ARRAY_SIZE(madpayload_start); | ||
3437 | hcnt = ARRAY_SIZE(hdr); | ||
3438 | if (!swapped) { | ||
3439 | /* for maintainability, do it at runtime */ | ||
3440 | for (i = 0; i < hcnt; i++) { | ||
3441 | dw = (__force u32) cpu_to_be32(hdr[i]); | ||
3442 | hdr[i] = dw; | ||
3443 | } | ||
3444 | for (i = 0; i < dcnt; i++) { | ||
3445 | dw = (__force u32) cpu_to_be32(madpayload_start[i]); | ||
3446 | madpayload_start[i] = dw; | ||
3447 | dw = (__force u32) cpu_to_be32(madpayload_done[i]); | ||
3448 | madpayload_done[i] = dw; | ||
3449 | } | ||
3450 | swapped = 1; | ||
3451 | } | ||
3452 | |||
3453 | data = which ? madpayload_done : madpayload_start; | ||
3454 | |||
3455 | autoneg_7220_sendpkt(ppd, hdr, dcnt, data); | ||
3456 | qib_read_kreg64(dd, kr_scratch); | ||
3457 | udelay(2); | ||
3458 | autoneg_7220_sendpkt(ppd, hdr, dcnt, data); | ||
3459 | qib_read_kreg64(dd, kr_scratch); | ||
3460 | udelay(2); | ||
3461 | } | ||
3462 | |||
3463 | /* | ||
3464 | * Do the absolute minimum to cause an IB speed change, and make it | ||
3465 | * ready, but don't actually trigger the change. The caller will | ||
3466 | * do that when ready (if link is in Polling training state, it will | ||
3467 | * happen immediately, otherwise when link next goes down) | ||
3468 | * | ||
3469 | * This routine should only be used as part of the DDR autonegotiation | ||
3470 | * code for devices that are not compliant with IB 1.2 (or code that | ||
3471 | * fixes things up for same). | ||
3472 | * | ||
3473 | * When link has gone down, and autoneg enabled, or autoneg has | ||
3474 | * failed and we give up until next time we set both speeds, and | ||
3475 | * then we want IBTA enabled as well as "use max enabled speed". | ||
3476 | */ | ||
3477 | static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) | ||
3478 | { | ||
3479 | ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
3480 | IBA7220_IBC_IBTA_1_2_MASK); | ||
3481 | |||
3482 | if (speed == (QIB_IB_SDR | QIB_IB_DDR)) | ||
3483 | ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK | | ||
3484 | IBA7220_IBC_IBTA_1_2_MASK; | ||
3485 | else | ||
3486 | ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ? | ||
3487 | IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR; | ||
3488 | |||
3489 | qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl); | ||
3490 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
3491 | } | ||
3492 | |||
3493 | /* | ||
3494 | * This routine is only used when we are not talking to another | ||
3495 | * IB 1.2-compliant device that we think can do DDR. | ||
3496 | * (This includes all existing switch chips as of Oct 2007.) | ||
3497 | * 1.2-compliant devices go directly to DDR prior to reaching INIT | ||
3498 | */ | ||
3499 | static void try_7220_autoneg(struct qib_pportdata *ppd) | ||
3500 | { | ||
3501 | unsigned long flags; | ||
3502 | |||
3503 | /* | ||
3504 | * Required for older non-IB1.2 DDR switches. Newer | ||
3505 | * non-IB-compliant switches don't need it, but so far, | ||
3506 | * aren't bothered by it either. "Magic constant" | ||
3507 | */ | ||
3508 | qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07); | ||
3509 | |||
3510 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3511 | ppd->lflags |= QIBL_IB_AUTONEG_INPROG; | ||
3512 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3513 | autoneg_7220_send(ppd, 0); | ||
3514 | set_7220_ibspeed_fast(ppd, QIB_IB_DDR); | ||
3515 | |||
3516 | toggle_7220_rclkrls(ppd->dd); | ||
3517 | /* 2 msec is minimum length of a poll cycle */ | ||
3518 | schedule_delayed_work(&ppd->cpspec->autoneg_work, | ||
3519 | msecs_to_jiffies(2)); | ||
3520 | } | ||
3521 | |||
3522 | /* | ||
3523 | * Handle the empirically determined mechanism for auto-negotiation | ||
3524 | * of DDR speed with switches. | ||
3525 | */ | ||
3526 | static void autoneg_7220_work(struct work_struct *work) | ||
3527 | { | ||
3528 | struct qib_pportdata *ppd; | ||
3529 | struct qib_devdata *dd; | ||
3530 | u64 startms; | ||
3531 | u32 i; | ||
3532 | unsigned long flags; | ||
3533 | |||
3534 | ppd = &container_of(work, struct qib_chippport_specific, | ||
3535 | autoneg_work.work)->pportdata; | ||
3536 | dd = ppd->dd; | ||
3537 | |||
3538 | startms = jiffies_to_msecs(jiffies); | ||
3539 | |||
3540 | /* | ||
3541 | * Busy wait for this first part; it should be at most a | ||
3542 | * few hundred usec, since we scheduled ourselves for 2msec. | ||
3543 | */ | ||
3544 | for (i = 0; i < 25; i++) { | ||
3545 | if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState) | ||
3546 | == IB_7220_LT_STATE_POLLQUIET) { | ||
3547 | qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE); | ||
3548 | break; | ||
3549 | } | ||
3550 | udelay(100); | ||
3551 | } | ||
3552 | |||
3553 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
3554 | goto done; /* we got there early or told to stop */ | ||
3555 | |||
3556 | /* we expect this to time out */ | ||
3557 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
3558 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
3559 | msecs_to_jiffies(90))) | ||
3560 | goto done; | ||
3561 | |||
3562 | toggle_7220_rclkrls(dd); | ||
3563 | |||
3564 | /* we expect this to time out */ | ||
3565 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
3566 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
3567 | msecs_to_jiffies(1700))) | ||
3568 | goto done; | ||
3569 | |||
3570 | set_7220_ibspeed_fast(ppd, QIB_IB_SDR); | ||
3571 | toggle_7220_rclkrls(dd); | ||
3572 | |||
3573 | /* | ||
3574 | * Wait up to 250 msec for link to train and get to INIT at DDR; | ||
3575 | * this should terminate early. | ||
3576 | */ | ||
3577 | wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
3578 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
3579 | msecs_to_jiffies(250)); | ||
3580 | done: | ||
3581 | if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) { | ||
3582 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3583 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | ||
3584 | if (dd->cspec->autoneg_tries == AUTONEG_TRIES) { | ||
3585 | ppd->lflags |= QIBL_IB_AUTONEG_FAILED; | ||
3586 | dd->cspec->autoneg_tries = 0; | ||
3587 | } | ||
3588 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3589 | set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled); | ||
3590 | } | ||
3591 | } | ||
3592 | |||
3593 | static u32 qib_7220_iblink_state(u64 ibcs) | ||
3594 | { | ||
3595 | u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState); | ||
3596 | |||
3597 | switch (state) { | ||
3598 | case IB_7220_L_STATE_INIT: | ||
3599 | state = IB_PORT_INIT; | ||
3600 | break; | ||
3601 | case IB_7220_L_STATE_ARM: | ||
3602 | state = IB_PORT_ARMED; | ||
3603 | break; | ||
3604 | case IB_7220_L_STATE_ACTIVE: | ||
3605 | /* fall through */ | ||
3606 | case IB_7220_L_STATE_ACT_DEFER: | ||
3607 | state = IB_PORT_ACTIVE; | ||
3608 | break; | ||
3609 | default: /* fall through */ | ||
3610 | case IB_7220_L_STATE_DOWN: | ||
3611 | state = IB_PORT_DOWN; | ||
3612 | break; | ||
3613 | } | ||
3614 | return state; | ||
3615 | } | ||
3616 | |||
3617 | /* returns the IBTA port state, rather than the IBC link training state */ | ||
3618 | static u8 qib_7220_phys_portstate(u64 ibcs) | ||
3619 | { | ||
3620 | u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState); | ||
3621 | return qib_7220_physportstate[state]; | ||
3622 | } | ||
3623 | |||
3624 | static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | ||
3625 | { | ||
3626 | int ret = 0, symadj = 0; | ||
3627 | struct qib_devdata *dd = ppd->dd; | ||
3628 | unsigned long flags; | ||
3629 | |||
3630 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3631 | ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; | ||
3632 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3633 | |||
3634 | if (!ibup) { | ||
3635 | /* | ||
3636 | * When the link goes down we don't want AEQ running, so it | ||
3637 | * won't interfere with IBC training, etc., and we need | ||
3638 | * to go back to the static SerDes preset values. | ||
3639 | */ | ||
3640 | if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | ||
3641 | QIBL_IB_AUTONEG_INPROG))) | ||
3642 | set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled); | ||
3643 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | ||
3644 | qib_sd7220_presets(dd); | ||
3645 | qib_cancel_sends(ppd); /* initial disarm, etc. */ | ||
3646 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
3647 | if (__qib_sdma_running(ppd)) | ||
3648 | __qib_sdma_process_event(ppd, | ||
3649 | qib_sdma_event_e70_go_idle); | ||
3650 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
3651 | } | ||
3652 | /* this might be better in qib_sd7220_presets() */ | ||
3653 | set_7220_relock_poll(dd, ibup); | ||
3654 | } else { | ||
3655 | if (qib_compat_ddr_negotiate && | ||
3656 | !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | ||
3657 | QIBL_IB_AUTONEG_INPROG)) && | ||
3658 | ppd->link_speed_active == QIB_IB_SDR && | ||
3659 | (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) == | ||
3660 | (QIB_IB_DDR | QIB_IB_SDR) && | ||
3661 | dd->cspec->autoneg_tries < AUTONEG_TRIES) { | ||
3662 | /* we are SDR, and DDR auto-negotiation enabled */ | ||
3663 | ++dd->cspec->autoneg_tries; | ||
3664 | if (!ppd->cpspec->ibdeltainprog) { | ||
3665 | ppd->cpspec->ibdeltainprog = 1; | ||
3666 | ppd->cpspec->ibsymsnap = read_7220_creg32(dd, | ||
3667 | cr_ibsymbolerr); | ||
3668 | ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd, | ||
3669 | cr_iblinkerrrecov); | ||
3670 | } | ||
3671 | try_7220_autoneg(ppd); | ||
3672 | ret = 1; /* no other IB status change processing */ | ||
3673 | } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | ||
3674 | ppd->link_speed_active == QIB_IB_SDR) { | ||
3675 | autoneg_7220_send(ppd, 1); | ||
3676 | set_7220_ibspeed_fast(ppd, QIB_IB_DDR); | ||
3677 | udelay(2); | ||
3678 | toggle_7220_rclkrls(dd); | ||
3679 | ret = 1; /* no other IB status change processing */ | ||
3680 | } else { | ||
3681 | if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | ||
3682 | (ppd->link_speed_active & QIB_IB_DDR)) { | ||
3683 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3684 | ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG | | ||
3685 | QIBL_IB_AUTONEG_FAILED); | ||
3686 | spin_unlock_irqrestore(&ppd->lflags_lock, | ||
3687 | flags); | ||
3688 | dd->cspec->autoneg_tries = 0; | ||
3689 | /* re-enable SDR, for next link down */ | ||
3690 | set_7220_ibspeed_fast(ppd, | ||
3691 | ppd->link_speed_enabled); | ||
3692 | wake_up(&ppd->cpspec->autoneg_wait); | ||
3693 | symadj = 1; | ||
3694 | } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) { | ||
3695 | /* | ||
3696 | * Clear autoneg failure flag, and do setup | ||
3697 | * so we'll try next time link goes down and | ||
3698 | * back to INIT (possibly connected to a | ||
3699 | * different device). | ||
3700 | */ | ||
3701 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3702 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
3703 | spin_unlock_irqrestore(&ppd->lflags_lock, | ||
3704 | flags); | ||
3705 | ppd->cpspec->ibcddrctrl |= | ||
3706 | IBA7220_IBC_IBTA_1_2_MASK; | ||
3707 | qib_write_kreg(dd, kr_ncmodectrl, 0); | ||
3708 | symadj = 1; | ||
3709 | } | ||
3710 | } | ||
3711 | |||
3712 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
3713 | symadj = 1; | ||
3714 | |||
3715 | if (!ret) { | ||
3716 | ppd->delay_mult = rate_to_delay | ||
3717 | [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1] | ||
3718 | [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1]; | ||
3719 | |||
3720 | set_7220_relock_poll(dd, ibup); | ||
3721 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
3722 | /* | ||
3723 | * Unlike 7322, the 7220 needs this, due to lack of | ||
3724 | * interrupt in some cases when we have sdma active | ||
3725 | * when the link goes down. | ||
3726 | */ | ||
3727 | if (ppd->sdma_state.current_state != | ||
3728 | qib_sdma_state_s20_idle) | ||
3729 | __qib_sdma_process_event(ppd, | ||
3730 | qib_sdma_event_e00_go_hw_down); | ||
3731 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
3732 | } | ||
3733 | } | ||
3734 | |||
3735 | if (symadj) { | ||
3736 | if (ppd->cpspec->ibdeltainprog) { | ||
3737 | ppd->cpspec->ibdeltainprog = 0; | ||
3738 | ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd, | ||
3739 | cr_ibsymbolerr) - ppd->cpspec->ibsymsnap; | ||
3740 | ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd, | ||
3741 | cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap; | ||
3742 | } | ||
3743 | } else if (!ibup && qib_compat_ddr_negotiate && | ||
3744 | !ppd->cpspec->ibdeltainprog && | ||
3745 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | ||
3746 | ppd->cpspec->ibdeltainprog = 1; | ||
3747 | ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd, | ||
3748 | cr_ibsymbolerr); | ||
3749 | ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd, | ||
3750 | cr_iblinkerrrecov); | ||
3751 | } | ||
3752 | |||
3753 | if (!ret) | ||
3754 | qib_setup_7220_setextled(ppd, ibup); | ||
3755 | return ret; | ||
3756 | } | ||
3757 | |||
3758 | /* | ||
3759 | * Does read/modify/write to appropriate registers to | ||
3760 | * set output and direction bits selected by mask. | ||
3761 | * These are in their canonical positions (e.g. lsb of | ||
3762 | * dir will end up in D48 of extctrl on existing chips). | ||
3763 | * Returns the contents of the GP Inputs. | ||
3764 | */ | ||
3765 | static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) | ||
3766 | { | ||
3767 | u64 read_val, new_out; | ||
3768 | unsigned long flags; | ||
3769 | |||
3770 | if (mask) { | ||
3771 | /* some bits being written, lock access to GPIO */ | ||
3772 | dir &= mask; | ||
3773 | out &= mask; | ||
3774 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
3775 | dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); | ||
3776 | dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); | ||
3777 | new_out = (dd->cspec->gpio_out & ~mask) | out; | ||
3778 | |||
3779 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | ||
3780 | qib_write_kreg(dd, kr_gpio_out, new_out); | ||
3781 | dd->cspec->gpio_out = new_out; | ||
3782 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
3783 | } | ||
3784 | /* | ||
3785 | * It is unlikely that a read at this time would get valid | ||
3786 | * data on a pin whose direction line was set in the same | ||
3787 | * call to this function. We include the read here because | ||
3788 | * that allows us to potentially combine a change on one pin with | ||
3789 | * a read on another, and because the old code did something like | ||
3790 | * this. | ||
3791 | */ | ||
3792 | read_val = qib_read_kreg64(dd, kr_extstatus); | ||
3793 | return SYM_FIELD(read_val, EXTStatus, GPIOIn); | ||
3794 | } | ||
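As an aside for readers less familiar with the idiom: gpio_7220_mod() above is a masked read/modify/write against shadowed registers. The sketch below is a minimal userspace restatement of that pattern, with made-up names (gpio_shadow, gpio_rmw, dir_lsb) that are not part of the driver; it only illustrates how the mask limits which direction and output bits change while the shadow copies stay in sync.

#include <stdint.h>

/* Hypothetical shadow state standing in for dd->cspec in the driver. */
struct gpio_shadow {
	uint64_t extctrl;   /* cached direction (output-enable) bits */
	uint64_t gpio_out;  /* cached output bits */
};

/* Apply 'out' and 'dir' only for bits selected by 'mask'; everything else
 * keeps its previous shadow value.  'dir_lsb' is where the direction bits
 * live within the control register. */
static void gpio_rmw(struct gpio_shadow *s, uint32_t out, uint32_t dir,
		     uint32_t mask, unsigned dir_lsb)
{
	if (!mask)
		return;                                       /* nothing selected */
	dir &= mask;
	out &= mask;
	s->extctrl &= ~((uint64_t)mask << dir_lsb);           /* clear selected dir bits */
	s->extctrl |= (uint64_t)dir << dir_lsb;               /* then set the new ones */
	s->gpio_out = (s->gpio_out & ~(uint64_t)mask) | out;
	/* a real driver would now write both shadow values to the chip,
	 * under the same lock that protects the shadows */
}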
3795 | |||
3796 | /* | ||
3797 | * Read fundamental info we need to use the chip. These are | ||
3798 | * the registers that describe chip capabilities, and are | ||
3799 | * saved in shadow registers. | ||
3800 | */ | ||
3801 | static void get_7220_chip_params(struct qib_devdata *dd) | ||
3802 | { | ||
3803 | u64 val; | ||
3804 | u32 piobufs; | ||
3805 | int mtu; | ||
3806 | |||
3807 | dd->uregbase = qib_read_kreg32(dd, kr_userregbase); | ||
3808 | |||
3809 | dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); | ||
3810 | dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); | ||
3811 | dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); | ||
3812 | dd->palign = qib_read_kreg32(dd, kr_palign); | ||
3813 | dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); | ||
3814 | dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; | ||
3815 | |||
3816 | val = qib_read_kreg64(dd, kr_sendpiosize); | ||
3817 | dd->piosize2k = val & ~0U; | ||
3818 | dd->piosize4k = val >> 32; | ||
3819 | |||
3820 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | ||
3821 | if (mtu == -1) | ||
3822 | mtu = QIB_DEFAULT_MTU; | ||
3823 | dd->pport->ibmtu = (u32)mtu; | ||
3824 | |||
3825 | val = qib_read_kreg64(dd, kr_sendpiobufcnt); | ||
3826 | dd->piobcnt2k = val & ~0U; | ||
3827 | dd->piobcnt4k = val >> 32; | ||
3828 | /* these may be adjusted in init_chip_wc_pat() */ | ||
3829 | dd->pio2kbase = (u32 __iomem *) | ||
3830 | ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); | ||
3831 | if (dd->piobcnt4k) { | ||
3832 | dd->pio4kbase = (u32 __iomem *) | ||
3833 | ((char __iomem *) dd->kregbase + | ||
3834 | (dd->piobufbase >> 32)); | ||
3835 | /* | ||
3836 | * 4K buffers take 2 pages; we use roundup just to be | ||
3837 | * paranoid; we calculate it once here, rather than on | ||
3838 | * every buffer allocation. | ||
3839 | */ | ||
3840 | dd->align4k = ALIGN(dd->piosize4k, dd->palign); | ||
3841 | } | ||
3842 | |||
3843 | piobufs = dd->piobcnt4k + dd->piobcnt2k; | ||
3844 | |||
3845 | dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / | ||
3846 | (sizeof(u64) * BITS_PER_BYTE / 2); | ||
3847 | } | ||
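The pioavregs computation at the end of this function is just a round-up division: each 64-bit buffer-available register is divided into sizeof(u64) * 8 / 2 = 32 slots, which suggests two bits of state per buffer. A minimal stand-alone restatement (the helper name is invented, not part of the driver):

/* Number of 64-bit pioavail shadow registers needed for 'piobufs' buffers,
 * assuming 32 buffers per register as in the calculation above. */
static unsigned pioavregs_needed(unsigned piobufs)
{
	return (piobufs + 31) / 32;   /* same as ALIGN(piobufs, 32) / 32 */
}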
3848 | |||
3849 | /* | ||
3850 | * The chip base addresses in cspec and cpspec have to be set | ||
3851 | * after possible init_chip_wc_pat(), rather than in | ||
3852 | * get_7220_chip_params(), so this is split out as a separate function. | ||
3853 | */ | ||
3854 | static void set_7220_baseaddrs(struct qib_devdata *dd) | ||
3855 | { | ||
3856 | u32 cregbase; | ||
3857 | /* init after possible re-map in init_chip_wc_pat() */ | ||
3858 | cregbase = qib_read_kreg32(dd, kr_counterregbase); | ||
3859 | dd->cspec->cregbase = (u64 __iomem *) | ||
3860 | ((char __iomem *) dd->kregbase + cregbase); | ||
3861 | |||
3862 | dd->egrtidbase = (u64 __iomem *) | ||
3863 | ((char __iomem *) dd->kregbase + dd->rcvegrbase); | ||
3864 | } | ||
3865 | |||
3866 | |||
3867 | #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \ | ||
3868 | SYM_MASK(SendCtrl, SPioEnable) | \ | ||
3869 | SYM_MASK(SendCtrl, SSpecialTriggerEn) | \ | ||
3870 | SYM_MASK(SendCtrl, SendBufAvailUpd) | \ | ||
3871 | SYM_MASK(SendCtrl, AvailUpdThld) | \ | ||
3872 | SYM_MASK(SendCtrl, SDmaEnable) | \ | ||
3873 | SYM_MASK(SendCtrl, SDmaIntEnable) | \ | ||
3874 | SYM_MASK(SendCtrl, SDmaHalt) | \ | ||
3875 | SYM_MASK(SendCtrl, SDmaSingleDescriptor)) | ||
3876 | |||
3877 | static int sendctrl_hook(struct qib_devdata *dd, | ||
3878 | const struct diag_observer *op, | ||
3879 | u32 offs, u64 *data, u64 mask, int only_32) | ||
3880 | { | ||
3881 | unsigned long flags; | ||
3882 | unsigned idx = offs / sizeof(u64); | ||
3883 | u64 local_data, all_bits; | ||
3884 | |||
3885 | if (idx != kr_sendctrl) { | ||
3886 | qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n", | ||
3887 | offs, only_32 ? "32" : "64"); | ||
3888 | return 0; | ||
3889 | } | ||
3890 | |||
3891 | all_bits = ~0ULL; | ||
3892 | if (only_32) | ||
3893 | all_bits >>= 32; | ||
3894 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
3895 | if ((mask & all_bits) != all_bits) { | ||
3896 | /* | ||
3897 | * At least some mask bits are zero, so we need | ||
3898 | * to read. The judgement call is whether from | ||
3899 | * reg or shadow. First-cut: read reg, and complain | ||
3900 | * if any bits which should be shadowed are different | ||
3901 | * from their shadowed value. | ||
3902 | */ | ||
3903 | if (only_32) | ||
3904 | local_data = (u64)qib_read_kreg32(dd, idx); | ||
3905 | else | ||
3906 | local_data = qib_read_kreg64(dd, idx); | ||
3907 | qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n", | ||
3908 | (u32)local_data, (u32)dd->sendctrl); | ||
3909 | if ((local_data & SENDCTRL_SHADOWED) != | ||
3910 | (dd->sendctrl & SENDCTRL_SHADOWED)) | ||
3911 | qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n", | ||
3912 | (u32)local_data, (u32) dd->sendctrl); | ||
3913 | *data = (local_data & ~mask) | (*data & mask); | ||
3914 | } | ||
3915 | if (mask) { | ||
3916 | /* | ||
3917 | * At least some mask bits are one, so we need | ||
3918 | * to write, but only shadow some bits. | ||
3919 | */ | ||
3920 | u64 sval, tval; /* Shadowed, transient */ | ||
3921 | |||
3922 | /* | ||
3923 | * New shadow val is bits we don't want to touch, | ||
3924 | * ORed with bits we do, that are intended for shadow. | ||
3925 | */ | ||
3926 | sval = (dd->sendctrl & ~mask); | ||
3927 | sval |= *data & SENDCTRL_SHADOWED & mask; | ||
3928 | dd->sendctrl = sval; | ||
3929 | tval = sval | (*data & ~SENDCTRL_SHADOWED & mask); | ||
3930 | qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n", | ||
3931 | (u32)tval, (u32)sval); | ||
3932 | qib_write_kreg(dd, kr_sendctrl, tval); | ||
3933 | qib_write_kreg(dd, kr_scratch, 0Ull); | ||
3934 | } | ||
3935 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
3936 | |||
3937 | return only_32 ? 4 : 8; | ||
3938 | } | ||
3939 | |||
3940 | static const struct diag_observer sendctrl_observer = { | ||
3941 | sendctrl_hook, kr_sendctrl * sizeof(u64), | ||
3942 | kr_sendctrl * sizeof(u64) | ||
3943 | }; | ||
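The heart of sendctrl_hook() is the merge between the diag-supplied value, the write mask, and the software shadow of SendCtrl. A compact restatement of just that merge, with a made-up SHADOWED_BITS constant standing in for SENDCTRL_SHADOWED, might look like this (a sketch, not the driver's code):

#include <stdint.h>

#define SHADOWED_BITS 0x1ffULL   /* stand-in for SENDCTRL_SHADOWED */

/* Returns the transient value to write to the chip; updates *shadow with
 * the bits that must persist.  Bits outside 'mask' come from the old
 * shadow, shadowed bits inside 'mask' update the shadow, and non-shadowed
 * bits inside 'mask' are applied only for this write. */
static uint64_t sendctrl_merge(uint64_t *shadow, uint64_t data, uint64_t mask)
{
	uint64_t sval = (*shadow & ~mask) | (data & SHADOWED_BITS & mask);

	*shadow = sval;
	return sval | (data & ~SHADOWED_BITS & mask);
}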
3944 | |||
3945 | /* | ||
3946 | * write the final few registers that depend on some of the | ||
3947 | * init setup. Done late in init, just before bringing up | ||
3948 | * the serdes. | ||
3949 | */ | ||
3950 | static int qib_late_7220_initreg(struct qib_devdata *dd) | ||
3951 | { | ||
3952 | int ret = 0; | ||
3953 | u64 val; | ||
3954 | |||
3955 | qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); | ||
3956 | qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); | ||
3957 | qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); | ||
3958 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | ||
3959 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | ||
3960 | if (val != dd->pioavailregs_phys) { | ||
3961 | qib_dev_err(dd, "Catastrophic software error, " | ||
3962 | "SendPIOAvailAddr written as %lx, " | ||
3963 | "read back as %llx\n", | ||
3964 | (unsigned long) dd->pioavailregs_phys, | ||
3965 | (unsigned long long) val); | ||
3966 | ret = -EINVAL; | ||
3967 | } | ||
3968 | qib_register_observer(dd, &sendctrl_observer); | ||
3969 | return ret; | ||
3970 | } | ||
3971 | |||
3972 | static int qib_init_7220_variables(struct qib_devdata *dd) | ||
3973 | { | ||
3974 | struct qib_chippport_specific *cpspec; | ||
3975 | struct qib_pportdata *ppd; | ||
3976 | int ret = 0; | ||
3977 | u32 sbufs, updthresh; | ||
3978 | |||
3979 | cpspec = (struct qib_chippport_specific *)(dd + 1); | ||
3980 | ppd = &cpspec->pportdata; | ||
3981 | dd->pport = ppd; | ||
3982 | dd->num_pports = 1; | ||
3983 | |||
3984 | dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports); | ||
3985 | ppd->cpspec = cpspec; | ||
3986 | |||
3987 | spin_lock_init(&dd->cspec->sdepb_lock); | ||
3988 | spin_lock_init(&dd->cspec->rcvmod_lock); | ||
3989 | spin_lock_init(&dd->cspec->gpio_lock); | ||
3990 | |||
3991 | /* we haven't yet set QIB_PRESENT, so use read directly */ | ||
3992 | dd->revision = readq(&dd->kregbase[kr_revision]); | ||
3993 | |||
3994 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | ||
3995 | qib_dev_err(dd, "Revision register read failure, " | ||
3996 | "giving up initialization\n"); | ||
3997 | ret = -ENODEV; | ||
3998 | goto bail; | ||
3999 | } | ||
4000 | dd->flags |= QIB_PRESENT; /* now register routines work */ | ||
4001 | |||
4002 | dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, | ||
4003 | ChipRevMajor); | ||
4004 | dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, | ||
4005 | ChipRevMinor); | ||
4006 | |||
4007 | get_7220_chip_params(dd); | ||
4008 | qib_7220_boardname(dd); | ||
4009 | |||
4010 | /* | ||
4011 | * GPIO bits for TWSI data and clock, | ||
4012 | * used for serial EEPROM. | ||
4013 | */ | ||
4014 | dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; | ||
4015 | dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; | ||
4016 | dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; | ||
4017 | |||
4018 | dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | | ||
4019 | QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE; | ||
4020 | dd->flags |= qib_special_trigger ? | ||
4021 | QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA; | ||
4022 | |||
4023 | /* | ||
4024 | * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. | ||
4025 | * 2 is Some Misc, 3 is reserved for future. | ||
4026 | */ | ||
4027 | dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr); | ||
4028 | |||
4029 | dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr); | ||
4030 | |||
4031 | dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); | ||
4032 | |||
4033 | init_waitqueue_head(&cpspec->autoneg_wait); | ||
4034 | INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work); | ||
4035 | |||
4036 | qib_init_pportdata(ppd, dd, 0, 1); | ||
4037 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | ||
4038 | ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR; | ||
4039 | |||
4040 | ppd->link_width_enabled = ppd->link_width_supported; | ||
4041 | ppd->link_speed_enabled = ppd->link_speed_supported; | ||
4042 | /* | ||
4043 | * Set the initial values to reasonable default, will be set | ||
4044 | * for real when link is up. | ||
4045 | */ | ||
4046 | ppd->link_width_active = IB_WIDTH_4X; | ||
4047 | ppd->link_speed_active = QIB_IB_SDR; | ||
4048 | ppd->delay_mult = rate_to_delay[0][1]; | ||
4049 | ppd->vls_supported = IB_VL_VL0; | ||
4050 | ppd->vls_operational = ppd->vls_supported; | ||
4051 | |||
4052 | if (!qib_mini_init) | ||
4053 | qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP); | ||
4054 | |||
4055 | init_timer(&ppd->cpspec->chase_timer); | ||
4056 | ppd->cpspec->chase_timer.function = reenable_7220_chase; | ||
4057 | ppd->cpspec->chase_timer.data = (unsigned long)ppd; | ||
4058 | |||
4059 | qib_num_cfg_vls = 1; /* if any 7220's, only one VL */ | ||
4060 | |||
4061 | dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; | ||
4062 | dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; | ||
4063 | dd->rhf_offset = | ||
4064 | dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); | ||
4065 | |||
4066 | /* we always allocate at least 2048 bytes for eager buffers */ | ||
4067 | ret = ib_mtu_enum_to_int(qib_ibmtu); | ||
4068 | dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU; | ||
4069 | |||
4070 | qib_7220_tidtemplate(dd); | ||
4071 | |||
4072 | /* | ||
4073 | * We can request a receive interrupt for 1 or | ||
4074 | * more packets from current offset. For now, we set this | ||
4075 | * up for a single packet. | ||
4076 | */ | ||
4077 | dd->rhdrhead_intr_off = 1ULL << 32; | ||
4078 | |||
4079 | /* setup the stats timer; the add_timer is done at end of init */ | ||
4080 | init_timer(&dd->stats_timer); | ||
4081 | dd->stats_timer.function = qib_get_7220_faststats; | ||
4082 | dd->stats_timer.data = (unsigned long) dd; | ||
4083 | dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ; | ||
4084 | |||
4085 | /* | ||
4086 | * Control[4] has been added to change the arbitration within | ||
4087 | * the SDMA engine between favoring data fetches over descriptor | ||
4088 | * fetches. qib_sdma_fetch_arb==0 gives data fetches priority. | ||
4089 | */ | ||
4090 | if (qib_sdma_fetch_arb) | ||
4091 | dd->control |= 1 << 4; | ||
4092 | |||
4093 | dd->ureg_align = 0x10000; /* 64KB alignment */ | ||
4094 | |||
4095 | dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1; | ||
4096 | qib_7220_config_ctxts(dd); | ||
4097 | qib_set_ctxtcnt(dd); /* needed for PAT setup */ | ||
4098 | |||
4099 | if (qib_wc_pat) { | ||
4100 | ret = init_chip_wc_pat(dd, 0); | ||
4101 | if (ret) | ||
4102 | goto bail; | ||
4103 | } | ||
4104 | set_7220_baseaddrs(dd); /* set chip access pointers now */ | ||
4105 | |||
4106 | ret = 0; | ||
4107 | if (qib_mini_init) | ||
4108 | goto bail; | ||
4109 | |||
4110 | ret = qib_create_ctxts(dd); | ||
4111 | init_7220_cntrnames(dd); | ||
4112 | |||
4113 | /* Use all of the 4KB buffers for kernel SDMA (zero of them if !SDMA). | ||
4114 | * Reserve the update threshold amount for other kernel use, such | ||
4115 | * as sending SMI, MAD, and ACKs, or 3, whichever is greater; | ||
4116 | * if we aren't enabling SDMA, we instead want to use | ||
4117 | * all the 4k bufs for the kernel. | ||
4118 | * If the reserve were less than the update threshold, we could wait | ||
4119 | * a long time for an update. Coded this way because we | ||
4120 | * sometimes change the update threshold for various reasons, | ||
4121 | * and we want this to remain robust. | ||
4122 | */ | ||
4123 | updthresh = 8U; /* update threshold */ | ||
4124 | if (dd->flags & QIB_HAS_SEND_DMA) { | ||
4125 | dd->cspec->sdmabufcnt = dd->piobcnt4k; | ||
4126 | sbufs = updthresh > 3 ? updthresh : 3; | ||
4127 | } else { | ||
4128 | dd->cspec->sdmabufcnt = 0; | ||
4129 | sbufs = dd->piobcnt4k; | ||
4130 | } | ||
4131 | |||
4132 | dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - | ||
4133 | dd->cspec->sdmabufcnt; | ||
4134 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; | ||
4135 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ | ||
4136 | dd->pbufsctxt = dd->lastctxt_piobuf / | ||
4137 | (dd->cfgctxts - dd->first_user_ctxt); | ||
4138 | |||
4139 | /* | ||
4140 | * If we are at 16 user contexts, we will have only 7 sbufs | ||
4141 | * per context, so drop the update threshold to match. We | ||
4142 | * want to update before we actually run out; at low pbufs/ctxt, | ||
4143 | * give ourselves some margin (a worked example follows this routine). | ||
4144 | */ | ||
4145 | if ((dd->pbufsctxt - 2) < updthresh) | ||
4146 | updthresh = dd->pbufsctxt - 2; | ||
4147 | |||
4148 | dd->cspec->updthresh_dflt = updthresh; | ||
4149 | dd->cspec->updthresh = updthresh; | ||
4150 | |||
4151 | /* before full enable, no interrupts, no locking needed */ | ||
4152 | dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) | ||
4153 | << SYM_LSB(SendCtrl, AvailUpdThld); | ||
4154 | |||
4155 | dd->psxmitwait_supported = 1; | ||
4156 | dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE; | ||
4157 | bail: | ||
4158 | return ret; | ||
4159 | } | ||
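To make the buffer accounting near the end of this routine concrete, here is a worked example with invented numbers (128 2K buffers, 32 4K buffers, 16 user contexts; none of these are real chip values). It reproduces the relevant arithmetic only, not the hardware setup, and shows why 16 user contexts end up with 7 send buffers each and why the update threshold then drops to 5:

#include <stdio.h>

int main(void)
{
	/* invented example values, not real hardware parameters */
	unsigned piobcnt2k = 128, piobcnt4k = 32;
	unsigned cfgctxts = 17, first_user_ctxt = 1;    /* 16 user contexts */
	unsigned updthresh = 8;

	unsigned sdmabufcnt = piobcnt4k;                /* all 4K bufs for SDMA */
	unsigned sbufs = updthresh > 3 ? updthresh : 3;
	unsigned lastbuf_for_pio = piobcnt2k + piobcnt4k - sdmabufcnt;  /* 128 */
	unsigned lastctxt_piobuf = lastbuf_for_pio - sbufs;             /* 120 */
	unsigned pbufsctxt = lastctxt_piobuf / (cfgctxts - first_user_ctxt);

	if (pbufsctxt - 2 < updthresh)                  /* 7 - 2 = 5 < 8 */
		updthresh = pbufsctxt - 2;
	printf("pbufsctxt=%u updthresh=%u\n", pbufsctxt, updthresh);  /* 7, 5 */
	return 0;
}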
4160 | |||
4161 | static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc, | ||
4162 | u32 *pbufnum) | ||
4163 | { | ||
4164 | u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK; | ||
4165 | struct qib_devdata *dd = ppd->dd; | ||
4166 | u32 __iomem *buf; | ||
4167 | |||
4168 | if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) && | ||
4169 | !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE))) | ||
4170 | buf = get_7220_link_buf(ppd, pbufnum); | ||
4171 | else { | ||
4172 | if ((plen + 1) > dd->piosize2kmax_dwords) | ||
4173 | first = dd->piobcnt2k; | ||
4174 | else | ||
4175 | first = 0; | ||
4176 | /* try 4k if all 2k busy, so same last for both sizes */ | ||
4177 | last = dd->cspec->lastbuf_for_pio; | ||
4178 | buf = qib_getsendbuf_range(dd, pbufnum, first, last); | ||
4179 | } | ||
4180 | return buf; | ||
4181 | } | ||
4182 | |||
4183 | /* these 2 "counters" are really control registers, and are always RW */ | ||
4184 | static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv, | ||
4185 | u32 start) | ||
4186 | { | ||
4187 | write_7220_creg(ppd->dd, cr_psinterval, intv); | ||
4188 | write_7220_creg(ppd->dd, cr_psstart, start); | ||
4189 | } | ||
4190 | |||
4191 | /* | ||
4192 | * NOTE: no real attempt is made to generalize the SDMA stuff. | ||
4193 | * At some point "soon" we will have a new, more generalized | ||
4194 | * sdma interface, and then we'll clean this up. | ||
4195 | */ | ||
4196 | |||
4197 | /* Must be called with sdma_lock held, or before init finished */ | ||
4198 | static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail) | ||
4199 | { | ||
4200 | /* Commit writes to memory and advance the tail on the chip */ | ||
4201 | wmb(); | ||
4202 | ppd->sdma_descq_tail = tail; | ||
4203 | qib_write_kreg(ppd->dd, kr_senddmatail, tail); | ||
4204 | } | ||
4205 | |||
4206 | static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) | ||
4207 | { | ||
4208 | } | ||
4209 | |||
4210 | static struct sdma_set_state_action sdma_7220_action_table[] = { | ||
4211 | [qib_sdma_state_s00_hw_down] = { | ||
4212 | .op_enable = 0, | ||
4213 | .op_intenable = 0, | ||
4214 | .op_halt = 0, | ||
4215 | .go_s99_running_tofalse = 1, | ||
4216 | }, | ||
4217 | [qib_sdma_state_s10_hw_start_up_wait] = { | ||
4218 | .op_enable = 1, | ||
4219 | .op_intenable = 1, | ||
4220 | .op_halt = 1, | ||
4221 | }, | ||
4222 | [qib_sdma_state_s20_idle] = { | ||
4223 | .op_enable = 1, | ||
4224 | .op_intenable = 1, | ||
4225 | .op_halt = 1, | ||
4226 | }, | ||
4227 | [qib_sdma_state_s30_sw_clean_up_wait] = { | ||
4228 | .op_enable = 0, | ||
4229 | .op_intenable = 1, | ||
4230 | .op_halt = 0, | ||
4231 | }, | ||
4232 | [qib_sdma_state_s40_hw_clean_up_wait] = { | ||
4233 | .op_enable = 1, | ||
4234 | .op_intenable = 1, | ||
4235 | .op_halt = 1, | ||
4236 | }, | ||
4237 | [qib_sdma_state_s50_hw_halt_wait] = { | ||
4238 | .op_enable = 1, | ||
4239 | .op_intenable = 1, | ||
4240 | .op_halt = 1, | ||
4241 | }, | ||
4242 | [qib_sdma_state_s99_running] = { | ||
4243 | .op_enable = 1, | ||
4244 | .op_intenable = 1, | ||
4245 | .op_halt = 0, | ||
4246 | .go_s99_running_totrue = 1, | ||
4247 | }, | ||
4248 | }; | ||
4249 | |||
4250 | static void qib_7220_sdma_init_early(struct qib_pportdata *ppd) | ||
4251 | { | ||
4252 | ppd->sdma_state.set_state_action = sdma_7220_action_table; | ||
4253 | } | ||
4254 | |||
4255 | static int init_sdma_7220_regs(struct qib_pportdata *ppd) | ||
4256 | { | ||
4257 | struct qib_devdata *dd = ppd->dd; | ||
4258 | unsigned i, n; | ||
4259 | u64 senddmabufmask[3] = { 0 }; | ||
4260 | |||
4261 | /* Set SendDmaBase */ | ||
4262 | qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys); | ||
4263 | qib_sdma_7220_setlengen(ppd); | ||
4264 | qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */ | ||
4265 | /* Set SendDmaHeadAddr */ | ||
4266 | qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys); | ||
4267 | |||
4268 | /* | ||
4269 | * Reserve all the former "kernel" piobufs, using high number range | ||
4270 | * so we get as many 4K buffers as possible | ||
4271 | */ | ||
4272 | n = dd->piobcnt2k + dd->piobcnt4k; | ||
4273 | i = n - dd->cspec->sdmabufcnt; | ||
4274 | |||
4275 | for (; i < n; ++i) { | ||
4276 | unsigned word = i / 64; | ||
4277 | unsigned bit = i & 63; | ||
4278 | |||
4279 | BUG_ON(word >= 3); | ||
4280 | senddmabufmask[word] |= 1ULL << bit; | ||
4281 | } | ||
4282 | qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]); | ||
4283 | qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]); | ||
4284 | qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]); | ||
4285 | |||
4286 | ppd->sdma_state.first_sendbuf = i; | ||
4287 | ppd->sdma_state.last_sendbuf = n; | ||
4288 | |||
4289 | return 0; | ||
4290 | } | ||
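The reservation loop above spreads a contiguous range of buffer numbers across three 64-bit mask registers. The same bit bookkeeping, pulled out into a self-contained helper (the function name is invented; the driver then writes the words to kr_senddmabufmask0..2):

#include <stdint.h>

/* Set bits [first, last) in a mask spread across 64-bit words; the caller
 * must supply at least (last + 63) / 64 words, zero-initialized. */
static void set_buf_range(uint64_t *words, unsigned first, unsigned last)
{
	unsigned i;

	for (i = first; i < last; ++i)
		words[i / 64] |= 1ULL << (i & 63);
}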
4291 | |||
4292 | /* sdma_lock must be held */ | ||
4293 | static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd) | ||
4294 | { | ||
4295 | struct qib_devdata *dd = ppd->dd; | ||
4296 | int sane; | ||
4297 | int use_dmahead; | ||
4298 | u16 swhead; | ||
4299 | u16 swtail; | ||
4300 | u16 cnt; | ||
4301 | u16 hwhead; | ||
4302 | |||
4303 | use_dmahead = __qib_sdma_running(ppd) && | ||
4304 | (dd->flags & QIB_HAS_SDMA_TIMEOUT); | ||
4305 | retry: | ||
4306 | hwhead = use_dmahead ? | ||
4307 | (u16)le64_to_cpu(*ppd->sdma_head_dma) : | ||
4308 | (u16)qib_read_kreg32(dd, kr_senddmahead); | ||
4309 | |||
4310 | swhead = ppd->sdma_descq_head; | ||
4311 | swtail = ppd->sdma_descq_tail; | ||
4312 | cnt = ppd->sdma_descq_cnt; | ||
4313 | |||
4314 | if (swhead < swtail) { | ||
4315 | /* not wrapped */ | ||
4316 | sane = (hwhead >= swhead) & (hwhead <= swtail); | ||
4317 | } else if (swhead > swtail) { | ||
4318 | /* wrapped around */ | ||
4319 | sane = ((hwhead >= swhead) && (hwhead < cnt)) || | ||
4320 | (hwhead <= swtail); | ||
4321 | } else { | ||
4322 | /* empty */ | ||
4323 | sane = (hwhead == swhead); | ||
4324 | } | ||
4325 | |||
4326 | if (unlikely(!sane)) { | ||
4327 | if (use_dmahead) { | ||
4328 | /* try one more time, directly from the register */ | ||
4329 | use_dmahead = 0; | ||
4330 | goto retry; | ||
4331 | } | ||
4332 | /* assume no progress */ | ||
4333 | hwhead = swhead; | ||
4334 | } | ||
4335 | |||
4336 | return hwhead; | ||
4337 | } | ||
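The sanity test in the middle of this function is a wrap-aware range check on a ring buffer index. Restated on its own (a sketch with an invented name, mirroring the three cases above):

#include <stdint.h>

/* Is the hardware head a plausible position between the software head and
 * tail of a ring with 'cnt' descriptors, allowing for wrap-around? */
static int head_is_sane(uint16_t hwhead, uint16_t swhead,
			uint16_t swtail, uint16_t cnt)
{
	if (swhead < swtail)          /* not wrapped */
		return hwhead >= swhead && hwhead <= swtail;
	if (swhead > swtail)          /* wrapped around */
		return (hwhead >= swhead && hwhead < cnt) || hwhead <= swtail;
	return hwhead == swhead;      /* ring is empty */
}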
4338 | |||
4339 | static int qib_sdma_7220_busy(struct qib_pportdata *ppd) | ||
4340 | { | ||
4341 | u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus); | ||
4342 | |||
4343 | return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) || | ||
4344 | (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) || | ||
4345 | (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) || | ||
4346 | !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty)); | ||
4347 | } | ||
4348 | |||
4349 | /* | ||
4350 | * Compute the amount of delay before sending the next packet if the | ||
4351 | * port's send rate differs from the static rate set for the QP. | ||
4352 | * Since the delay affects this packet but the amount of the delay is | ||
4353 | * based on the length of the previous packet, use the last delay computed | ||
4354 | * and save the delay count for this packet to be used next time | ||
4355 | * we get here. | ||
4356 | */ | ||
4357 | static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen, | ||
4358 | u8 srate, u8 vl) | ||
4359 | { | ||
4360 | u8 snd_mult = ppd->delay_mult; | ||
4361 | u8 rcv_mult = ib_rate_to_delay[srate]; | ||
4362 | u32 ret = ppd->cpspec->last_delay_mult; | ||
4363 | |||
4364 | ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ? | ||
4365 | (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0; | ||
4366 | |||
4367 | /* Indicate VL15, if necessary */ | ||
4368 | if (vl == 15) | ||
4369 | ret |= PBC_7220_VL15_SEND_CTRL; | ||
4370 | return ret; | ||
4371 | } | ||
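The pacing arithmetic is easier to see with numbers. With a previous packet length of 100 dwords, a port send multiplier of 2, and a QP static-rate multiplier of 4 (all made-up values; the real multipliers come from the driver's rate tables), the delay stored for the next packet is (100 * (4 - 2) + 1) >> 1 = 100, while the value returned now is whatever was computed for the previous packet. As a stand-alone sketch of the same formula:

/* Static-rate pacing formula as above; plen is the previous packet length
 * in dwords, multipliers are relative delays per dword. */
static unsigned rate_delay(unsigned plen, unsigned snd_mult, unsigned rcv_mult)
{
	return rcv_mult > snd_mult ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}
/* e.g. rate_delay(100, 2, 4) == 100, rate_delay(100, 4, 2) == 0 */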
4372 | |||
4373 | static void qib_7220_initvl15_bufs(struct qib_devdata *dd) | ||
4374 | { | ||
4375 | } | ||
4376 | |||
4377 | static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd) | ||
4378 | { | ||
4379 | if (!rcd->ctxt) { | ||
4380 | rcd->rcvegrcnt = IBA7220_KRCVEGRCNT; | ||
4381 | rcd->rcvegr_tid_base = 0; | ||
4382 | } else { | ||
4383 | rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; | ||
4384 | rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT + | ||
4385 | (rcd->ctxt - 1) * rcd->rcvegrcnt; | ||
4386 | } | ||
4387 | } | ||
4388 | |||
4389 | static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start, | ||
4390 | u32 len, u32 which, struct qib_ctxtdata *rcd) | ||
4391 | { | ||
4392 | int i; | ||
4393 | unsigned long flags; | ||
4394 | |||
4395 | switch (which) { | ||
4396 | case TXCHK_CHG_TYPE_KERN: | ||
4397 | /* see if we need to raise avail update threshold */ | ||
4398 | spin_lock_irqsave(&dd->uctxt_lock, flags); | ||
4399 | for (i = dd->first_user_ctxt; | ||
4400 | dd->cspec->updthresh != dd->cspec->updthresh_dflt | ||
4401 | && i < dd->cfgctxts; i++) | ||
4402 | if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && | ||
4403 | ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) | ||
4404 | < dd->cspec->updthresh_dflt) | ||
4405 | break; | ||
4406 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
4407 | if (i == dd->cfgctxts) { | ||
4408 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
4409 | dd->cspec->updthresh = dd->cspec->updthresh_dflt; | ||
4410 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | ||
4411 | dd->sendctrl |= (dd->cspec->updthresh & | ||
4412 | SYM_RMASK(SendCtrl, AvailUpdThld)) << | ||
4413 | SYM_LSB(SendCtrl, AvailUpdThld); | ||
4414 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
4415 | sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
4416 | } | ||
4417 | break; | ||
4418 | case TXCHK_CHG_TYPE_USER: | ||
4419 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
4420 | if (rcd && rcd->subctxt_cnt && ((rcd->piocnt | ||
4421 | / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { | ||
4422 | dd->cspec->updthresh = (rcd->piocnt / | ||
4423 | rcd->subctxt_cnt) - 1; | ||
4424 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | ||
4425 | dd->sendctrl |= (dd->cspec->updthresh & | ||
4426 | SYM_RMASK(SendCtrl, AvailUpdThld)) | ||
4427 | << SYM_LSB(SendCtrl, AvailUpdThld); | ||
4428 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
4429 | sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
4430 | } else | ||
4431 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
4432 | break; | ||
4433 | } | ||
4434 | } | ||
4435 | |||
4436 | static void writescratch(struct qib_devdata *dd, u32 val) | ||
4437 | { | ||
4438 | qib_write_kreg(dd, kr_scratch, val); | ||
4439 | } | ||
4440 | |||
4441 | #define VALID_TS_RD_REG_MASK 0xBF | ||
4442 | /** | ||
4443 | * qib_7220_tempsense_rd - read register of temp sensor via TWSI | ||
4444 | * @dd: the qlogic_ib device | ||
4445 | * @regnum: register to read from | ||
4446 | * | ||
4447 | * returns reg contents (0..255) or < 0 for error | ||
4448 | */ | ||
4449 | static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum) | ||
4450 | { | ||
4451 | int ret; | ||
4452 | u8 rdata; | ||
4453 | |||
4454 | if (regnum > 7) { | ||
4455 | ret = -EINVAL; | ||
4456 | goto bail; | ||
4457 | } | ||
4458 | |||
4459 | /* return a bogus value for (the one) register we do not have */ | ||
4460 | if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) { | ||
4461 | ret = 0; | ||
4462 | goto bail; | ||
4463 | } | ||
4464 | |||
4465 | ret = mutex_lock_interruptible(&dd->eep_lock); | ||
4466 | if (ret) | ||
4467 | goto bail; | ||
4468 | |||
4469 | ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1); | ||
4470 | if (!ret) | ||
4471 | ret = rdata; | ||
4472 | |||
4473 | mutex_unlock(&dd->eep_lock); | ||
4474 | |||
4475 | /* | ||
4476 | * There are three possibilities here: | ||
4477 | * ret is actual value (0..255) | ||
4478 | * ret is -ENXIO or -EINVAL from twsi code or this file | ||
4479 | * ret is -EINTR from mutex_lock_interruptible. | ||
4480 | */ | ||
4481 | bail: | ||
4482 | return ret; | ||
4483 | } | ||
4484 | |||
4485 | /* Dummy function, as 7220 boards never disable EEPROM Write */ | ||
4486 | static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen) | ||
4487 | { | ||
4488 | return 1; | ||
4489 | } | ||
4490 | |||
4491 | /** | ||
4492 | * qib_init_iba7220_funcs - set up the chip-specific function pointers | ||
4493 | * @pdev: the pci_dev for the qlogic_ib device | ||
4494 | * @ent: pci_device_id struct for this dev | ||
4495 | * | ||
4496 | * This is global, and is called directly at init to set up the | ||
4497 | * chip-specific function pointers for later use. | ||
4498 | */ | ||
4499 | struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev, | ||
4500 | const struct pci_device_id *ent) | ||
4501 | { | ||
4502 | struct qib_devdata *dd; | ||
4503 | int ret; | ||
4504 | u32 boardid, minwidth; | ||
4505 | |||
4506 | dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) + | ||
4507 | sizeof(struct qib_chippport_specific)); | ||
4508 | if (IS_ERR(dd)) | ||
4509 | goto bail; | ||
4510 | |||
4511 | dd->f_bringup_serdes = qib_7220_bringup_serdes; | ||
4512 | dd->f_cleanup = qib_setup_7220_cleanup; | ||
4513 | dd->f_clear_tids = qib_7220_clear_tids; | ||
4514 | dd->f_free_irq = qib_7220_free_irq; | ||
4515 | dd->f_get_base_info = qib_7220_get_base_info; | ||
4516 | dd->f_get_msgheader = qib_7220_get_msgheader; | ||
4517 | dd->f_getsendbuf = qib_7220_getsendbuf; | ||
4518 | dd->f_gpio_mod = gpio_7220_mod; | ||
4519 | dd->f_eeprom_wen = qib_7220_eeprom_wen; | ||
4520 | dd->f_hdrqempty = qib_7220_hdrqempty; | ||
4521 | dd->f_ib_updown = qib_7220_ib_updown; | ||
4522 | dd->f_init_ctxt = qib_7220_init_ctxt; | ||
4523 | dd->f_initvl15_bufs = qib_7220_initvl15_bufs; | ||
4524 | dd->f_intr_fallback = qib_7220_intr_fallback; | ||
4525 | dd->f_late_initreg = qib_late_7220_initreg; | ||
4526 | dd->f_setpbc_control = qib_7220_setpbc_control; | ||
4527 | dd->f_portcntr = qib_portcntr_7220; | ||
4528 | dd->f_put_tid = qib_7220_put_tid; | ||
4529 | dd->f_quiet_serdes = qib_7220_quiet_serdes; | ||
4530 | dd->f_rcvctrl = rcvctrl_7220_mod; | ||
4531 | dd->f_read_cntrs = qib_read_7220cntrs; | ||
4532 | dd->f_read_portcntrs = qib_read_7220portcntrs; | ||
4533 | dd->f_reset = qib_setup_7220_reset; | ||
4534 | dd->f_init_sdma_regs = init_sdma_7220_regs; | ||
4535 | dd->f_sdma_busy = qib_sdma_7220_busy; | ||
4536 | dd->f_sdma_gethead = qib_sdma_7220_gethead; | ||
4537 | dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl; | ||
4538 | dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt; | ||
4539 | dd->f_sdma_update_tail = qib_sdma_update_7220_tail; | ||
4540 | dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up; | ||
4541 | dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up; | ||
4542 | dd->f_sdma_init_early = qib_7220_sdma_init_early; | ||
4543 | dd->f_sendctrl = sendctrl_7220_mod; | ||
4544 | dd->f_set_armlaunch = qib_set_7220_armlaunch; | ||
4545 | dd->f_set_cntr_sample = qib_set_cntr_7220_sample; | ||
4546 | dd->f_iblink_state = qib_7220_iblink_state; | ||
4547 | dd->f_ibphys_portstate = qib_7220_phys_portstate; | ||
4548 | dd->f_get_ib_cfg = qib_7220_get_ib_cfg; | ||
4549 | dd->f_set_ib_cfg = qib_7220_set_ib_cfg; | ||
4550 | dd->f_set_ib_loopback = qib_7220_set_loopback; | ||
4551 | dd->f_set_intr_state = qib_7220_set_intr_state; | ||
4552 | dd->f_setextled = qib_setup_7220_setextled; | ||
4553 | dd->f_txchk_change = qib_7220_txchk_change; | ||
4554 | dd->f_update_usrhead = qib_update_7220_usrhead; | ||
4555 | dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr; | ||
4556 | dd->f_xgxs_reset = qib_7220_xgxs_reset; | ||
4557 | dd->f_writescratch = writescratch; | ||
4558 | dd->f_tempsense_rd = qib_7220_tempsense_rd; | ||
4559 | /* | ||
4560 | * Do remaining pcie setup and save pcie values in dd. | ||
4561 | * Any error printing is already done by the init code. | ||
4562 | * On return, we have the chip mapped, but chip registers | ||
4563 | * are not set up until start of qib_init_7220_variables. | ||
4564 | */ | ||
4565 | ret = qib_pcie_ddinit(dd, pdev, ent); | ||
4566 | if (ret < 0) | ||
4567 | goto bail_free; | ||
4568 | |||
4569 | /* initialize chip-specific variables */ | ||
4570 | ret = qib_init_7220_variables(dd); | ||
4571 | if (ret) | ||
4572 | goto bail_cleanup; | ||
4573 | |||
4574 | if (qib_mini_init) | ||
4575 | goto bail; | ||
4576 | |||
4577 | boardid = SYM_FIELD(dd->revision, Revision, | ||
4578 | BoardID); | ||
4579 | switch (boardid) { | ||
4580 | case 0: | ||
4581 | case 2: | ||
4582 | case 10: | ||
4583 | case 12: | ||
4584 | minwidth = 16; /* x16 capable boards */ | ||
4585 | break; | ||
4586 | default: | ||
4587 | minwidth = 8; /* x8 capable boards */ | ||
4588 | break; | ||
4589 | } | ||
4590 | if (qib_pcie_params(dd, minwidth, NULL, NULL)) | ||
4591 | qib_dev_err(dd, "Failed to setup PCIe or interrupts; " | ||
4592 | "continuing anyway\n"); | ||
4593 | |||
4594 | /* save IRQ for possible later use */ | ||
4595 | dd->cspec->irq = pdev->irq; | ||
4596 | |||
4597 | if (qib_read_kreg64(dd, kr_hwerrstatus) & | ||
4598 | QLOGIC_IB_HWE_SERDESPLLFAILED) | ||
4599 | qib_write_kreg(dd, kr_hwerrclear, | ||
4600 | QLOGIC_IB_HWE_SERDESPLLFAILED); | ||
4601 | |||
4602 | /* setup interrupt handler (interrupt type handled above) */ | ||
4603 | qib_setup_7220_interrupt(dd); | ||
4604 | qib_7220_init_hwerrors(dd); | ||
4605 | |||
4606 | /* clear diagctrl register, in case diags were running and crashed */ | ||
4607 | qib_write_kreg(dd, kr_hwdiagctrl, 0); | ||
4608 | |||
4609 | goto bail; | ||
4610 | |||
4611 | bail_cleanup: | ||
4612 | qib_pcie_ddcleanup(dd); | ||
4613 | bail_free: | ||
4614 | qib_free_devdata(dd); | ||
4615 | dd = ERR_PTR(ret); | ||
4616 | bail: | ||
4617 | return dd; | ||
4618 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c new file mode 100644 index 000000000000..2c24eab35b54 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -0,0 +1,8058 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | /* | ||
34 | * This file contains all of the code that is specific to the | ||
35 | * InfiniPath 7322 chip | ||
36 | */ | ||
37 | |||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/pci.h> | ||
40 | #include <linux/delay.h> | ||
41 | #include <linux/io.h> | ||
42 | #include <linux/jiffies.h> | ||
43 | #include <rdma/ib_verbs.h> | ||
44 | #include <rdma/ib_smi.h> | ||
45 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
46 | #include <linux/dca.h> | ||
47 | #endif | ||
48 | |||
49 | #include "qib.h" | ||
50 | #include "qib_7322_regs.h" | ||
51 | #include "qib_qsfp.h" | ||
52 | |||
53 | #include "qib_mad.h" | ||
54 | |||
55 | static void qib_setup_7322_setextled(struct qib_pportdata *, u32); | ||
56 | static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t); | ||
57 | static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op); | ||
58 | static irqreturn_t qib_7322intr(int irq, void *data); | ||
59 | static irqreturn_t qib_7322bufavail(int irq, void *data); | ||
60 | static irqreturn_t sdma_intr(int irq, void *data); | ||
61 | static irqreturn_t sdma_idle_intr(int irq, void *data); | ||
62 | static irqreturn_t sdma_progress_intr(int irq, void *data); | ||
63 | static irqreturn_t sdma_cleanup_intr(int irq, void *data); | ||
64 | static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32, | ||
65 | struct qib_ctxtdata *rcd); | ||
66 | static u8 qib_7322_phys_portstate(u64); | ||
67 | static u32 qib_7322_iblink_state(u64); | ||
68 | static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd, | ||
69 | u16 linitcmd); | ||
70 | static void force_h1(struct qib_pportdata *); | ||
71 | static void adj_tx_serdes(struct qib_pportdata *); | ||
72 | static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8); | ||
73 | static void qib_7322_mini_pcs_reset(struct qib_pportdata *); | ||
74 | |||
75 | static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); | ||
76 | static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); | ||
77 | |||
78 | #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) | ||
79 | |||
80 | /* LE2 serdes values for different cases */ | ||
81 | #define LE2_DEFAULT 5 | ||
82 | #define LE2_5m 4 | ||
83 | #define LE2_QME 0 | ||
84 | |||
85 | /* Below is special-purpose, so only really works for the IB SerDes blocks. */ | ||
86 | #define IBSD(hw_pidx) (hw_pidx + 2) | ||
87 | |||
88 | /* these are variables for documentation and experimentation purposes */ | ||
89 | static const unsigned rcv_int_timeout = 375; | ||
90 | static const unsigned rcv_int_count = 16; | ||
91 | static const unsigned sdma_idle_cnt = 64; | ||
92 | |||
93 | /* Time to stop altering Rx Equalization parameters, after link up. */ | ||
94 | #define RXEQ_DISABLE_MSECS 2500 | ||
95 | |||
96 | /* | ||
97 | * Number of VLs we are configured to use (to allow for more | ||
98 | * credits per vl, etc.) | ||
99 | */ | ||
100 | ushort qib_num_cfg_vls = 2; | ||
101 | module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO); | ||
102 | MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)"); | ||
103 | |||
104 | static ushort qib_chase = 1; | ||
105 | module_param_named(chase, qib_chase, ushort, S_IRUGO); | ||
106 | MODULE_PARM_DESC(chase, "Enable state chase handling"); | ||
107 | |||
108 | static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ | ||
109 | module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); | ||
110 | MODULE_PARM_DESC(long_attenuation, \ | ||
111 | "attenuation cutoff (dB) for long copper cable setup"); | ||
112 | |||
113 | static ushort qib_singleport; | ||
114 | module_param_named(singleport, qib_singleport, ushort, S_IRUGO); | ||
115 | MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); | ||
116 | |||
117 | |||
118 | /* | ||
119 | * Setup QMH7342 receive and transmit parameters, necessary because | ||
120 | * each bay, Mez connector, and IB port need different tuning, beyond | ||
121 | * what the switch and HCA can do automatically. | ||
122 | * It's expected to be done by cat'ing values to the module parameter file, | ||
123 | * rather than setting it up as a module parameter at load time. | ||
124 | * It's a "write-only" file, returns 0 when read back. | ||
125 | * The unit, port, bay (if given), and values MUST be done as a single write. | ||
126 | * The unit, port, and bay must precede the values to be effective. | ||
127 | */ | ||
128 | static int setup_qmh_params(const char *, struct kernel_param *); | ||
129 | static unsigned dummy_qmh_params; | ||
130 | module_param_call(qmh_serdes_setup, setup_qmh_params, param_get_uint, | ||
131 | &dummy_qmh_params, S_IWUSR | S_IRUGO); | ||
132 | |||
133 | /* similarly for QME7342, but it's simpler */ | ||
134 | static int setup_qme_params(const char *, struct kernel_param *); | ||
135 | static unsigned dummy_qme_params; | ||
136 | module_param_call(qme_serdes_setup, setup_qme_params, param_get_uint, | ||
137 | &dummy_qme_params, S_IWUSR | S_IRUGO); | ||
138 | |||
139 | #define MAX_ATTEN_LEN 64 /* plenty for any real system */ | ||
140 | /* for read back, default index is ~5m copper cable */ | ||
141 | static char cable_atten_list[MAX_ATTEN_LEN] = "10"; | ||
142 | static struct kparam_string kp_cable_atten = { | ||
143 | .string = cable_atten_list, | ||
144 | .maxlen = MAX_ATTEN_LEN | ||
145 | }; | ||
146 | static int setup_cable_atten(const char *, struct kernel_param *); | ||
147 | module_param_call(cable_atten, setup_cable_atten, param_get_string, | ||
148 | &kp_cable_atten, S_IWUSR | S_IRUGO); | ||
149 | MODULE_PARM_DESC(cable_atten, \ | ||
150 | "cable attenuation indices for cables with invalid EEPROM"); | ||
151 | |||
152 | #define BOARD_QME7342 5 | ||
153 | #define BOARD_QMH7342 6 | ||
154 | #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ | ||
155 | BOARD_QMH7342) | ||
156 | #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ | ||
157 | BOARD_QME7342) | ||
158 | |||
159 | #define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64)) | ||
160 | |||
161 | #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64))) | ||
162 | |||
163 | #define MASK_ACROSS(lsb, msb) \ | ||
164 | (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb)) | ||
165 | |||
166 | #define SYM_RMASK(regname, fldname) ((u64) \ | ||
167 | QIB_7322_##regname##_##fldname##_RMASK) | ||
168 | |||
169 | #define SYM_MASK(regname, fldname) ((u64) \ | ||
170 | QIB_7322_##regname##_##fldname##_RMASK << \ | ||
171 | QIB_7322_##regname##_##fldname##_LSB) | ||
172 | |||
173 | #define SYM_FIELD(value, regname, fldname) ((u64) \ | ||
174 | (((value) >> SYM_LSB(regname, fldname)) & \ | ||
175 | SYM_RMASK(regname, fldname))) | ||
176 | |||
177 | /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */ | ||
178 | #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \ | ||
179 | (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits)) | ||
180 | |||
181 | #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask) | ||
182 | #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask) | ||
183 | #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask) | ||
184 | #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask) | ||
185 | #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port) | ||
186 | /* Below because most, but not all, fields of IntMask have that full suffix */ | ||
187 | #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port) | ||
188 | |||
189 | |||
190 | #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB) | ||
191 | |||
192 | /* | ||
193 | * the size bits give us 2^N, in KB units. 0 marks as invalid, | ||
194 | * and 7 is reserved. We currently use only 2KB and 4KB | ||
195 | */ | ||
196 | #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB | ||
197 | #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */ | ||
198 | #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */ | ||
199 | #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */ | ||
200 | |||
201 | #define SendIBSLIDAssignMask \ | ||
202 | QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK | ||
203 | #define SendIBSLMCMask \ | ||
204 | QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK | ||
205 | |||
206 | #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn) | ||
207 | #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn) | ||
208 | #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn) | ||
209 | #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn) | ||
210 | #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN) | ||
211 | #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN) | ||
212 | |||
213 | #define _QIB_GPIO_SDA_NUM 1 | ||
214 | #define _QIB_GPIO_SCL_NUM 0 | ||
215 | #define QIB_EEPROM_WEN_NUM 14 | ||
216 | #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */ | ||
217 | |||
218 | /* HW counter clock is at 4nsec */ | ||
219 | #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000 | ||
220 | |||
221 | /* full speed IB port 1 only */ | ||
222 | #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR) | ||
223 | #define PORT_SPD_CAP_SHIFT 3 | ||
224 | |||
225 | /* full speed featuremask, both ports */ | ||
226 | #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT)) | ||
227 | |||
228 | /* | ||
229 | * This file contains almost all the chip-specific register information and | ||
230 | * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip. | ||
231 | */ | ||
232 | |||
233 | /* Use defines to tie machine-generated names to lower-case names */ | ||
234 | #define kr_contextcnt KREG_IDX(ContextCnt) | ||
235 | #define kr_control KREG_IDX(Control) | ||
236 | #define kr_counterregbase KREG_IDX(CntrRegBase) | ||
237 | #define kr_errclear KREG_IDX(ErrClear) | ||
238 | #define kr_errmask KREG_IDX(ErrMask) | ||
239 | #define kr_errstatus KREG_IDX(ErrStatus) | ||
240 | #define kr_extctrl KREG_IDX(EXTCtrl) | ||
241 | #define kr_extstatus KREG_IDX(EXTStatus) | ||
242 | #define kr_gpio_clear KREG_IDX(GPIOClear) | ||
243 | #define kr_gpio_mask KREG_IDX(GPIOMask) | ||
244 | #define kr_gpio_out KREG_IDX(GPIOOut) | ||
245 | #define kr_gpio_status KREG_IDX(GPIOStatus) | ||
246 | #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl) | ||
247 | #define kr_debugportval KREG_IDX(DebugPortValueReg) | ||
248 | #define kr_fmask KREG_IDX(feature_mask) | ||
249 | #define kr_act_fmask KREG_IDX(active_feature_mask) | ||
250 | #define kr_hwerrclear KREG_IDX(HwErrClear) | ||
251 | #define kr_hwerrmask KREG_IDX(HwErrMask) | ||
252 | #define kr_hwerrstatus KREG_IDX(HwErrStatus) | ||
253 | #define kr_intclear KREG_IDX(IntClear) | ||
254 | #define kr_intmask KREG_IDX(IntMask) | ||
255 | #define kr_intredirect KREG_IDX(IntRedirect0) | ||
256 | #define kr_intstatus KREG_IDX(IntStatus) | ||
257 | #define kr_pagealign KREG_IDX(PageAlign) | ||
258 | #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0) | ||
259 | #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */ | ||
260 | #define kr_rcvegrbase KREG_IDX(RcvEgrBase) | ||
261 | #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt) | ||
262 | #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt) | ||
263 | #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize) | ||
264 | #define kr_rcvhdrsize KREG_IDX(RcvHdrSize) | ||
265 | #define kr_rcvtidbase KREG_IDX(RcvTIDBase) | ||
266 | #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt) | ||
267 | #define kr_revision KREG_IDX(Revision) | ||
268 | #define kr_scratch KREG_IDX(Scratch) | ||
269 | #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */ | ||
270 | #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */ | ||
271 | #define kr_sendctrl KREG_IDX(SendCtrl) | ||
272 | #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */ | ||
273 | #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */ | ||
274 | #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr) | ||
275 | #define kr_sendpiobufbase KREG_IDX(SendBufBase) | ||
276 | #define kr_sendpiobufcnt KREG_IDX(SendBufCnt) | ||
277 | #define kr_sendpiosize KREG_IDX(SendBufSize) | ||
278 | #define kr_sendregbase KREG_IDX(SendRegBase) | ||
279 | #define kr_sendbufavail0 KREG_IDX(SendBufAvail0) | ||
280 | #define kr_userregbase KREG_IDX(UserRegBase) | ||
281 | #define kr_intgranted KREG_IDX(Int_Granted) | ||
282 | #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int) | ||
283 | #define kr_intblocked KREG_IDX(IntBlocked) | ||
284 | #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG) | ||
285 | |||
286 | /* | ||
287 | * per-port kernel registers. Access only with qib_read_kreg_port() | ||
288 | * or qib_write_kreg_port() | ||
289 | */ | ||
290 | #define krp_errclear KREG_IBPORT_IDX(ErrClear) | ||
291 | #define krp_errmask KREG_IBPORT_IDX(ErrMask) | ||
292 | #define krp_errstatus KREG_IBPORT_IDX(ErrStatus) | ||
293 | #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0) | ||
294 | #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit) | ||
295 | #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID) | ||
296 | #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig) | ||
297 | #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA) | ||
298 | #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB) | ||
299 | #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC) | ||
300 | #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA) | ||
301 | #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB) | ||
302 | #define krp_txestatus KREG_IBPORT_IDX(TXEStatus) | ||
303 | #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0) | ||
304 | #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl) | ||
305 | #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey) | ||
306 | #define krp_psinterval KREG_IBPORT_IDX(PSInterval) | ||
307 | #define krp_psstart KREG_IBPORT_IDX(PSStart) | ||
308 | #define krp_psstat KREG_IBPORT_IDX(PSStat) | ||
309 | #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP) | ||
310 | #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl) | ||
311 | #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt) | ||
312 | #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA) | ||
313 | #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0) | ||
314 | #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15) | ||
315 | #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl) | ||
316 | #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl) | ||
317 | #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase) | ||
318 | #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0) | ||
319 | #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1) | ||
320 | #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2) | ||
321 | #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0) | ||
322 | #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1) | ||
323 | #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2) | ||
324 | #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt) | ||
325 | #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead) | ||
326 | #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr) | ||
327 | #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt) | ||
328 | #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen) | ||
329 | #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld) | ||
330 | #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt) | ||
331 | #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus) | ||
332 | #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail) | ||
333 | #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom) | ||
334 | #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign) | ||
335 | #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask) | ||
336 | #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX) | ||
337 | #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD) | ||
338 | #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE) | ||
339 | #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl) | ||
340 | |||
341 | /* | ||
342 | * Per-context kernel registers. Access only with qib_read_kreg_ctxt() | ||
343 | * or qib_write_kreg_ctxt() | ||
344 | */ | ||
345 | #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0) | ||
346 | #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0) | ||
347 | |||
348 | /* | ||
349 | * TID Flow table, per context. Reduces | ||
350 | * number of hdrq updates to one per flow (or on errors). | ||
351 | * Contexts 0 and 1 share the same memory, but have distinct | ||
352 | * addresses. Since for now, we never use expected sends | ||
353 | * on kernel contexts, we don't worry about that (we initialize | ||
354 | * those entries for ctxt 0/1 on driver load twice, for example). | ||
355 | */ | ||
356 | #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */ | ||
357 | #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0)) | ||
358 | |||
359 | /* these are the error bits in the tid flows, and are W1C */ | ||
360 | #define TIDFLOW_ERRBITS ( \ | ||
361 | (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \ | ||
362 | SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \ | ||
363 | (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \ | ||
364 | SYM_LSB(RcvTIDFlowTable0, SeqMismatch))) | ||
365 | |||
366 | /* Most (not all) Counters are per-IBport. | ||
367 | * Requires LBIntCnt is at offset 0 in the group | ||
368 | */ | ||
369 | #define CREG_IDX(regname) \ | ||
370 | ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64)) | ||
371 | |||
372 | #define crp_badformat CREG_IDX(RxVersionErrCnt) | ||
373 | #define crp_err_rlen CREG_IDX(RxLenErrCnt) | ||
374 | #define crp_erricrc CREG_IDX(RxICRCErrCnt) | ||
375 | #define crp_errlink CREG_IDX(RxLinkMalformCnt) | ||
376 | #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt) | ||
377 | #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt) | ||
378 | #define crp_errvcrc CREG_IDX(RxVCRCErrCnt) | ||
379 | #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt) | ||
380 | #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt) | ||
381 | #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt) | ||
382 | #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt) | ||
383 | #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt) | ||
384 | #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt) | ||
385 | #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt) | ||
386 | #define crp_pktrcv CREG_IDX(RxDataPktCnt) | ||
387 | #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt) | ||
388 | #define crp_pktsend CREG_IDX(TxDataPktCnt) | ||
389 | #define crp_pktsendflow CREG_IDX(TxFlowPktCnt) | ||
390 | #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount) | ||
391 | #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount) | ||
392 | #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount) | ||
393 | #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount) | ||
394 | #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount) | ||
395 | #define crp_rcvebp CREG_IDX(RxEBPCnt) | ||
396 | #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt) | ||
397 | #define crp_rcvovfl CREG_IDX(RxBufOvflCnt) | ||
398 | #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt) | ||
399 | #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt) | ||
400 | #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt) | ||
401 | #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt) | ||
402 | #define crp_rxvlerr CREG_IDX(RxVlErrCnt) | ||
403 | #define crp_sendstall CREG_IDX(TxFlowStallCnt) | ||
404 | #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt) | ||
405 | #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt) | ||
406 | #define crp_txlenerr CREG_IDX(TxLenErrCnt) | ||
408 | #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt) | ||
409 | #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt) | ||
410 | #define crp_txunderrun CREG_IDX(TxUnderrunCnt) | ||
411 | #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt) | ||
412 | #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt) | ||
413 | #define crp_wordrcv CREG_IDX(RxDwordCnt) | ||
414 | #define crp_wordsend CREG_IDX(TxDwordCnt) | ||
415 | #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut) | ||
416 | |||
417 | /* these are the (few) counters that are not port-specific */ | ||
418 | #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \ | ||
419 | QIB_7322_LBIntCnt_OFFS) / sizeof(u64)) | ||
420 | #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt) | ||
421 | #define cr_lbint CREG_DEVIDX(LBIntCnt) | ||
422 | #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt) | ||
423 | #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt) | ||
424 | #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt) | ||
425 | #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt) | ||
426 | #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt) | ||
427 | |||
428 | /* no chip register for # of IB ports supported, so define */ | ||
429 | #define NUM_IB_PORTS 2 | ||
430 | |||
431 | /* 1 VL15 buffer per hardware IB port, no register for this, so define */ | ||
432 | #define NUM_VL15_BUFS NUM_IB_PORTS | ||
433 | |||
434 | /* | ||
435 | * Contexts 0 and 1 are special, and there is no chip register that | ||
436 | * defines this value, so we have to define it here. | ||
437 | * The full count goes to either context 0 or 1 in a single-port | ||
438 | * hardware configuration; otherwise each context gets half. | ||
439 | */ | ||
440 | #define KCTXT0_EGRCNT 2048 | ||
441 | |||
442 | /* values for vl and port fields in PBC, 7322-specific */ | ||
443 | #define PBC_PORT_SEL_LSB 26 | ||
444 | #define PBC_PORT_SEL_RMASK 1 | ||
445 | #define PBC_VL_NUM_LSB 27 | ||
446 | #define PBC_VL_NUM_RMASK 7 | ||
447 | #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */ | ||
448 | #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */ | ||
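| /* | ||
|  * As a sketch of how these fit together, flush_fifo() below builds the PBC | ||
|  * for a dummy VL15 send roughly as | ||
|  *   pbc = PBC_7322_VL15_SEND | | ||
|  *         (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) | | ||
|  *         (hdrwords + SIZE_OF_CRC); | ||
|  * i.e. the vl/port fields live in the upper 32-bit word of the PBC, hence | ||
|  * the "+ 32". | ||
|  */ | ||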
449 | |||
450 | static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = { | ||
451 | [IB_RATE_2_5_GBPS] = 16, | ||
452 | [IB_RATE_5_GBPS] = 8, | ||
453 | [IB_RATE_10_GBPS] = 4, | ||
454 | [IB_RATE_20_GBPS] = 2, | ||
455 | [IB_RATE_30_GBPS] = 2, | ||
456 | [IB_RATE_40_GBPS] = 1 | ||
457 | }; | ||
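| /* | ||
|  * The delay values above appear to scale inversely with rate, roughly | ||
|  * 40 / rate rounded up (so 40 Gb/s maps to 1); this is only an observation | ||
|  * from the table, not something taken from the hardware documentation. | ||
|  */ | ||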
458 | |||
459 | #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive) | ||
460 | #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive) | ||
461 | |||
462 | /* link training states, from IBC */ | ||
463 | #define IB_7322_LT_STATE_DISABLED 0x00 | ||
464 | #define IB_7322_LT_STATE_LINKUP 0x01 | ||
465 | #define IB_7322_LT_STATE_POLLACTIVE 0x02 | ||
466 | #define IB_7322_LT_STATE_POLLQUIET 0x03 | ||
467 | #define IB_7322_LT_STATE_SLEEPDELAY 0x04 | ||
468 | #define IB_7322_LT_STATE_SLEEPQUIET 0x05 | ||
469 | #define IB_7322_LT_STATE_CFGDEBOUNCE 0x08 | ||
470 | #define IB_7322_LT_STATE_CFGRCVFCFG 0x09 | ||
471 | #define IB_7322_LT_STATE_CFGWAITRMT 0x0a | ||
472 | #define IB_7322_LT_STATE_CFGIDLE 0x0b | ||
473 | #define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c | ||
474 | #define IB_7322_LT_STATE_TXREVLANES 0x0d | ||
475 | #define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e | ||
476 | #define IB_7322_LT_STATE_RECOVERIDLE 0x0f | ||
477 | #define IB_7322_LT_STATE_CFGENH 0x10 | ||
478 | #define IB_7322_LT_STATE_CFGTEST 0x11 | ||
479 | |||
480 | /* link state machine states from IBC */ | ||
481 | #define IB_7322_L_STATE_DOWN 0x0 | ||
482 | #define IB_7322_L_STATE_INIT 0x1 | ||
483 | #define IB_7322_L_STATE_ARM 0x2 | ||
484 | #define IB_7322_L_STATE_ACTIVE 0x3 | ||
485 | #define IB_7322_L_STATE_ACT_DEFER 0x4 | ||
486 | |||
487 | static const u8 qib_7322_physportstate[0x20] = { | ||
488 | [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED, | ||
489 | [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP, | ||
490 | [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL, | ||
491 | [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL, | ||
492 | [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP, | ||
493 | [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP, | ||
494 | [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
495 | [IB_7322_LT_STATE_CFGRCVFCFG] = | ||
496 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
497 | [IB_7322_LT_STATE_CFGWAITRMT] = | ||
498 | IB_PHYSPORTSTATE_CFG_TRAIN, | ||
499 | [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE, | ||
500 | [IB_7322_LT_STATE_RECOVERRETRAIN] = | ||
501 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
502 | [IB_7322_LT_STATE_RECOVERWAITRMT] = | ||
503 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
504 | [IB_7322_LT_STATE_RECOVERIDLE] = | ||
505 | IB_PHYSPORTSTATE_LINK_ERR_RECOVER, | ||
506 | [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH, | ||
507 | [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
508 | [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
509 | [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH, | ||
510 | [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
511 | [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
512 | [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, | ||
513 | [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN | ||
514 | }; | ||
515 | |||
516 | struct qib_chip_specific { | ||
517 | u64 __iomem *cregbase; | ||
518 | u64 *cntrs; | ||
519 | spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */ | ||
520 | spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */ | ||
521 | u64 main_int_mask; /* clear bits which have dedicated handlers */ | ||
522 | u64 int_enable_mask; /* for per port interrupts in single port mode */ | ||
523 | u64 errormask; | ||
524 | u64 hwerrmask; | ||
525 | u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */ | ||
526 | u64 gpio_mask; /* shadow the gpio mask register */ | ||
527 | u64 extctrl; /* shadow the gpio output enable, etc... */ | ||
528 | u32 ncntrs; | ||
529 | u32 nportcntrs; | ||
530 | u32 cntrnamelen; | ||
531 | u32 portcntrnamelen; | ||
532 | u32 numctxts; | ||
533 | u32 rcvegrcnt; | ||
534 | u32 updthresh; /* current AvailUpdThld */ | ||
535 | u32 updthresh_dflt; /* default AvailUpdThld */ | ||
536 | u32 r1; | ||
537 | int irq; | ||
538 | u32 num_msix_entries; | ||
539 | u32 sdmabufcnt; | ||
540 | u32 lastbuf_for_pio; | ||
541 | u32 stay_in_freeze; | ||
542 | u32 recovery_ports_initted; | ||
543 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
544 | u32 dca_ctrl; | ||
545 | int rhdr_cpu[18]; | ||
546 | int sdma_cpu[2]; | ||
547 | u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */ | ||
548 | #endif | ||
549 | struct msix_entry *msix_entries; | ||
550 | void **msix_arg; | ||
551 | unsigned long *sendchkenable; | ||
552 | unsigned long *sendgrhchk; | ||
553 | unsigned long *sendibchk; | ||
554 | u32 rcvavail_timeout[18]; | ||
555 | char emsgbuf[128]; /* for device error interrupt msg buffer */ | ||
556 | }; | ||
557 | |||
558 | /* Table of Tx Emphasis entries, in "human readable" form. */ | ||
559 | struct txdds_ent { | ||
560 | u8 amp; | ||
561 | u8 pre; | ||
562 | u8 main; | ||
563 | u8 post; | ||
564 | }; | ||
565 | |||
566 | struct vendor_txdds_ent { | ||
567 | u8 oui[QSFP_VOUI_LEN]; | ||
568 | u8 *partnum; | ||
569 | struct txdds_ent sdr; | ||
570 | struct txdds_ent ddr; | ||
571 | struct txdds_ent qdr; | ||
572 | }; | ||
573 | |||
574 | static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); | ||
575 | |||
576 | #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ | ||
577 | #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ | ||
578 | |||
579 | #define H1_FORCE_VAL 8 | ||
580 | #define H1_FORCE_QME 1 /* may be overridden via setup_qme_params() */ | ||
581 | #define H1_FORCE_QMH 7 /* may be overridden via setup_qmh_params() */ | ||
582 | |||
583 | /* The static and dynamic registers are paired, and the pairs indexed by spd */ | ||
584 | #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \ | ||
585 | + ((spd) * 2)) | ||
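| /* | ||
|  * e.g. krp_static_adapt_dis(2) selects the register pair used for QDR; | ||
|  * handle_serdes_issues() below writes it with the QDR_STATIC_ADAPT_* | ||
|  * values to re-enable or disable QDR adaptation around link state changes. | ||
|  */ | ||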
586 | |||
587 | #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */ | ||
588 | #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */ | ||
589 | #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */ | ||
590 | #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */ | ||
591 | #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */ | ||
592 | |||
593 | static const struct txdds_ent qmh_sdr_txdds = { 11, 0, 5, 6 }; | ||
594 | static const struct txdds_ent qmh_ddr_txdds = { 7, 0, 2, 8 }; | ||
595 | static const struct txdds_ent qmh_qdr_txdds = { 0, 1, 3, 10 }; | ||
596 | |||
597 | /* this is used for unknown mez cards also */ | ||
598 | static const struct txdds_ent qme_sdr_txdds = { 11, 0, 4, 4 }; | ||
599 | static const struct txdds_ent qme_ddr_txdds = { 7, 0, 2, 7 }; | ||
600 | static const struct txdds_ent qme_qdr_txdds = { 0, 1, 12, 11 }; | ||
601 | |||
602 | struct qib_chippport_specific { | ||
603 | u64 __iomem *kpregbase; | ||
604 | u64 __iomem *cpregbase; | ||
605 | u64 *portcntrs; | ||
606 | struct qib_pportdata *ppd; | ||
607 | wait_queue_head_t autoneg_wait; | ||
608 | struct delayed_work autoneg_work; | ||
609 | struct delayed_work ipg_work; | ||
610 | struct timer_list chase_timer; | ||
611 | /* | ||
612 | * The fields below are used to establish deltas for IB symbol | ||
613 | * errors and link recovery errors. They can be reported on | ||
614 | * some chips during link negotiation prior to INIT, and with | ||
615 | * DDR when faking DDR negotiations with non-IBTA switches. | ||
616 | * The chip counters are adjusted at driver unload if there is | ||
617 | * a non-zero delta. | ||
618 | */ | ||
619 | u64 ibdeltainprog; | ||
620 | u64 ibsymdelta; | ||
621 | u64 ibsymsnap; | ||
622 | u64 iblnkerrdelta; | ||
623 | u64 iblnkerrsnap; | ||
624 | u64 iblnkdownsnap; | ||
625 | u64 iblnkdowndelta; | ||
626 | u64 ibmalfdelta; | ||
627 | u64 ibmalfsnap; | ||
628 | u64 ibcctrl_a; /* krp_ibcctrl_a shadow */ | ||
629 | u64 ibcctrl_b; /* krp_ibcctrl_b shadow */ | ||
630 | u64 qdr_dfe_time; | ||
631 | u64 chase_end; | ||
632 | u32 autoneg_tries; | ||
633 | u32 recovery_init; | ||
634 | u32 qdr_dfe_on; | ||
635 | u32 qdr_reforce; | ||
636 | /* | ||
637 | * Per-bay per-channel rcv QMH H1 values and Tx values for QDR. | ||
638 | * entry zero is unused, to simplify indexing | ||
639 | */ | ||
640 | u16 h1_val; | ||
641 | u8 amp[SERDES_CHANS]; | ||
642 | u8 pre[SERDES_CHANS]; | ||
643 | u8 mainv[SERDES_CHANS]; | ||
644 | u8 post[SERDES_CHANS]; | ||
645 | u8 no_eep; /* attenuation index to use if no qsfp info */ | ||
646 | u8 ipg_tries; | ||
647 | u8 ibmalfusesnap; | ||
648 | struct qib_qsfp_data qsfp_data; | ||
649 | char epmsgbuf[192]; /* for port error interrupt msg buffer */ | ||
650 | }; | ||
651 | |||
652 | static struct { | ||
653 | const char *name; | ||
654 | irq_handler_t handler; | ||
655 | int lsb; | ||
656 | int port; /* 0 if not port-specific, else port # */ | ||
657 | } irq_table[] = { | ||
658 | { QIB_DRV_NAME, qib_7322intr, -1, 0 }, | ||
659 | { QIB_DRV_NAME " (buf avail)", qib_7322bufavail, | ||
660 | SYM_LSB(IntStatus, SendBufAvail), 0 }, | ||
661 | { QIB_DRV_NAME " (sdma 0)", sdma_intr, | ||
662 | SYM_LSB(IntStatus, SDmaInt_0), 1 }, | ||
663 | { QIB_DRV_NAME " (sdma 1)", sdma_intr, | ||
664 | SYM_LSB(IntStatus, SDmaInt_1), 2 }, | ||
665 | { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr, | ||
666 | SYM_LSB(IntStatus, SDmaIdleInt_0), 1 }, | ||
667 | { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr, | ||
668 | SYM_LSB(IntStatus, SDmaIdleInt_1), 2 }, | ||
669 | { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr, | ||
670 | SYM_LSB(IntStatus, SDmaProgressInt_0), 1 }, | ||
671 | { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr, | ||
672 | SYM_LSB(IntStatus, SDmaProgressInt_1), 2 }, | ||
673 | { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr, | ||
674 | SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 }, | ||
675 | { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr, | ||
676 | SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, | ||
677 | }; | ||
678 | |||
679 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
680 | static const struct dca_reg_map { | ||
681 | int shadow_inx; | ||
682 | int lsb; | ||
683 | u64 mask; | ||
684 | u16 regno; | ||
685 | } dca_rcvhdr_reg_map[] = { | ||
686 | { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH), | ||
687 | ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) }, | ||
688 | { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH), | ||
689 | ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) }, | ||
690 | { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH), | ||
691 | ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) }, | ||
692 | { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH), | ||
693 | ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) }, | ||
694 | { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH), | ||
695 | ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) }, | ||
696 | { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH), | ||
697 | ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) }, | ||
698 | { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH), | ||
699 | ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) }, | ||
700 | { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH), | ||
701 | ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) }, | ||
702 | { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH), | ||
703 | ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) }, | ||
704 | { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH), | ||
705 | ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) }, | ||
706 | { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH), | ||
707 | ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) }, | ||
708 | { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH), | ||
709 | ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) }, | ||
710 | { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH), | ||
711 | ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) }, | ||
712 | { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH), | ||
713 | ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) }, | ||
714 | { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH), | ||
715 | ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) }, | ||
716 | { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH), | ||
717 | ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) }, | ||
718 | { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH), | ||
719 | ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) }, | ||
720 | { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH), | ||
721 | ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) }, | ||
722 | }; | ||
723 | #endif | ||
724 | |||
725 | /* ibcctrl bits */ | ||
726 | #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 | ||
727 | /* cycle through TS1/TS2 till OK */ | ||
728 | #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2 | ||
729 | /* wait for TS1, then go on */ | ||
730 | #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3 | ||
731 | #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16 | ||
732 | |||
733 | #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ | ||
734 | #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ | ||
735 | #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ | ||
736 | |||
737 | #define BLOB_7322_IBCHG 0x101 | ||
738 | |||
739 | static inline void qib_write_kreg(const struct qib_devdata *dd, | ||
740 | const u32 regno, u64 value); | ||
741 | static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32); | ||
742 | static void write_7322_initregs(struct qib_devdata *); | ||
743 | static void write_7322_init_portregs(struct qib_pportdata *); | ||
744 | static void setup_7322_link_recovery(struct qib_pportdata *, u32); | ||
745 | static void check_7322_rxe_status(struct qib_pportdata *); | ||
746 | static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *); | ||
747 | |||
748 | /** | ||
749 | * qib_read_ureg32 - read 32-bit virtualized per-context register | ||
750 | * @dd: device | ||
751 | * @regno: register number | ||
752 | * @ctxt: context number | ||
753 | * | ||
754 | * Return the contents of a register that is virtualized to be per context. | ||
755 | * Returns 0 if the chip is not present (not distinguishable from valid | ||
756 | * contents at runtime; we may add a separate error variable at some point). | ||
757 | */ | ||
758 | static inline u32 qib_read_ureg32(const struct qib_devdata *dd, | ||
759 | enum qib_ureg regno, int ctxt) | ||
760 | { | ||
761 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
762 | return 0; | ||
763 | return readl(regno + (u64 __iomem *)( | ||
764 | (dd->ureg_align * ctxt) + (dd->userbase ? | ||
765 | (char __iomem *)dd->userbase : | ||
766 | (char __iomem *)dd->kregbase + dd->uregbase))); | ||
767 | } | ||
768 | |||
769 | /** | ||
770 | * qib_read_ureg - read virtualized per-context register | ||
771 | * @dd: device | ||
772 | * @regno: register number | ||
773 | * @ctxt: context number | ||
774 | * | ||
775 | * Return the contents of a register that is virtualized to be per context. | ||
776 | * Returns 0 if the chip is not present (not distinguishable from valid | ||
777 | * contents at runtime; we may add a separate error variable at some point). | ||
778 | */ | ||
779 | static inline u64 qib_read_ureg(const struct qib_devdata *dd, | ||
780 | enum qib_ureg regno, int ctxt) | ||
781 | { | ||
782 | |||
783 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
784 | return 0; | ||
785 | return readq(regno + (u64 __iomem *)( | ||
786 | (dd->ureg_align * ctxt) + (dd->userbase ? | ||
787 | (char __iomem *)dd->userbase : | ||
788 | (char __iomem *)dd->kregbase + dd->uregbase))); | ||
789 | } | ||
790 | |||
791 | /** | ||
792 | * qib_write_ureg - write virtualized per-context register | ||
793 | * @dd: device | ||
794 | * @regno: register number | ||
795 | * @value: value | ||
796 | * @ctxt: context | ||
797 | * | ||
798 | * Write the contents of a register that is virtualized to be per context. | ||
799 | */ | ||
800 | static inline void qib_write_ureg(const struct qib_devdata *dd, | ||
801 | enum qib_ureg regno, u64 value, int ctxt) | ||
802 | { | ||
803 | u64 __iomem *ubase; | ||
804 | if (dd->userbase) | ||
805 | ubase = (u64 __iomem *) | ||
806 | ((char __iomem *) dd->userbase + | ||
807 | dd->ureg_align * ctxt); | ||
808 | else | ||
809 | ubase = (u64 __iomem *) | ||
810 | (dd->uregbase + | ||
811 | (char __iomem *) dd->kregbase + | ||
812 | dd->ureg_align * ctxt); | ||
813 | |||
814 | if (dd->kregbase && (dd->flags & QIB_PRESENT)) | ||
815 | writeq(value, &ubase[regno]); | ||
816 | } | ||
817 | |||
818 | static inline u32 qib_read_kreg32(const struct qib_devdata *dd, | ||
819 | const u32 regno) | ||
820 | { | ||
821 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
822 | return -1; | ||
823 | return readl((u32 __iomem *) &dd->kregbase[regno]); | ||
824 | } | ||
825 | |||
826 | static inline u64 qib_read_kreg64(const struct qib_devdata *dd, | ||
827 | const u32 regno) | ||
828 | { | ||
829 | if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) | ||
830 | return -1; | ||
831 | return readq(&dd->kregbase[regno]); | ||
832 | } | ||
833 | |||
834 | static inline void qib_write_kreg(const struct qib_devdata *dd, | ||
835 | const u32 regno, u64 value) | ||
836 | { | ||
837 | if (dd->kregbase && (dd->flags & QIB_PRESENT)) | ||
838 | writeq(value, &dd->kregbase[regno]); | ||
839 | } | ||
840 | |||
841 | /* | ||
842 | * not many sanity checks for the port-specific kernel register routines, | ||
843 | * since they are only used when it's known to be safe. | ||
844 | */ | ||
845 | static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd, | ||
846 | const u16 regno) | ||
847 | { | ||
848 | if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT)) | ||
849 | return 0ULL; | ||
850 | return readq(&ppd->cpspec->kpregbase[regno]); | ||
851 | } | ||
852 | |||
853 | static inline void qib_write_kreg_port(const struct qib_pportdata *ppd, | ||
854 | const u16 regno, u64 value) | ||
855 | { | ||
856 | if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase && | ||
857 | (ppd->dd->flags & QIB_PRESENT)) | ||
858 | writeq(value, &ppd->cpspec->kpregbase[regno]); | ||
859 | } | ||
860 | |||
861 | /** | ||
862 | * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register | ||
863 | * @dd: the qlogic_ib device | ||
864 | * @regno: the register number to write | ||
865 | * @ctxt: the context containing the register | ||
866 | * @value: the value to write | ||
867 | */ | ||
868 | static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd, | ||
869 | const u16 regno, unsigned ctxt, | ||
870 | u64 value) | ||
871 | { | ||
872 | qib_write_kreg(dd, regno + ctxt, value); | ||
873 | } | ||
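| /* | ||
|  * For instance, a (hypothetical) call such as | ||
|  *   qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, hdrq_dma_addr); | ||
|  * programs RcvHdrAddr for that context, relying on the per-context | ||
|  * registers being laid out consecutively after the ctxt-0 register. | ||
|  */ | ||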
874 | |||
875 | static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno) | ||
876 | { | ||
877 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | ||
878 | return 0; | ||
879 | return readq(&dd->cspec->cregbase[regno]); | ||
882 | } | ||
883 | |||
884 | static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno) | ||
885 | { | ||
886 | if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT)) | ||
887 | return 0; | ||
888 | return readl(&dd->cspec->cregbase[regno]); | ||
891 | } | ||
892 | |||
893 | static inline void write_7322_creg_port(const struct qib_pportdata *ppd, | ||
894 | u16 regno, u64 value) | ||
895 | { | ||
896 | if (ppd->cpspec && ppd->cpspec->cpregbase && | ||
897 | (ppd->dd->flags & QIB_PRESENT)) | ||
898 | writeq(value, &ppd->cpspec->cpregbase[regno]); | ||
899 | } | ||
900 | |||
901 | static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd, | ||
902 | u16 regno) | ||
903 | { | ||
904 | if (!ppd->cpspec || !ppd->cpspec->cpregbase || | ||
905 | !(ppd->dd->flags & QIB_PRESENT)) | ||
906 | return 0; | ||
907 | return readq(&ppd->cpspec->cpregbase[regno]); | ||
908 | } | ||
909 | |||
910 | static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd, | ||
911 | u16 regno) | ||
912 | { | ||
913 | if (!ppd->cpspec || !ppd->cpspec->cpregbase || | ||
914 | !(ppd->dd->flags & QIB_PRESENT)) | ||
915 | return 0; | ||
916 | return readl(&ppd->cpspec->cpregbase[regno]); | ||
917 | } | ||
918 | |||
919 | /* bits in Control register */ | ||
920 | #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset) | ||
921 | #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn) | ||
922 | |||
923 | /* bits in general interrupt regs */ | ||
924 | #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask) | ||
925 | #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17) | ||
926 | #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB) | ||
927 | #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask) | ||
928 | #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17) | ||
929 | #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB) | ||
930 | #define QIB_I_C_ERROR INT_MASK(Err) | ||
931 | |||
932 | #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1)) | ||
933 | #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail) | ||
934 | #define QIB_I_GPIO INT_MASK(AssertGPIO) | ||
935 | #define QIB_I_P_SDMAINT(pidx) \ | ||
936 | (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \ | ||
937 | INT_MASK_P(SDmaProgress, pidx) | \ | ||
938 | INT_MASK_PM(SDmaCleanupDone, pidx)) | ||
939 | |||
940 | /* Interrupt bits that are "per port" */ | ||
941 | #define QIB_I_P_BITSEXTANT(pidx) \ | ||
942 | (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \ | ||
943 | INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \ | ||
944 | INT_MASK_P(SDmaProgress, pidx) | \ | ||
945 | INT_MASK_PM(SDmaCleanupDone, pidx)) | ||
946 | |||
947 | /* Interrupt bits that are common to a device */ | ||
948 | /* currently unused: QIB_I_SPIOSENT */ | ||
949 | #define QIB_I_C_BITSEXTANT \ | ||
950 | (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \ | ||
951 | QIB_I_SPIOSENT | \ | ||
952 | QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO) | ||
953 | |||
954 | #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \ | ||
955 | QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1)) | ||
956 | |||
957 | /* | ||
958 | * Error bits that are "per port". | ||
959 | */ | ||
960 | #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged) | ||
961 | #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr) | ||
962 | #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr) | ||
963 | #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr) | ||
964 | #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr) | ||
965 | #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr) | ||
966 | #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr) | ||
967 | #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr) | ||
968 | #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr) | ||
969 | #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr) | ||
970 | #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr) | ||
971 | #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr) | ||
972 | #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr) | ||
973 | #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr) | ||
974 | #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr) | ||
975 | #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr) | ||
976 | #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr) | ||
977 | #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr) | ||
978 | #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr) | ||
979 | #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr) | ||
980 | #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr) | ||
981 | #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr) | ||
982 | #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr) | ||
983 | #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr) | ||
984 | #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr) | ||
985 | #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr) | ||
986 | #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr) | ||
987 | #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr) | ||
988 | |||
989 | #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr) | ||
990 | #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr) | ||
991 | #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr) | ||
992 | #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr) | ||
993 | #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr) | ||
994 | #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr) | ||
995 | #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr) | ||
996 | #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr) | ||
997 | #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr) | ||
998 | #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr) | ||
999 | #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr) | ||
1000 | |||
1001 | /* Error bits that are common to a device */ | ||
1002 | #define QIB_E_RESET ERR_MASK(ResetNegated) | ||
1003 | #define QIB_E_HARDWARE ERR_MASK(HardwareErr) | ||
1004 | #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr) | ||
1005 | |||
1006 | |||
1007 | /* | ||
1008 | * Per-chip (rather than per-port) errors. Most either do | ||
1009 | * nothing but trigger a print (because they self-recover, or | ||
1010 | * always occur in tandem with other errors that handle the | ||
1011 | * issue), or indicate errors with no recovery; either way we | ||
1012 | * want to know that they happened. | ||
1013 | */ | ||
1014 | #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr) | ||
1015 | #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd) | ||
1016 | #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr) | ||
1017 | #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr) | ||
1018 | #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr) | ||
1019 | #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr) | ||
1020 | #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr) | ||
1021 | #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr) | ||
1022 | |||
1023 | /* SDMA chip errors (not per port) | ||
1024 | * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get | ||
1025 | * the SDMAHALT error immediately, so we just print the dup error via the | ||
1026 | * E_AUTO mechanism. This is true of most of the per-port fatal errors | ||
1027 | * as well, but since this is port-independent, by definition, it's | ||
1028 | * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per | ||
1029 | * packet send errors, and so are handled in the same manner as other | ||
1030 | * per-packet errors. | ||
1031 | */ | ||
1032 | #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err) | ||
1033 | #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr) | ||
1034 | #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr) | ||
1035 | |||
1036 | /* | ||
1037 | * The below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS; | ||
1038 | * it is used to print "common" packet errors. | ||
1039 | */ | ||
1040 | #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\ | ||
1041 | QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\ | ||
1042 | QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\ | ||
1043 | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \ | ||
1044 | QIB_E_P_REBP) | ||
1045 | |||
1046 | /* Error bits that are packet-related (Receive, per-port) */ | ||
1047 | #define QIB_E_P_RPKTERRS (\ | ||
1048 | QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \ | ||
1049 | QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \ | ||
1050 | QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\ | ||
1051 | QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \ | ||
1052 | QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \ | ||
1053 | QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP) | ||
1054 | |||
1055 | /* | ||
1056 | * Error bits that are Send-related (per port) | ||
1057 | * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling). | ||
1058 | * All of these potentially need to have a buffer disarmed | ||
1059 | */ | ||
1060 | #define QIB_E_P_SPKTERRS (\ | ||
1061 | QIB_E_P_SUNEXP_PKTNUM |\ | ||
1062 | QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\ | ||
1063 | QIB_E_P_SMAXPKTLEN |\ | ||
1064 | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \ | ||
1065 | QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \ | ||
1066 | QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL) | ||
1067 | |||
1068 | #define QIB_E_SPKTERRS ( \ | ||
1069 | QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \ | ||
1070 | ERR_MASK_N(SendUnsupportedVLErr) | \ | ||
1071 | QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT) | ||
1072 | |||
1073 | #define QIB_E_P_SDMAERRS ( \ | ||
1074 | QIB_E_P_SDMAHALT | \ | ||
1075 | QIB_E_P_SDMADESCADDRMISALIGN | \ | ||
1076 | QIB_E_P_SDMAUNEXPDATA | \ | ||
1077 | QIB_E_P_SDMAMISSINGDW | \ | ||
1078 | QIB_E_P_SDMADWEN | \ | ||
1079 | QIB_E_P_SDMARPYTAG | \ | ||
1080 | QIB_E_P_SDMA1STDESC | \ | ||
1081 | QIB_E_P_SDMABASE | \ | ||
1082 | QIB_E_P_SDMATAILOUTOFBOUND | \ | ||
1083 | QIB_E_P_SDMAOUTOFBOUND | \ | ||
1084 | QIB_E_P_SDMAGENMISMATCH) | ||
1085 | |||
1086 | /* | ||
1087 | * This sets some bits more than once, but makes it more obvious which | ||
1088 | * bits are not handled under other categories, and the repeat definition | ||
1089 | * is not a problem. | ||
1090 | */ | ||
1091 | #define QIB_E_P_BITSEXTANT ( \ | ||
1092 | QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \ | ||
1093 | QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \ | ||
1094 | QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \ | ||
1095 | QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \ | ||
1096 | ) | ||
1097 | |||
1098 | /* | ||
1099 | * These are errors that can occur when the link | ||
1100 | * changes state while a packet is being sent or received. This doesn't | ||
1101 | * cover things like EBP or VCRC that can result from a send in progress | ||
1102 | * when the link changes state, so we receive a "known bad" packet. | ||
1103 | * All of these are "per port", so renamed: | ||
1104 | */ | ||
1105 | #define QIB_E_P_LINK_PKTERRS (\ | ||
1106 | QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\ | ||
1107 | QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\ | ||
1108 | QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\ | ||
1109 | QIB_E_P_RUNEXPCHAR) | ||
1110 | |||
1111 | /* | ||
1112 | * This sets some bits more than once, but makes it more obvious which | ||
1113 | * bits are not handled under other categories (such as QIB_E_SPKTERRS), | ||
1114 | * and the repeat definition is not a problem. | ||
1115 | */ | ||
1116 | #define QIB_E_C_BITSEXTANT (\ | ||
1117 | QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\ | ||
1118 | QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\ | ||
1119 | QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE) | ||
1120 | |||
1121 | /* Likewise Neuter E_SPKT_ERRS_IGNORE */ | ||
1122 | #define E_SPKT_ERRS_IGNORE 0 | ||
1123 | |||
1124 | #define QIB_EXTS_MEMBIST_DISABLED \ | ||
1125 | SYM_MASK(EXTStatus, MemBISTDisabled) | ||
1126 | #define QIB_EXTS_MEMBIST_ENDTEST \ | ||
1127 | SYM_MASK(EXTStatus, MemBISTEndTest) | ||
1128 | |||
1129 | #define QIB_E_SPIOARMLAUNCH \ | ||
1130 | ERR_MASK(SendArmLaunchErr) | ||
1131 | |||
1132 | #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd) | ||
1133 | #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd) | ||
1134 | |||
1135 | /* | ||
1136 | * IBTA_1_2 is set when multiple speeds are enabled (normal), | ||
1137 | * and also if forced QDR (only QDR enabled). It's enabled for the | ||
1138 | * forced QDR case so that scrambling will be enabled by the TS3 | ||
1139 | * exchange, when supported by both sides of the link. | ||
1140 | */ | ||
1141 | #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE) | ||
1142 | #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED) | ||
1143 | #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR) | ||
1144 | #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | ||
1145 | #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | ||
1146 | #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \ | ||
1147 | SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)) | ||
1148 | #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR) | ||
1149 | |||
1150 | #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod) | ||
1151 | #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod) | ||
1152 | |||
1153 | #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS) | ||
1154 | #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS)) | ||
1155 | #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS)) | ||
1156 | |||
1157 | #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP) | ||
1158 | #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP) | ||
1159 | #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \ | ||
1160 | SYM_MASK(IBCCtrlB_0, HRTBT_ENB)) | ||
1161 | #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \ | ||
1162 | SYM_LSB(IBCCtrlB_0, HRTBT_ENB)) | ||
1163 | #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB) | ||
1164 | |||
1165 | #define IBA7322_REDIRECT_VEC_PER_REG 12 | ||
1166 | |||
1167 | #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En) | ||
1168 | #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En) | ||
1169 | #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En) | ||
1170 | #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En) | ||
1171 | #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En) | ||
1172 | |||
1173 | #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */ | ||
1174 | |||
1175 | #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \ | ||
1176 | .msg = #fldname } | ||
1177 | #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \ | ||
1178 | fldname##Mask##_##port), .msg = #fldname } | ||
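| /* | ||
|  * e.g. HWE_AUTO_P(SDmaMemReadErr, 1) expands to | ||
|  *   { .mask = SYM_MASK(HwErrMask, SDmaMemReadErrMask_1), .msg = "SDmaMemReadErr" } | ||
|  * so the table below stays a simple list of field names. | ||
|  */ | ||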
1179 | static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = { | ||
1180 | HWE_AUTO_P(IBSerdesPClkNotDetect, 1), | ||
1181 | HWE_AUTO_P(IBSerdesPClkNotDetect, 0), | ||
1182 | HWE_AUTO(PCIESerdesPClkNotDetect), | ||
1183 | HWE_AUTO(PowerOnBISTFailed), | ||
1184 | HWE_AUTO(TempsenseTholdReached), | ||
1185 | HWE_AUTO(MemoryErr), | ||
1186 | HWE_AUTO(PCIeBusParityErr), | ||
1187 | HWE_AUTO(PcieCplTimeout), | ||
1188 | HWE_AUTO(PciePoisonedTLP), | ||
1189 | HWE_AUTO_P(SDmaMemReadErr, 1), | ||
1190 | HWE_AUTO_P(SDmaMemReadErr, 0), | ||
1191 | HWE_AUTO_P(IBCBusFromSPCParityErr, 1), | ||
1192 | HWE_AUTO_P(IBCBusFromSPCParityErr, 0), | ||
1193 | HWE_AUTO_P(statusValidNoEop, 1), | ||
1194 | HWE_AUTO_P(statusValidNoEop, 0), | ||
1195 | HWE_AUTO(LATriggered), | ||
1196 | { .mask = 0 } | ||
1197 | }; | ||
1198 | |||
1199 | #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \ | ||
1200 | .msg = #fldname } | ||
1201 | #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \ | ||
1202 | .msg = #fldname } | ||
1203 | static const struct qib_hwerror_msgs qib_7322error_msgs[] = { | ||
1204 | E_AUTO(ResetNegated), | ||
1205 | E_AUTO(HardwareErr), | ||
1206 | E_AUTO(InvalidAddrErr), | ||
1207 | E_AUTO(SDmaVL15Err), | ||
1208 | E_AUTO(SBufVL15MisUseErr), | ||
1209 | E_AUTO(InvalidEEPCmd), | ||
1210 | E_AUTO(RcvContextShareErr), | ||
1211 | E_AUTO(SendVLMismatchErr), | ||
1212 | E_AUTO(SendArmLaunchErr), | ||
1213 | E_AUTO(SendSpecialTriggerErr), | ||
1214 | E_AUTO(SDmaWrongPortErr), | ||
1215 | E_AUTO(SDmaBufMaskDuplicateErr), | ||
1216 | E_AUTO(RcvHdrFullErr), | ||
1217 | E_AUTO(RcvEgrFullErr), | ||
1218 | { .mask = 0 } | ||
1219 | }; | ||
1220 | |||
1221 | static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = { | ||
1222 | E_P_AUTO(IBStatusChanged), | ||
1223 | E_P_AUTO(SHeadersErr), | ||
1224 | E_P_AUTO(VL15BufMisuseErr), | ||
1225 | /* | ||
1226 | * SDmaHaltErr is not really an error; report it as "SDmaHalted" to make that clearer. | ||
1227 | */ | ||
1228 | {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"}, | ||
1229 | E_P_AUTO(SDmaDescAddrMisalignErr), | ||
1230 | E_P_AUTO(SDmaUnexpDataErr), | ||
1231 | E_P_AUTO(SDmaMissingDwErr), | ||
1232 | E_P_AUTO(SDmaDwEnErr), | ||
1233 | E_P_AUTO(SDmaRpyTagErr), | ||
1234 | E_P_AUTO(SDma1stDescErr), | ||
1235 | E_P_AUTO(SDmaBaseErr), | ||
1236 | E_P_AUTO(SDmaTailOutOfBoundErr), | ||
1237 | E_P_AUTO(SDmaOutOfBoundErr), | ||
1238 | E_P_AUTO(SDmaGenMismatchErr), | ||
1239 | E_P_AUTO(SendBufMisuseErr), | ||
1240 | E_P_AUTO(SendUnsupportedVLErr), | ||
1241 | E_P_AUTO(SendUnexpectedPktNumErr), | ||
1242 | E_P_AUTO(SendDroppedDataPktErr), | ||
1243 | E_P_AUTO(SendDroppedSmpPktErr), | ||
1244 | E_P_AUTO(SendPktLenErr), | ||
1245 | E_P_AUTO(SendUnderRunErr), | ||
1246 | E_P_AUTO(SendMaxPktLenErr), | ||
1247 | E_P_AUTO(SendMinPktLenErr), | ||
1248 | E_P_AUTO(RcvIBLostLinkErr), | ||
1249 | E_P_AUTO(RcvHdrErr), | ||
1250 | E_P_AUTO(RcvHdrLenErr), | ||
1251 | E_P_AUTO(RcvBadTidErr), | ||
1252 | E_P_AUTO(RcvBadVersionErr), | ||
1253 | E_P_AUTO(RcvIBFlowErr), | ||
1254 | E_P_AUTO(RcvEBPErr), | ||
1255 | E_P_AUTO(RcvUnsupportedVLErr), | ||
1256 | E_P_AUTO(RcvUnexpectedCharErr), | ||
1257 | E_P_AUTO(RcvShortPktLenErr), | ||
1258 | E_P_AUTO(RcvLongPktLenErr), | ||
1259 | E_P_AUTO(RcvMaxPktLenErr), | ||
1260 | E_P_AUTO(RcvMinPktLenErr), | ||
1261 | E_P_AUTO(RcvICRCErr), | ||
1262 | E_P_AUTO(RcvVCRCErr), | ||
1263 | E_P_AUTO(RcvFormatErr), | ||
1264 | { .mask = 0 } | ||
1265 | }; | ||
1266 | |||
1267 | /* | ||
1268 | * Below generates "auto-message" for interrupts not specific to any port or | ||
1269 | * context | ||
1270 | */ | ||
1271 | #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \ | ||
1272 | .msg = #fldname } | ||
1273 | /* Below generates "auto-message" for interrupts specific to a port */ | ||
1274 | #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\ | ||
1275 | SYM_LSB(IntMask, fldname##Mask##_0), \ | ||
1276 | SYM_LSB(IntMask, fldname##Mask##_1)), \ | ||
1277 | .msg = #fldname "_P" } | ||
1278 | /* For some reason, the SerDesTrimDone bits are reversed */ | ||
1279 | #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\ | ||
1280 | SYM_LSB(IntMask, fldname##Mask##_1), \ | ||
1281 | SYM_LSB(IntMask, fldname##Mask##_0)), \ | ||
1282 | .msg = #fldname "_P" } | ||
1283 | /* | ||
1284 | * Below generates "auto-message" for interrupts specific to a context, | ||
1285 | * with ctxt-number appended | ||
1286 | */ | ||
1287 | #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\ | ||
1288 | SYM_LSB(IntMask, fldname##0IntMask), \ | ||
1289 | SYM_LSB(IntMask, fldname##17IntMask)), \ | ||
1290 | .msg = #fldname "_C"} | ||
1291 | |||
1292 | static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = { | ||
1293 | INTR_AUTO_P(SDmaInt), | ||
1294 | INTR_AUTO_P(SDmaProgressInt), | ||
1295 | INTR_AUTO_P(SDmaIdleInt), | ||
1296 | INTR_AUTO_P(SDmaCleanupDone), | ||
1297 | INTR_AUTO_C(RcvUrg), | ||
1298 | INTR_AUTO_P(ErrInt), | ||
1299 | INTR_AUTO(ErrInt), /* non-port-specific errs */ | ||
1300 | INTR_AUTO(AssertGPIOInt), | ||
1301 | INTR_AUTO_P(SendDoneInt), | ||
1302 | INTR_AUTO(SendBufAvailInt), | ||
1303 | INTR_AUTO_C(RcvAvail), | ||
1304 | { .mask = 0 } | ||
1305 | }; | ||
1306 | |||
1307 | #define TXSYMPTOM_AUTO_P(fldname) \ | ||
1308 | { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname } | ||
1309 | static const struct qib_hwerror_msgs hdrchk_msgs[] = { | ||
1310 | TXSYMPTOM_AUTO_P(NonKeyPacket), | ||
1311 | TXSYMPTOM_AUTO_P(GRHFail), | ||
1312 | TXSYMPTOM_AUTO_P(PkeyFail), | ||
1313 | TXSYMPTOM_AUTO_P(QPFail), | ||
1314 | TXSYMPTOM_AUTO_P(SLIDFail), | ||
1315 | TXSYMPTOM_AUTO_P(RawIPV6), | ||
1316 | TXSYMPTOM_AUTO_P(PacketTooSmall), | ||
1317 | { .mask = 0 } | ||
1318 | }; | ||
1319 | |||
1320 | #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */ | ||
1321 | |||
1322 | /* | ||
1323 | * Called when we might have an error that is specific to a particular | ||
1324 | * PIO buffer, and may need to cancel that buffer so it can be re-used | ||
1325 | * without forcing an update of pioavail. | ||
1326 | */ | ||
1327 | static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd) | ||
1328 | { | ||
1329 | struct qib_devdata *dd = ppd->dd; | ||
1330 | u32 i; | ||
1331 | int any; | ||
1332 | u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | ||
1333 | u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG; | ||
1334 | unsigned long sbuf[4]; | ||
1335 | |||
1336 | /* | ||
1337 | * It's possible that sendbuffererror could have bits set; we might | ||
1338 | * have already done this as a result of hardware error handling. | ||
1339 | */ | ||
1340 | any = 0; | ||
1341 | for (i = 0; i < regcnt; ++i) { | ||
1342 | sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i); | ||
1343 | if (sbuf[i]) { | ||
1344 | any = 1; | ||
1345 | qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]); | ||
1346 | } | ||
1347 | } | ||
1348 | |||
1349 | if (any) | ||
1350 | qib_disarm_piobufs_set(dd, sbuf, piobcnt); | ||
1351 | } | ||
1352 | |||
1353 | /* No txe_recover yet, if ever */ | ||
1354 | |||
1355 | /* No decode__errors yet */ | ||
1356 | static void err_decode(char *msg, size_t len, u64 errs, | ||
1357 | const struct qib_hwerror_msgs *msp) | ||
1358 | { | ||
1359 | u64 these, lmask; | ||
1360 | int took, multi, n = 0; | ||
1361 | |||
1362 | while (msp && msp->mask) { | ||
1363 | multi = (msp->mask & (msp->mask - 1)); | ||
1364 | while (errs & msp->mask) { | ||
1365 | these = (errs & msp->mask); | ||
1366 | lmask = (these & (these - 1)) ^ these; | ||
1367 | if (len) { | ||
1368 | if (n++) { | ||
1369 | /* separate the strings */ | ||
1370 | *msg++ = ','; | ||
1371 | len--; | ||
1372 | } | ||
1373 | took = scnprintf(msg, len, "%s", msp->msg); | ||
1374 | len -= took; | ||
1375 | msg += took; | ||
1376 | } | ||
1377 | errs &= ~lmask; | ||
1378 | if (len && multi) { | ||
1379 | /* More than one bit this mask */ | ||
1380 | int idx = -1; | ||
1381 | |||
1382 | while (lmask & msp->mask) { | ||
1383 | ++idx; | ||
1384 | lmask >>= 1; | ||
1385 | } | ||
1386 | took = scnprintf(msg, len, "_%d", idx); | ||
1387 | len -= took; | ||
1388 | msg += took; | ||
1389 | } | ||
1390 | } | ||
1391 | ++msp; | ||
1392 | } | ||
1393 | /* If some bits are left, show in hex. */ | ||
1394 | if (len && errs) | ||
1395 | snprintf(msg, len, "%sMORE:%llX", n ? "," : "", | ||
1396 | (unsigned long long) errs); | ||
1397 | } | ||
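| /* | ||
|  * For example, with SDmaMissingDwErr and RcvHdrLenErr both set, the buffer | ||
|  * ends up holding "SDmaMissingDwErr,RcvHdrLenErr"; bits with no table entry | ||
|  * are appended as ",MORE:<hex>", and multi-bit masks get a "_<n>" suffix to | ||
|  * indicate which of the bits within the mask fired. | ||
|  */ | ||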
1398 | |||
1399 | /* only called if r1 set */ | ||
1400 | static void flush_fifo(struct qib_pportdata *ppd) | ||
1401 | { | ||
1402 | struct qib_devdata *dd = ppd->dd; | ||
1403 | u32 __iomem *piobuf; | ||
1404 | u32 bufn; | ||
1405 | u32 *hdr; | ||
1406 | u64 pbc; | ||
1407 | const unsigned hdrwords = 7; | ||
1408 | static struct qib_ib_header ibhdr = { | ||
1409 | .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH), | ||
1410 | .lrh[1] = IB_LID_PERMISSIVE, | ||
1411 | .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC), | ||
1412 | .lrh[3] = IB_LID_PERMISSIVE, | ||
1413 | .u.oth.bth[0] = cpu_to_be32( | ||
1414 | (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY), | ||
1415 | .u.oth.bth[1] = cpu_to_be32(0), | ||
1416 | .u.oth.bth[2] = cpu_to_be32(0), | ||
1417 | .u.oth.u.ud.deth[0] = cpu_to_be32(0), | ||
1418 | .u.oth.u.ud.deth[1] = cpu_to_be32(0), | ||
1419 | }; | ||
1420 | |||
1421 | /* | ||
1422 | * Send a dummy VL15 packet to flush the launch FIFO. | ||
1423 | * This will not actually be sent since the TxeBypassIbc bit is set. | ||
1424 | */ | ||
1425 | pbc = PBC_7322_VL15_SEND | | ||
1426 | (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) | | ||
1427 | (hdrwords + SIZE_OF_CRC); | ||
1428 | piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn); | ||
1429 | if (!piobuf) | ||
1430 | return; | ||
1431 | writeq(pbc, piobuf); | ||
1432 | hdr = (u32 *) &ibhdr; | ||
1433 | if (dd->flags & QIB_PIO_FLUSH_WC) { | ||
1434 | qib_flush_wc(); | ||
1435 | qib_pio_copy(piobuf + 2, hdr, hdrwords - 1); | ||
1436 | qib_flush_wc(); | ||
1437 | __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1); | ||
1438 | qib_flush_wc(); | ||
1439 | } else | ||
1440 | qib_pio_copy(piobuf + 2, hdr, hdrwords); | ||
1441 | qib_sendbuf_done(dd, bufn); | ||
1442 | } | ||
1443 | |||
1444 | /* | ||
1445 | * This is called with interrupts disabled and sdma_lock held. | ||
1446 | */ | ||
1447 | static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op) | ||
1448 | { | ||
1449 | struct qib_devdata *dd = ppd->dd; | ||
1450 | u64 set_sendctrl = 0; | ||
1451 | u64 clr_sendctrl = 0; | ||
1452 | |||
1453 | if (op & QIB_SDMA_SENDCTRL_OP_ENABLE) | ||
1454 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable); | ||
1455 | else | ||
1456 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable); | ||
1457 | |||
1458 | if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE) | ||
1459 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable); | ||
1460 | else | ||
1461 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable); | ||
1462 | |||
1463 | if (op & QIB_SDMA_SENDCTRL_OP_HALT) | ||
1464 | set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt); | ||
1465 | else | ||
1466 | clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt); | ||
1467 | |||
1468 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) | ||
1469 | set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) | | ||
1470 | SYM_MASK(SendCtrl_0, TxeAbortIbc) | | ||
1471 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo); | ||
1472 | else | ||
1473 | clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) | | ||
1474 | SYM_MASK(SendCtrl_0, TxeAbortIbc) | | ||
1475 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo); | ||
1476 | |||
1477 | spin_lock(&dd->sendctrl_lock); | ||
1478 | |||
1479 | /* If we are draining everything, block sends first */ | ||
1480 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) { | ||
1481 | ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); | ||
1482 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | ||
1483 | qib_write_kreg(dd, kr_scratch, 0); | ||
1484 | } | ||
1485 | |||
1486 | ppd->p_sendctrl |= set_sendctrl; | ||
1487 | ppd->p_sendctrl &= ~clr_sendctrl; | ||
1488 | |||
1489 | if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP) | ||
1490 | qib_write_kreg_port(ppd, krp_sendctrl, | ||
1491 | ppd->p_sendctrl | | ||
1492 | SYM_MASK(SendCtrl_0, SDmaCleanup)); | ||
1493 | else | ||
1494 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | ||
1495 | qib_write_kreg(dd, kr_scratch, 0); | ||
1496 | |||
1497 | if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) { | ||
1498 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); | ||
1499 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | ||
1500 | qib_write_kreg(dd, kr_scratch, 0); | ||
1501 | } | ||
1502 | |||
1503 | spin_unlock(&dd->sendctrl_lock); | ||
1504 | |||
1505 | if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1) | ||
1506 | flush_fifo(ppd); | ||
1507 | } | ||
1508 | |||
1509 | static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd) | ||
1510 | { | ||
1511 | __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned); | ||
1512 | } | ||
1513 | |||
1514 | static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd) | ||
1515 | { | ||
1516 | /* | ||
1517 | * Set SendDmaLenGen, first with the MSB of the generation count | ||
1518 | * clear and then with it set, to enable generation checking | ||
1519 | * and load the internal generation counter. | ||
1520 | */ | ||
1521 | qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt); | ||
1522 | qib_write_kreg_port(ppd, krp_senddmalengen, | ||
1523 | ppd->sdma_descq_cnt | | ||
1524 | (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB)); | ||
1525 | } | ||
1526 | |||
1527 | /* | ||
1528 | * Must be called with sdma_lock held, or before init finished. | ||
1529 | */ | ||
1530 | static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail) | ||
1531 | { | ||
1532 | /* Commit writes to memory and advance the tail on the chip */ | ||
1533 | wmb(); | ||
1534 | ppd->sdma_descq_tail = tail; | ||
1535 | qib_write_kreg_port(ppd, krp_senddmatail, tail); | ||
1536 | } | ||
1537 | |||
1538 | /* | ||
1539 | * This is called with interrupts disabled and sdma_lock held. | ||
1540 | */ | ||
1541 | static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd) | ||
1542 | { | ||
1543 | /* | ||
1544 | * Drain all FIFOs. | ||
1545 | * The hardware doesn't require this but we do it so that verbs | ||
1546 | * and user applications don't wait for link active to send stale | ||
1547 | * data. | ||
1548 | */ | ||
1549 | sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH); | ||
1550 | |||
1551 | qib_sdma_7322_setlengen(ppd); | ||
1552 | qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */ | ||
1553 | ppd->sdma_head_dma[0] = 0; | ||
1554 | qib_7322_sdma_sendctrl(ppd, | ||
1555 | ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP); | ||
1556 | } | ||
1557 | |||
1558 | #define DISABLES_SDMA ( \ | ||
1559 | QIB_E_P_SDMAHALT | \ | ||
1560 | QIB_E_P_SDMADESCADDRMISALIGN | \ | ||
1561 | QIB_E_P_SDMAMISSINGDW | \ | ||
1562 | QIB_E_P_SDMADWEN | \ | ||
1563 | QIB_E_P_SDMARPYTAG | \ | ||
1564 | QIB_E_P_SDMA1STDESC | \ | ||
1565 | QIB_E_P_SDMABASE | \ | ||
1566 | QIB_E_P_SDMATAILOUTOFBOUND | \ | ||
1567 | QIB_E_P_SDMAOUTOFBOUND | \ | ||
1568 | QIB_E_P_SDMAGENMISMATCH) | ||
1569 | |||
1570 | static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs) | ||
1571 | { | ||
1572 | unsigned long flags; | ||
1573 | struct qib_devdata *dd = ppd->dd; | ||
1574 | |||
1575 | errs &= QIB_E_P_SDMAERRS; | ||
1576 | |||
1577 | if (errs & QIB_E_P_SDMAUNEXPDATA) | ||
1578 | qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit, | ||
1579 | ppd->port); | ||
1580 | |||
1581 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
1582 | |||
1583 | switch (ppd->sdma_state.current_state) { | ||
1584 | case qib_sdma_state_s00_hw_down: | ||
1585 | break; | ||
1586 | |||
1587 | case qib_sdma_state_s10_hw_start_up_wait: | ||
1588 | if (errs & QIB_E_P_SDMAHALT) | ||
1589 | __qib_sdma_process_event(ppd, | ||
1590 | qib_sdma_event_e20_hw_started); | ||
1591 | break; | ||
1592 | |||
1593 | case qib_sdma_state_s20_idle: | ||
1594 | break; | ||
1595 | |||
1596 | case qib_sdma_state_s30_sw_clean_up_wait: | ||
1597 | break; | ||
1598 | |||
1599 | case qib_sdma_state_s40_hw_clean_up_wait: | ||
1600 | if (errs & QIB_E_P_SDMAHALT) | ||
1601 | __qib_sdma_process_event(ppd, | ||
1602 | qib_sdma_event_e50_hw_cleaned); | ||
1603 | break; | ||
1604 | |||
1605 | case qib_sdma_state_s50_hw_halt_wait: | ||
1606 | if (errs & QIB_E_P_SDMAHALT) | ||
1607 | __qib_sdma_process_event(ppd, | ||
1608 | qib_sdma_event_e60_hw_halted); | ||
1609 | break; | ||
1610 | |||
1611 | case qib_sdma_state_s99_running: | ||
1612 | __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted); | ||
1613 | __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted); | ||
1614 | break; | ||
1615 | } | ||
1616 | |||
1617 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
1618 | } | ||
1619 | |||
1620 | /* | ||
1621 | * handle per-device errors (not per-port errors) | ||
1622 | */ | ||
1623 | static noinline void handle_7322_errors(struct qib_devdata *dd) | ||
1624 | { | ||
1625 | char *msg; | ||
1626 | u64 iserr = 0; | ||
1627 | u64 errs; | ||
1628 | u64 mask; | ||
1629 | int log_idx; | ||
1630 | |||
1631 | qib_stats.sps_errints++; | ||
1632 | errs = qib_read_kreg64(dd, kr_errstatus); | ||
1633 | if (!errs) { | ||
1634 | qib_devinfo(dd->pcidev, "device error interrupt, " | ||
1635 | "but no error bits set!\n"); | ||
1636 | goto done; | ||
1637 | } | ||
1638 | |||
1639 | /* don't report errors that are masked */ | ||
1640 | errs &= dd->cspec->errormask; | ||
1641 | msg = dd->cspec->emsgbuf; | ||
1642 | |||
1643 | /* do these first, they are most important */ | ||
1644 | if (errs & QIB_E_HARDWARE) { | ||
1645 | *msg = '\0'; | ||
1646 | qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); | ||
1647 | } else | ||
1648 | for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) | ||
1649 | if (errs & dd->eep_st_masks[log_idx].errs_to_log) | ||
1650 | qib_inc_eeprom_err(dd, log_idx, 1); | ||
1651 | |||
1652 | if (errs & QIB_E_SPKTERRS) { | ||
1653 | qib_disarm_7322_senderrbufs(dd->pport); | ||
1654 | qib_stats.sps_txerrs++; | ||
1655 | } else if (errs & QIB_E_INVALIDADDR) | ||
1656 | qib_stats.sps_txerrs++; | ||
1657 | else if (errs & QIB_E_ARMLAUNCH) { | ||
1658 | qib_stats.sps_txerrs++; | ||
1659 | qib_disarm_7322_senderrbufs(dd->pport); | ||
1660 | } | ||
1661 | qib_write_kreg(dd, kr_errclear, errs); | ||
1662 | |||
1663 | /* | ||
1664 | * The ones we mask off are handled specially below | ||
1665 | * or above. Also mask SDMADISABLED by default as it | ||
1666 | * is too chatty. | ||
1667 | */ | ||
1668 | mask = QIB_E_HARDWARE; | ||
1669 | *msg = '\0'; | ||
1670 | |||
1671 | err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask, | ||
1672 | qib_7322error_msgs); | ||
1673 | |||
1674 | /* | ||
1675 | * Getting reset is a tragedy for all ports. Mark the device | ||
1676 | * _and_ the ports as "offline" in a way meaningful to each. | ||
1677 | */ | ||
1678 | if (errs & QIB_E_RESET) { | ||
1679 | int pidx; | ||
1680 | |||
1681 | qib_dev_err(dd, "Got reset, requires re-init " | ||
1682 | "(unload and reload driver)\n"); | ||
1683 | dd->flags &= ~QIB_INITTED; /* needs re-init */ | ||
1684 | /* mark as having had error */ | ||
1685 | *dd->devstatusp |= QIB_STATUS_HWERROR; | ||
1686 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | ||
1687 | if (dd->pport[pidx].link_speed_supported) | ||
1688 | *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF; | ||
1689 | } | ||
1690 | |||
1691 | if (*msg && iserr) | ||
1692 | qib_dev_err(dd, "%s error\n", msg); | ||
1693 | |||
1694 | /* | ||
1695 | * If there were hdrq or egrfull errors, wake up any processes | ||
1696 | * waiting in poll. We used to try to check which contexts had | ||
1697 | * the overflow, but given the cost of that and the chip reads | ||
1698 | * to support it, it's better to just wake everybody up if we | ||
1699 | * get an overflow; waiters can poll again if it's not them. | ||
1700 | */ | ||
1701 | if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) { | ||
1702 | qib_handle_urcv(dd, ~0U); | ||
1703 | if (errs & ERR_MASK(RcvEgrFullErr)) | ||
1704 | qib_stats.sps_buffull++; | ||
1705 | else | ||
1706 | qib_stats.sps_hdrfull++; | ||
1707 | } | ||
1708 | |||
1709 | done: | ||
1710 | return; | ||
1711 | } | ||
1712 | |||
1713 | static void reenable_chase(unsigned long opaque) | ||
1714 | { | ||
1715 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | ||
1716 | |||
1717 | ppd->cpspec->chase_timer.expires = 0; | ||
1718 | qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | ||
1719 | QLOGIC_IB_IBCC_LINKINITCMD_POLL); | ||
1720 | } | ||
1721 | |||
1722 | static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt) | ||
1723 | { | ||
1724 | ppd->cpspec->chase_end = 0; | ||
1725 | |||
1726 | if (!qib_chase) | ||
1727 | return; | ||
1728 | |||
1729 | qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, | ||
1730 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
1731 | ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME; | ||
1732 | add_timer(&ppd->cpspec->chase_timer); | ||
1733 | } | ||
1734 | |||
1735 | static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) | ||
1736 | { | ||
1737 | u8 ibclt; | ||
1738 | u64 tnow; | ||
1739 | |||
1740 | ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState); | ||
1741 | |||
1742 | /* | ||
1743 | * Detect and handle the state chase issue, where we can | ||
1744 | * get stuck if we are unlucky on timing on both sides of | ||
1745 | * the link. If we are, we disable, set a timer, and | ||
1746 | * then re-enable. | ||
1747 | */ | ||
1748 | switch (ibclt) { | ||
1749 | case IB_7322_LT_STATE_CFGRCVFCFG: | ||
1750 | case IB_7322_LT_STATE_CFGWAITRMT: | ||
1751 | case IB_7322_LT_STATE_TXREVLANES: | ||
1752 | case IB_7322_LT_STATE_CFGENH: | ||
1753 | tnow = get_jiffies_64(); | ||
1754 | if (ppd->cpspec->chase_end && | ||
1755 | time_after64(tnow, ppd->cpspec->chase_end)) | ||
1756 | disable_chase(ppd, tnow, ibclt); | ||
1757 | else if (!ppd->cpspec->chase_end) | ||
1758 | ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME; | ||
1759 | break; | ||
1760 | default: | ||
1761 | ppd->cpspec->chase_end = 0; | ||
1762 | break; | ||
1763 | } | ||
1764 | |||
1765 | if (ibclt == IB_7322_LT_STATE_CFGTEST && | ||
1766 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { | ||
1767 | force_h1(ppd); | ||
1768 | ppd->cpspec->qdr_reforce = 1; | ||
1769 | } else if (ppd->cpspec->qdr_reforce && | ||
1770 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && | ||
1771 | (ibclt == IB_7322_LT_STATE_CFGENH || | ||
1772 | ibclt == IB_7322_LT_STATE_CFGIDLE || | ||
1773 | ibclt == IB_7322_LT_STATE_LINKUP)) | ||
1774 | force_h1(ppd); | ||
1775 | |||
1776 | if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) && | ||
1777 | ppd->link_speed_enabled == QIB_IB_QDR && | ||
1778 | (ibclt == IB_7322_LT_STATE_CFGTEST || | ||
1779 | ibclt == IB_7322_LT_STATE_CFGENH || | ||
1780 | (ibclt >= IB_7322_LT_STATE_POLLACTIVE && | ||
1781 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) | ||
1782 | adj_tx_serdes(ppd); | ||
1783 | |||
1784 | if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP && | ||
1785 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { | ||
1786 | ppd->cpspec->qdr_dfe_on = 1; | ||
1787 | ppd->cpspec->qdr_dfe_time = 0; | ||
1788 | /* On link down, reenable QDR adaptation */ | ||
1789 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | ||
1790 | ppd->dd->cspec->r1 ? | ||
1791 | QDR_STATIC_ADAPT_DOWN_R1 : | ||
1792 | QDR_STATIC_ADAPT_DOWN); | ||
1793 | } | ||
1794 | } | ||
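A minimal, stand-alone sketch of the chase-detection timing used by disable_chase()/reenable_chase() and handle_serdes_issues() above: if a port lingers in one of the configuration training states past a deadline, the link is disabled and a timer re-enables polling later. The millisecond constants below are placeholders for illustration only; the real QIB_CHASE_TIME and QIB_CHASE_DIS_TIME values are defined elsewhere in the driver.

/* Illustrative user-space sketch; timing constants are hypothetical. */
#include <stdio.h>

#define CHASE_TIME_MS     100   /* hypothetical "stuck in config" window */
#define CHASE_DIS_TIME_MS 200   /* hypothetical time to hold link disabled */

int main(void)
{
	unsigned long now, chase_end = 0, reenable_at = 0;
	int in_cfg_state = 1;   /* pretend training is stuck in a CFG state */

	for (now = 0; now <= 400; now += 50) {
		if (reenable_at && now >= reenable_at) {
			printf("t=%3lums: re-enable, LINKCMD_DOWN + LINKINITCMD_POLL\n", now);
			reenable_at = 0;
		} else if (in_cfg_state && !chase_end) {
			chase_end = now + CHASE_TIME_MS;   /* arm the deadline */
		} else if (in_cfg_state && now > chase_end) {
			printf("t=%3lums: still chasing, disable link\n", now);
			chase_end = 0;
			in_cfg_state = 0;
			reenable_at = now + CHASE_DIS_TIME_MS;
		}
	}
	return 0;
}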
1795 | |||
1796 | /* | ||
1797 | * This is per-pport error handling. | ||
1798 | * It will likely get its own MSIx interrupt (one for each port, | ||
1799 | * although just a single handler). | ||
1800 | */ | ||
1801 | static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) | ||
1802 | { | ||
1803 | char *msg; | ||
1804 | u64 ignore_this_time = 0, iserr = 0, errs, fmask; | ||
1805 | struct qib_devdata *dd = ppd->dd; | ||
1806 | |||
1807 | /* do this as soon as possible */ | ||
1808 | fmask = qib_read_kreg64(dd, kr_act_fmask); | ||
1809 | if (!fmask) | ||
1810 | check_7322_rxe_status(ppd); | ||
1811 | |||
1812 | errs = qib_read_kreg_port(ppd, krp_errstatus); | ||
1813 | if (!errs) | ||
1814 | qib_devinfo(dd->pcidev, | ||
1815 | "Port%d error interrupt, but no error bits set!\n", | ||
1816 | ppd->port); | ||
1817 | if (!fmask) | ||
1818 | errs &= ~QIB_E_P_IBSTATUSCHANGED; | ||
1819 | if (!errs) | ||
1820 | goto done; | ||
1821 | |||
1822 | msg = ppd->cpspec->epmsgbuf; | ||
1823 | *msg = '\0'; | ||
1824 | |||
1825 | if (errs & ~QIB_E_P_BITSEXTANT) { | ||
1826 | err_decode(msg, sizeof ppd->cpspec->epmsgbuf, | ||
1827 | errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); | ||
1828 | if (!*msg) | ||
1829 | snprintf(msg, sizeof ppd->cpspec->epmsgbuf, | ||
1830 | "no others"); | ||
1831 | qib_dev_porterr(dd, ppd->port, "error interrupt with unknown" | ||
1832 | " errors 0x%016Lx set (and %s)\n", | ||
1833 | (errs & ~QIB_E_P_BITSEXTANT), msg); | ||
1834 | *msg = '\0'; | ||
1835 | } | ||
1836 | |||
1837 | if (errs & QIB_E_P_SHDR) { | ||
1838 | u64 symptom; | ||
1839 | |||
1840 | /* determine cause, then write to clear */ | ||
1841 | symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); | ||
1842 | qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); | ||
1843 | err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom, | ||
1844 | hdrchk_msgs); | ||
1845 | *msg = '\0'; | ||
1846 | /* senderrbuf cleared in SPKTERRS below */ | ||
1847 | } | ||
1848 | |||
1849 | if (errs & QIB_E_P_SPKTERRS) { | ||
1850 | if ((errs & QIB_E_P_LINK_PKTERRS) && | ||
1851 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
1852 | /* | ||
1853 | * This can happen when trying to bring the link | ||
1854 | * up, but the IB link changes state at the "wrong" | ||
1855 | * time. The IB logic then complains that the packet | ||
1856 | * isn't valid. We don't want to confuse people, so | ||
1857 | * we just don't print them, except at debug | ||
1858 | */ | ||
1859 | err_decode(msg, sizeof ppd->cpspec->epmsgbuf, | ||
1860 | (errs & QIB_E_P_LINK_PKTERRS), | ||
1861 | qib_7322p_error_msgs); | ||
1862 | *msg = '\0'; | ||
1863 | ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; | ||
1864 | } | ||
1865 | qib_disarm_7322_senderrbufs(ppd); | ||
1866 | } else if ((errs & QIB_E_P_LINK_PKTERRS) && | ||
1867 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
1868 | /* | ||
1869 | * This can happen when SMA is trying to bring the link | ||
1870 | * up, but the IB link changes state at the "wrong" time. | ||
1871 | * The IB logic then complains that the packet isn't | ||
1872 | * valid. We don't want to confuse people, so we just | ||
1873 | * don't print them, except at debug | ||
1874 | */ | ||
1875 | err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs, | ||
1876 | qib_7322p_error_msgs); | ||
1877 | ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; | ||
1878 | *msg = '\0'; | ||
1879 | } | ||
1880 | |||
1881 | qib_write_kreg_port(ppd, krp_errclear, errs); | ||
1882 | |||
1883 | errs &= ~ignore_this_time; | ||
1884 | if (!errs) | ||
1885 | goto done; | ||
1886 | |||
1887 | if (errs & QIB_E_P_RPKTERRS) | ||
1888 | qib_stats.sps_rcverrs++; | ||
1889 | if (errs & QIB_E_P_SPKTERRS) | ||
1890 | qib_stats.sps_txerrs++; | ||
1891 | |||
1892 | iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS); | ||
1893 | |||
1894 | if (errs & QIB_E_P_SDMAERRS) | ||
1895 | sdma_7322_p_errors(ppd, errs); | ||
1896 | |||
1897 | if (errs & QIB_E_P_IBSTATUSCHANGED) { | ||
1898 | u64 ibcs; | ||
1899 | u8 ltstate; | ||
1900 | |||
1901 | ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a); | ||
1902 | ltstate = qib_7322_phys_portstate(ibcs); | ||
1903 | |||
1904 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
1905 | handle_serdes_issues(ppd, ibcs); | ||
1906 | if (!(ppd->cpspec->ibcctrl_a & | ||
1907 | SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) { | ||
1908 | /* | ||
1909 | * We got our interrupt, so init code should be | ||
1910 | * happy and not try alternatives. Now squelch | ||
1911 | * other "chatter" from link-negotiation (pre Init) | ||
1912 | */ | ||
1913 | ppd->cpspec->ibcctrl_a |= | ||
1914 | SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); | ||
1915 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | ||
1916 | ppd->cpspec->ibcctrl_a); | ||
1917 | } | ||
1918 | |||
1919 | /* Update our picture of width and speed from chip */ | ||
1920 | ppd->link_width_active = | ||
1921 | (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ? | ||
1922 | IB_WIDTH_4X : IB_WIDTH_1X; | ||
1923 | ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0, | ||
1924 | LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs & | ||
1925 | SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ? | ||
1926 | QIB_IB_DDR : QIB_IB_SDR; | ||
1927 | |||
1928 | if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate != | ||
1929 | IB_PHYSPORTSTATE_DISABLED) | ||
1930 | qib_set_ib_7322_lstate(ppd, 0, | ||
1931 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
1932 | else | ||
1933 | /* | ||
1934 | * Since going into a recovery state causes the link | ||
1935 | * state to go down and since recovery is transitory, | ||
1936 | * it is better if we "miss" ever seeing the link | ||
1937 | * training state go into recovery (i.e., ignore this | ||
1938 | * transition for link state special handling purposes) | ||
1939 | * without updating lastibcstat. | ||
1940 | */ | ||
1941 | if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER && | ||
1942 | ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN && | ||
1943 | ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && | ||
1944 | ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) | ||
1945 | qib_handle_e_ibstatuschanged(ppd, ibcs); | ||
1946 | } | ||
1947 | if (*msg && iserr) | ||
1948 | qib_dev_porterr(dd, ppd->port, "%s error\n", msg); | ||
1949 | |||
1950 | if (ppd->state_wanted & ppd->lflags) | ||
1951 | wake_up_interruptible(&ppd->state_wait); | ||
1952 | done: | ||
1953 | return; | ||
1954 | } | ||
1955 | |||
1956 | /* enable or disable delivery of interrupts from the chip */ | ||
1957 | static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable) | ||
1958 | { | ||
1959 | if (enable) { | ||
1960 | if (dd->flags & QIB_BADINTR) | ||
1961 | return; | ||
1962 | qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask); | ||
1963 | /* cause any pending enabled interrupts to be re-delivered */ | ||
1964 | qib_write_kreg(dd, kr_intclear, 0ULL); | ||
1965 | if (dd->cspec->num_msix_entries) { | ||
1966 | /* and same for MSIx */ | ||
1967 | u64 val = qib_read_kreg64(dd, kr_intgranted); | ||
1968 | if (val) | ||
1969 | qib_write_kreg(dd, kr_intgranted, val); | ||
1970 | } | ||
1971 | } else | ||
1972 | qib_write_kreg(dd, kr_intmask, 0ULL); | ||
1973 | } | ||
1974 | |||
1975 | /* | ||
1976 | * Try to cleanup as much as possible for anything that might have gone | ||
1977 | * wrong while in freeze mode, such as pio buffers being written by user | ||
1978 | * processes (causing armlaunch), send errors due to going into freeze mode, | ||
1979 | * etc., and try to avoid causing extra interrupts while doing so. | ||
1980 | * Forcibly update the in-memory pioavail register copies after cleanup | ||
1981 | * because the chip won't do it while in freeze mode (the register values | ||
1982 | * themselves are kept correct). | ||
1983 | * Make sure that we don't lose any important interrupts by using the chip | ||
1984 | * feature that says that writing 0 to a bit in *clear that is set in | ||
1985 | * *status will cause an interrupt to be generated again (if allowed by | ||
1986 | * the *mask value). | ||
1987 | * This is in chip-specific code because of all of the register accesses, | ||
1988 | * even though the details are similar on most chips. | ||
1989 | */ | ||
1990 | static void qib_7322_clear_freeze(struct qib_devdata *dd) | ||
1991 | { | ||
1992 | int pidx; | ||
1993 | |||
1994 | /* disable error interrupts, to avoid confusion */ | ||
1995 | qib_write_kreg(dd, kr_errmask, 0ULL); | ||
1996 | |||
1997 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | ||
1998 | if (dd->pport[pidx].link_speed_supported) | ||
1999 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, | ||
2000 | 0ULL); | ||
2001 | |||
2002 | /* also disable interrupts; errormask is sometimes overwritten */ | ||
2003 | qib_7322_set_intr_state(dd, 0); | ||
2004 | |||
2005 | /* clear the freeze, and be sure chip saw it */ | ||
2006 | qib_write_kreg(dd, kr_control, dd->control); | ||
2007 | qib_read_kreg32(dd, kr_scratch); | ||
2008 | |||
2009 | /* | ||
2010 | * Force new interrupt if any hwerr, error or interrupt bits are | ||
2011 | * still set, and clear "safe" send packet errors related to freeze | ||
2012 | * and cancelling sends. Re-enable error interrupts before possible | ||
2013 | * force of re-interrupt on pending interrupts. | ||
2014 | */ | ||
2015 | qib_write_kreg(dd, kr_hwerrclear, 0ULL); | ||
2016 | qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE); | ||
2017 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | ||
2018 | /* We need to purge per-port errs and reset mask, too */ | ||
2019 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
2020 | if (!dd->pport[pidx].link_speed_supported) | ||
2021 | continue; | ||
2022 | qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull); | ||
2023 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull); | ||
2024 | } | ||
2025 | qib_7322_set_intr_state(dd, 1); | ||
2026 | } | ||
2027 | |||
2028 | /* no error handling to speak of */ | ||
2029 | /** | ||
2030 | * qib_7322_handle_hwerrors - display hardware errors. | ||
2031 | * @dd: the qlogic_ib device | ||
2032 | * @msg: the output buffer | ||
2033 | * @msgl: the size of the output buffer | ||
2034 | * | ||
2035 | * Most hardware errors are catastrophic, but for right now, | ||
2036 | * we'll print them and continue. We reuse the same message | ||
2037 | * buffer as qib_handle_errors() to avoid excessive stack | ||
2038 | * usage. | ||
2039 | */ | ||
2040 | static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg, | ||
2041 | size_t msgl) | ||
2042 | { | ||
2043 | u64 hwerrs; | ||
2044 | u32 ctrl; | ||
2045 | int isfatal = 0; | ||
2046 | |||
2047 | hwerrs = qib_read_kreg64(dd, kr_hwerrstatus); | ||
2048 | if (!hwerrs) | ||
2049 | goto bail; | ||
2050 | if (hwerrs == ~0ULL) { | ||
2051 | qib_dev_err(dd, "Read of hardware error status failed " | ||
2052 | "(all bits set); ignoring\n"); | ||
2053 | goto bail; | ||
2054 | } | ||
2055 | qib_stats.sps_hwerrs++; | ||
2056 | |||
2057 | /* Always clear the error status register, except BIST fail */ | ||
2058 | qib_write_kreg(dd, kr_hwerrclear, hwerrs & | ||
2059 | ~HWE_MASK(PowerOnBISTFailed)); | ||
2060 | |||
2061 | hwerrs &= dd->cspec->hwerrmask; | ||
2062 | |||
2063 | /* no EEPROM logging, yet */ | ||
2064 | |||
2065 | if (hwerrs) | ||
2066 | qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx " | ||
2067 | "(cleared)\n", (unsigned long long) hwerrs); | ||
2068 | |||
2069 | ctrl = qib_read_kreg32(dd, kr_control); | ||
2070 | if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) { | ||
2071 | /* | ||
2072 | * No recovery yet... | ||
2073 | */ | ||
2074 | if ((hwerrs & ~HWE_MASK(LATriggered)) || | ||
2075 | dd->cspec->stay_in_freeze) { | ||
2076 | /* | ||
2077 | * If any set that we aren't ignoring only make the | ||
2078 | * complaint once, in case it's stuck or recurring, | ||
2079 | * and we get here multiple times | ||
2080 | * Force link down, so switch knows, and | ||
2081 | * LEDs are turned off. | ||
2082 | */ | ||
2083 | if (dd->flags & QIB_INITTED) | ||
2084 | isfatal = 1; | ||
2085 | } else | ||
2086 | qib_7322_clear_freeze(dd); | ||
2087 | } | ||
2088 | |||
2089 | if (hwerrs & HWE_MASK(PowerOnBISTFailed)) { | ||
2090 | isfatal = 1; | ||
2091 | strlcpy(msg, "[Memory BIST test failed, " | ||
2092 | "InfiniPath hardware unusable]", msgl); | ||
2093 | /* ignore from now on, so disable until driver reloaded */ | ||
2094 | dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed); | ||
2095 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
2096 | } | ||
2097 | |||
2098 | err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs); | ||
2099 | |||
2100 | /* Ignore esoteric PLL failures et al. */ | ||
2101 | |||
2102 | qib_dev_err(dd, "%s hardware error\n", msg); | ||
2103 | |||
2104 | if (isfatal && !dd->diag_client) { | ||
2105 | qib_dev_err(dd, "Fatal Hardware Error, no longer" | ||
2106 | " usable, SN %.16s\n", dd->serial); | ||
2107 | /* | ||
2108 | * for /sys status file and user programs to print; if no | ||
2109 | * trailing brace is copied, we'll know it was truncated. | ||
2110 | */ | ||
2111 | if (dd->freezemsg) | ||
2112 | snprintf(dd->freezemsg, dd->freezelen, | ||
2113 | "{%s}", msg); | ||
2114 | qib_disable_after_error(dd); | ||
2115 | } | ||
2116 | bail:; | ||
2117 | } | ||
2118 | |||
2119 | /** | ||
2120 | * qib_7322_init_hwerrors - enable hardware errors | ||
2121 | * @dd: the qlogic_ib device | ||
2122 | * | ||
2123 | * now that we have finished initializing everything that might reasonably | ||
2124 | * cause a hardware error, and cleared those error bits as they occur, | ||
2125 | * we can enable hardware errors in the mask (potentially enabling | ||
2126 | * freeze mode), and enable hardware errors as errors (along with | ||
2127 | * everything else) in errormask | ||
2128 | */ | ||
2129 | static void qib_7322_init_hwerrors(struct qib_devdata *dd) | ||
2130 | { | ||
2131 | int pidx; | ||
2132 | u64 extsval; | ||
2133 | |||
2134 | extsval = qib_read_kreg64(dd, kr_extstatus); | ||
2135 | if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED | | ||
2136 | QIB_EXTS_MEMBIST_ENDTEST))) | ||
2137 | qib_dev_err(dd, "MemBIST did not complete!\n"); | ||
2138 | |||
2139 | /* never clear BIST failure, so reported on each driver load */ | ||
2140 | qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed)); | ||
2141 | qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask); | ||
2142 | |||
2143 | /* clear all */ | ||
2144 | qib_write_kreg(dd, kr_errclear, ~0ULL); | ||
2145 | /* enable errors that are masked, at least this first time. */ | ||
2146 | qib_write_kreg(dd, kr_errmask, ~0ULL); | ||
2147 | dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask); | ||
2148 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | ||
2149 | if (dd->pport[pidx].link_speed_supported) | ||
2150 | qib_write_kreg_port(dd->pport + pidx, krp_errmask, | ||
2151 | ~0ULL); | ||
2152 | } | ||
2153 | |||
2154 | /* | ||
2155 | * Disable and enable the armlaunch error. Used for PIO bandwidth testing | ||
2156 | * on chips that are count-based, rather than trigger-based. There is no | ||
2157 | * reference counting, but that's also fine, given the intended use. | ||
2158 | * Only chip-specific because it's all register accesses | ||
2159 | */ | ||
2160 | static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable) | ||
2161 | { | ||
2162 | if (enable) { | ||
2163 | qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH); | ||
2164 | dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH; | ||
2165 | } else | ||
2166 | dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH; | ||
2167 | qib_write_kreg(dd, kr_errmask, dd->cspec->errormask); | ||
2168 | } | ||
2169 | |||
2170 | /* | ||
2171 | * Formerly took parameter <which> in pre-shifted, | ||
2172 | * pre-merged form with LinkCmd and LinkInitCmd | ||
2173 | * together, and assuming the zero was NOP. | ||
2174 | */ | ||
2175 | static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd, | ||
2176 | u16 linitcmd) | ||
2177 | { | ||
2178 | u64 mod_wd; | ||
2179 | struct qib_devdata *dd = ppd->dd; | ||
2180 | unsigned long flags; | ||
2181 | |||
2182 | if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) { | ||
2183 | /* | ||
2184 | * If we are told to disable, note that so link-recovery | ||
2185 | * code does not attempt to bring us back up. | ||
2186 | * Also reset everything that we can, so we start | ||
2187 | * completely clean when re-enabled (before we | ||
2188 | * actually issue the disable to the IBC) | ||
2189 | */ | ||
2190 | qib_7322_mini_pcs_reset(ppd); | ||
2191 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2192 | ppd->lflags |= QIBL_IB_LINK_DISABLED; | ||
2193 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2194 | } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) { | ||
2195 | /* | ||
2196 | * Any other linkinitcmd will lead to LINKDOWN and then | ||
2197 | * to INIT (if all is well), so clear flag to let | ||
2198 | * link-recovery code attempt to bring us back up. | ||
2199 | */ | ||
2200 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2201 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | ||
2202 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2203 | /* | ||
2204 | * Clear status change interrupt reduction so the | ||
2205 | * new state is seen. | ||
2206 | */ | ||
2207 | ppd->cpspec->ibcctrl_a &= | ||
2208 | ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); | ||
2209 | } | ||
2210 | |||
2211 | mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) | | ||
2212 | (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
2213 | |||
2214 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a | | ||
2215 | mod_wd); | ||
2216 | /* write to chip to prevent back-to-back writes of ibc reg */ | ||
2217 | qib_write_kreg(dd, kr_scratch, 0); | ||
2218 | |||
2219 | } | ||
2220 | |||
2221 | /* | ||
2222 | * The total RCV buffer memory is 64KB, used for both ports, and is | ||
2223 | * in units of 64 bytes (same as IB flow control credit unit). | ||
2224 | * The consumedVL unit in the same registers is in 32 byte units! | ||
2225 | * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks, | ||
2226 | * and we can therefore allocate just 9 IB credits for 2 VL15 packets | ||
2227 | * in krp_rxcreditvl15, rather than 10. | ||
2228 | */ | ||
2229 | #define RCV_BUF_UNITSZ 64 | ||
2230 | #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports)) | ||
2231 | |||
2232 | static void set_vls(struct qib_pportdata *ppd) | ||
2233 | { | ||
2234 | int i, numvls, totcred, cred_vl, vl0extra; | ||
2235 | struct qib_devdata *dd = ppd->dd; | ||
2236 | u64 val; | ||
2237 | |||
2238 | numvls = qib_num_vls(ppd->vls_operational); | ||
2239 | |||
2240 | /* | ||
2241 | * Set up per-VL credits. Below is a kluge based on these assumptions: | ||
2242 | * 1) port is disabled at the time early_init is called. | ||
2243 | * 2) give VL15 17 credits, for two max-plausible packets. | ||
2244 | * 3) Give VL0-N the rest, with any rounding excess used for VL0 | ||
2245 | */ | ||
2246 | /* 2 VL15 packets @ 288 bytes each (including IB headers) */ | ||
2247 | totcred = NUM_RCV_BUF_UNITS(dd); | ||
2248 | cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ; | ||
2249 | totcred -= cred_vl; | ||
2250 | qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl); | ||
2251 | cred_vl = totcred / numvls; | ||
2252 | vl0extra = totcred - cred_vl * numvls; | ||
2253 | qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra); | ||
2254 | for (i = 1; i < numvls; i++) | ||
2255 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl); | ||
2256 | for (; i < 8; i++) /* no buffer space for other VLs */ | ||
2257 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0); | ||
2258 | |||
2259 | /* Notify IBC that credits need to be recalculated */ | ||
2260 | val = qib_read_kreg_port(ppd, krp_ibsdtestiftx); | ||
2261 | val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE); | ||
2262 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | ||
2263 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
2264 | val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE); | ||
2265 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | ||
2266 | |||
2267 | for (i = 0; i < numvls; i++) | ||
2268 | val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i); | ||
2269 | val = qib_read_kreg_port(ppd, krp_rxcreditvl15); | ||
2270 | |||
2271 | /* Change the number of operational VLs */ | ||
2272 | ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a & | ||
2273 | ~SYM_MASK(IBCCtrlA_0, NumVLane)) | | ||
2274 | ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane)); | ||
2275 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | ||
2276 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
2277 | } | ||
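As a worked example of the credit split performed by set_vls() above, here is a minimal stand-alone sketch using the same arithmetic; the dual-port, four-VL configuration is an illustrative assumption, not something the driver fixes. With both ports populated each port gets 512 receive-buffer units, VL15 takes 9 of them for two 288-byte packets, and the remaining 503 are divided among the data VLs with the rounding excess going to VL0.

/* Stand-alone sketch of the per-VL credit split; 2 ports / 4 VLs assumed. */
#include <stdio.h>

#define RCV_BUF_UNITSZ 64
#define NUM_RCV_BUF_UNITS(nports) ((64 * 1024) / (RCV_BUF_UNITSZ * (nports)))

int main(void)
{
	int num_pports = 2, numvls = 4;          /* assumed configuration */
	int totcred = NUM_RCV_BUF_UNITS(num_pports);
	int cred_vl, vl0extra, i;

	/* 2 VL15 packets @ 288 bytes each, rounded up to 64-byte units */
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	printf("total units per port: %d, VL15: %d\n", totcred, cred_vl);
	totcred -= cred_vl;

	cred_vl = totcred / numvls;
	vl0extra = totcred - cred_vl * numvls;   /* rounding excess goes to VL0 */
	printf("VL0: %d\n", cred_vl + vl0extra);
	for (i = 1; i < numvls; i++)
		printf("VL%d: %d\n", i, cred_vl);
	return 0;
}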
2278 | |||
2279 | /* | ||
2280 | * The code that deals with actual SerDes is in serdes_7322_init(). | ||
2281 | * Compared to the code for iba7220, it is minimal. | ||
2282 | */ | ||
2283 | static int serdes_7322_init(struct qib_pportdata *ppd); | ||
2284 | |||
2285 | /** | ||
2286 | * qib_7322_bringup_serdes - bring up the serdes | ||
2287 | * @ppd: physical port on the qlogic_ib device | ||
2288 | */ | ||
2289 | static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | ||
2290 | { | ||
2291 | struct qib_devdata *dd = ppd->dd; | ||
2292 | u64 val, guid, ibc; | ||
2293 | unsigned long flags; | ||
2294 | int ret = 0; | ||
2295 | |||
2296 | /* | ||
2297 | * SerDes model not in Pd, but still need to | ||
2298 | * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere | ||
2299 | * eventually. | ||
2300 | */ | ||
2301 | /* Put IBC in reset, sends disabled (should be in reset already) */ | ||
2302 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn); | ||
2303 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | ||
2304 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
2305 | |||
2306 | if (qib_compat_ddr_negotiate) { | ||
2307 | ppd->cpspec->ibdeltainprog = 1; | ||
2308 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, | ||
2309 | crp_ibsymbolerr); | ||
2310 | ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd, | ||
2311 | crp_iblinkerrrecov); | ||
2312 | } | ||
2313 | |||
2314 | /* flowcontrolwatermark is in units of KBytes */ | ||
2315 | ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark); | ||
2316 | /* | ||
2317 | * Flow control is sent this often, even if no changes in | ||
2318 | * buffer space occur. Units are 128ns for this chip. | ||
2319 | * Set to 3usec. | ||
2320 | */ | ||
2321 | ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod); | ||
2322 | /* max error tolerance */ | ||
2323 | ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold); | ||
2324 | /* IB credit flow control. */ | ||
2325 | ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold); | ||
2326 | /* | ||
2327 | * set initial max size pkt IBC will send, including ICRC; it's the | ||
2328 | * PIO buffer size in dwords, less 1; also see qib_set_mtu() | ||
2329 | */ | ||
2330 | ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << | ||
2331 | SYM_LSB(IBCCtrlA_0, MaxPktLen); | ||
2332 | ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ | ||
2333 | |||
2334 | /* initially come up waiting for TS1, without sending anything. */ | ||
2335 | val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << | ||
2336 | QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); | ||
2337 | |||
2338 | /* | ||
2339 | * Reset the PCS interface to the serdes (and also ibc, which is still | ||
2340 | * in reset from above). Writes new value of ibcctrl_a as last step. | ||
2341 | */ | ||
2342 | qib_7322_mini_pcs_reset(ppd); | ||
2343 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
2344 | |||
2345 | if (!ppd->cpspec->ibcctrl_b) { | ||
2346 | unsigned lse = ppd->link_speed_enabled; | ||
2347 | |||
2348 | /* | ||
2349 | * Not on re-init after reset, establish shadow | ||
2350 | * and force initial config. | ||
2351 | */ | ||
2352 | ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd, | ||
2353 | krp_ibcctrl_b); | ||
2354 | ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR | | ||
2355 | IBA7322_IBC_SPEED_DDR | | ||
2356 | IBA7322_IBC_SPEED_SDR | | ||
2357 | IBA7322_IBC_WIDTH_AUTONEG | | ||
2358 | SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED)); | ||
2359 | if (lse & (lse - 1)) /* Multiple speeds enabled */ | ||
2360 | ppd->cpspec->ibcctrl_b |= | ||
2361 | (lse << IBA7322_IBC_SPEED_LSB) | | ||
2362 | IBA7322_IBC_IBTA_1_2_MASK | | ||
2363 | IBA7322_IBC_MAX_SPEED_MASK; | ||
2364 | else | ||
2365 | ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ? | ||
2366 | IBA7322_IBC_SPEED_QDR | | ||
2367 | IBA7322_IBC_IBTA_1_2_MASK : | ||
2368 | (lse == QIB_IB_DDR) ? | ||
2369 | IBA7322_IBC_SPEED_DDR : | ||
2370 | IBA7322_IBC_SPEED_SDR; | ||
2371 | if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) == | ||
2372 | (IB_WIDTH_1X | IB_WIDTH_4X)) | ||
2373 | ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG; | ||
2374 | else | ||
2375 | ppd->cpspec->ibcctrl_b |= | ||
2376 | ppd->link_width_enabled == IB_WIDTH_4X ? | ||
2377 | IBA7322_IBC_WIDTH_4X_ONLY : | ||
2378 | IBA7322_IBC_WIDTH_1X_ONLY; | ||
2379 | |||
2380 | /* always enable these on driver reload, not sticky */ | ||
2381 | ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK | | ||
2382 | IBA7322_IBC_HRTBT_MASK); | ||
2383 | } | ||
2384 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | ||
2385 | |||
2386 | /* setup so we have more time at CFGTEST to change H1 */ | ||
2387 | val = qib_read_kreg_port(ppd, krp_ibcctrl_c); | ||
2388 | val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH); | ||
2389 | val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH); | ||
2390 | qib_write_kreg_port(ppd, krp_ibcctrl_c, val); | ||
2391 | |||
2392 | serdes_7322_init(ppd); | ||
2393 | |||
2394 | guid = be64_to_cpu(ppd->guid); | ||
2395 | if (!guid) { | ||
2396 | if (dd->base_guid) | ||
2397 | guid = be64_to_cpu(dd->base_guid) + ppd->port - 1; | ||
2398 | ppd->guid = cpu_to_be64(guid); | ||
2399 | } | ||
2400 | |||
2401 | qib_write_kreg_port(ppd, krp_hrtbt_guid, guid); | ||
2402 | /* write to chip to prevent back-to-back writes of ibc reg */ | ||
2403 | qib_write_kreg(dd, kr_scratch, 0); | ||
2404 | |||
2405 | /* Enable port */ | ||
2406 | ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); | ||
2407 | set_vls(ppd); | ||
2408 | |||
2409 | /* be paranoid against later code motion, etc. */ | ||
2410 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
2411 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); | ||
2412 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); | ||
2413 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
2414 | |||
2415 | /* Also enable IBSTATUSCHG interrupt. */ | ||
2416 | val = qib_read_kreg_port(ppd, krp_errmask); | ||
2417 | qib_write_kreg_port(ppd, krp_errmask, | ||
2418 | val | ERR_MASK_N(IBStatusChanged)); | ||
2419 | |||
2420 | /* Always zero until we start messing with SerDes for real */ | ||
2421 | return ret; | ||
2422 | } | ||
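A quick check of the IBCCtrlA timing value programmed by qib_7322_bringup_serdes() above: per the in-code comments the flow control period field counts 128 ns units, so the value 24 gives roughly the 3 usec mentioned, and the watermark is written as 5 in units of KBytes. The snippet below only restates that arithmetic; it uses no driver structures.

/* Stand-alone arithmetic check of the flow-control values written above. */
#include <stdio.h>

int main(void)
{
	unsigned period_units = 24;   /* value written to FlowCtrlPeriod */
	unsigned ns_per_unit = 128;   /* per the in-code comment */
	unsigned watermark_kb = 5;    /* FlowCtrlWaterMark, units of KBytes */

	printf("flow-control period: %u ns (~%u usec)\n",
	       period_units * ns_per_unit,
	       (period_units * ns_per_unit + 500) / 1000);
	printf("flow-control watermark: %u KB\n", watermark_kb);
	return 0;
}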
2423 | |||
2424 | /** | ||
2425 | * qib_7322_mini_quiet_serdes - set serdes to txidle | ||
2426 | * @ppd: physical port on the qlogic_ib device | ||
2427 | * Called when driver is being unloaded | ||
2428 | */ | ||
2429 | static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd) | ||
2430 | { | ||
2431 | u64 val; | ||
2432 | unsigned long flags; | ||
2433 | |||
2434 | qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
2435 | |||
2436 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
2437 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | ||
2438 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
2439 | wake_up(&ppd->cpspec->autoneg_wait); | ||
2440 | cancel_delayed_work(&ppd->cpspec->autoneg_work); | ||
2441 | if (ppd->dd->cspec->r1) | ||
2442 | cancel_delayed_work(&ppd->cpspec->ipg_work); | ||
2443 | flush_scheduled_work(); | ||
2444 | |||
2445 | ppd->cpspec->chase_end = 0; | ||
2446 | if (ppd->cpspec->chase_timer.data) /* if initted */ | ||
2447 | del_timer_sync(&ppd->cpspec->chase_timer); | ||
2448 | |||
2449 | /* | ||
2450 | * Despite the name, actually disables IBC as well. Do it when | ||
2451 | * we are as sure as possible that no more packets can be | ||
2452 | * received, following the down and the PCS reset. | ||
2453 | * The actual disabling happens in qib_7322_mini_pcs_reset(), | ||
2454 | * along with the PCS being reset. | ||
2455 | */ | ||
2456 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn); | ||
2457 | qib_7322_mini_pcs_reset(ppd); | ||
2458 | |||
2459 | /* | ||
2460 | * Update the adjusted counters so the adjustment persists | ||
2461 | * across driver reload. | ||
2462 | */ | ||
2463 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || | ||
2464 | ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) { | ||
2465 | struct qib_devdata *dd = ppd->dd; | ||
2466 | u64 diagc; | ||
2467 | |||
2468 | /* enable counter writes */ | ||
2469 | diagc = qib_read_kreg64(dd, kr_hwdiagctrl); | ||
2470 | qib_write_kreg(dd, kr_hwdiagctrl, | ||
2471 | diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable)); | ||
2472 | |||
2473 | if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) { | ||
2474 | val = read_7322_creg32_port(ppd, crp_ibsymbolerr); | ||
2475 | if (ppd->cpspec->ibdeltainprog) | ||
2476 | val -= val - ppd->cpspec->ibsymsnap; | ||
2477 | val -= ppd->cpspec->ibsymdelta; | ||
2478 | write_7322_creg_port(ppd, crp_ibsymbolerr, val); | ||
2479 | } | ||
2480 | if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) { | ||
2481 | val = read_7322_creg32_port(ppd, crp_iblinkerrrecov); | ||
2482 | if (ppd->cpspec->ibdeltainprog) | ||
2483 | val -= val - ppd->cpspec->iblnkerrsnap; | ||
2484 | val -= ppd->cpspec->iblnkerrdelta; | ||
2485 | write_7322_creg_port(ppd, crp_iblinkerrrecov, val); | ||
2486 | } | ||
2487 | if (ppd->cpspec->iblnkdowndelta) { | ||
2488 | val = read_7322_creg32_port(ppd, crp_iblinkdown); | ||
2489 | val += ppd->cpspec->iblnkdowndelta; | ||
2490 | write_7322_creg_port(ppd, crp_iblinkdown, val); | ||
2491 | } | ||
2492 | /* | ||
2493 | * No need to save ibmalfdelta since IB perfcounters | ||
2494 | * are cleared on driver reload. | ||
2495 | */ | ||
2496 | |||
2497 | /* and disable counter writes */ | ||
2498 | qib_write_kreg(dd, kr_hwdiagctrl, diagc); | ||
2499 | } | ||
2500 | } | ||
2501 | |||
2502 | /** | ||
2503 | * qib_setup_7322_setextled - set the state of the two external LEDs | ||
2504 | * @ppd: physical port on the qlogic_ib device | ||
2505 | * @on: whether the link is up or not | ||
2506 | * | ||
2507 | * The exact combo of LEDs if on is true is determined by looking | ||
2508 | * at the ibcstatus. | ||
2509 | * | ||
2510 | * These LEDs indicate the physical and logical state of IB link. | ||
2511 | * For this chip (at least with recommended board pinouts), LED1 | ||
2512 | * is Yellow (logical state) and LED2 is Green (physical state), | ||
2513 | * | ||
2514 | * Note: We try to match the Mellanox HCA LED behavior as best | ||
2515 | * we can. Green indicates physical link state is OK (something is | ||
2516 | * plugged in, and we can train). | ||
2517 | * Amber indicates the link is logically up (ACTIVE). | ||
2518 | * Mellanox further blinks the amber LED to indicate data packet | ||
2519 | * activity, but we have no hardware support for that, so it would | ||
2520 | * require waking up every 10-20 msecs and checking the counters | ||
2521 | * on the chip, and then turning the LED off if appropriate. That's | ||
2522 | * visible overhead, so not something we will do. | ||
2523 | */ | ||
2524 | static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) | ||
2525 | { | ||
2526 | struct qib_devdata *dd = ppd->dd; | ||
2527 | u64 extctl, ledblink = 0, val; | ||
2528 | unsigned long flags; | ||
2529 | int yel, grn; | ||
2530 | |||
2531 | /* | ||
2532 | * The diags use the LED to indicate diag info, so we leave | ||
2533 | * the external LED alone when the diags are running. | ||
2534 | */ | ||
2535 | if (dd->diag_client) | ||
2536 | return; | ||
2537 | |||
2538 | /* Allow override of LED display for, e.g., locating a system in a rack */ | ||
2539 | if (ppd->led_override) { | ||
2540 | grn = (ppd->led_override & QIB_LED_PHYS); | ||
2541 | yel = (ppd->led_override & QIB_LED_LOG); | ||
2542 | } else if (on) { | ||
2543 | val = qib_read_kreg_port(ppd, krp_ibcstatus_a); | ||
2544 | grn = qib_7322_phys_portstate(val) == | ||
2545 | IB_PHYSPORTSTATE_LINKUP; | ||
2546 | yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE; | ||
2547 | } else { | ||
2548 | grn = 0; | ||
2549 | yel = 0; | ||
2550 | } | ||
2551 | |||
2552 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
2553 | extctl = dd->cspec->extctrl & (ppd->port == 1 ? | ||
2554 | ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK); | ||
2555 | if (grn) { | ||
2556 | extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN; | ||
2557 | /* | ||
2558 | * Counts are in chip clock (4ns) periods. | ||
2559 | * This is 1/16 sec (66.6ms) on, | ||
2560 | * 3/16 sec (187.5 ms) off, with packets rcvd. | ||
2561 | */ | ||
2562 | ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) | | ||
2563 | ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT); | ||
2564 | } | ||
2565 | if (yel) | ||
2566 | extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL; | ||
2567 | dd->cspec->extctrl = extctl; | ||
2568 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | ||
2569 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
2570 | |||
2571 | if (ledblink) /* blink the LED on packet receive */ | ||
2572 | qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); | ||
2573 | } | ||
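To make the LED blink values in qib_setup_7322_setextled() above concrete: the on/off fields count 4 ns chip-clock periods, so the two values written correspond to the 66.6 ms on / 187.5 ms off figures in the comment. This is a stand-alone arithmetic check only.

/* Stand-alone check of the LED blink counts computed above. */
#include <stdio.h>

int main(void)
{
	unsigned long on_cnt  = 66600UL * 1000UL / 4;    /* 4 ns periods */
	unsigned long off_cnt = 187500UL * 1000UL / 4;

	printf("on:  %lu periods = %.1f ms\n", on_cnt,  on_cnt  * 4 / 1e6);
	printf("off: %lu periods = %.1f ms\n", off_cnt, off_cnt * 4 / 1e6);
	return 0;
}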
2574 | |||
2575 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
2576 | static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd) | ||
2577 | { | ||
2578 | struct qib_devdata *dd = rcd->dd; | ||
2579 | struct qib_chip_specific *cspec = dd->cspec; | ||
2580 | int cpu = get_cpu(); | ||
2581 | |||
2582 | if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { | ||
2583 | const struct dca_reg_map *rmp; | ||
2584 | |||
2585 | cspec->rhdr_cpu[rcd->ctxt] = cpu; | ||
2586 | rmp = &dca_rcvhdr_reg_map[rcd->ctxt]; | ||
2587 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask; | ||
2588 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |= | ||
2589 | (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; | ||
2590 | qib_write_kreg(dd, rmp->regno, | ||
2591 | cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); | ||
2592 | cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable); | ||
2593 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | ||
2594 | } | ||
2595 | put_cpu(); | ||
2596 | } | ||
2597 | |||
2598 | static void qib_update_sdma_dca(struct qib_pportdata *ppd) | ||
2599 | { | ||
2600 | struct qib_devdata *dd = ppd->dd; | ||
2601 | struct qib_chip_specific *cspec = dd->cspec; | ||
2602 | int cpu = get_cpu(); | ||
2603 | unsigned pidx = ppd->port - 1; | ||
2604 | |||
2605 | if (cspec->sdma_cpu[pidx] != cpu) { | ||
2606 | cspec->sdma_cpu[pidx] = cpu; | ||
2607 | cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ? | ||
2608 | SYM_MASK(DCACtrlF, SendDma1DCAOPH) : | ||
2609 | SYM_MASK(DCACtrlF, SendDma0DCAOPH)); | ||
2610 | cspec->dca_rcvhdr_ctrl[4] |= | ||
2611 | (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << | ||
2612 | (ppd->hw_pidx ? | ||
2613 | SYM_LSB(DCACtrlF, SendDma1DCAOPH) : | ||
2614 | SYM_LSB(DCACtrlF, SendDma0DCAOPH)); | ||
2615 | qib_write_kreg(dd, KREG_IDX(DCACtrlF), | ||
2616 | cspec->dca_rcvhdr_ctrl[4]); | ||
2617 | cspec->dca_ctrl |= ppd->hw_pidx ? | ||
2618 | SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) : | ||
2619 | SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable); | ||
2620 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); | ||
2621 | } | ||
2622 | put_cpu(); | ||
2623 | } | ||
2624 | |||
2625 | static void qib_setup_dca(struct qib_devdata *dd) | ||
2626 | { | ||
2627 | struct qib_chip_specific *cspec = dd->cspec; | ||
2628 | int i; | ||
2629 | |||
2630 | for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++) | ||
2631 | cspec->rhdr_cpu[i] = -1; | ||
2632 | for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | ||
2633 | cspec->sdma_cpu[i] = -1; | ||
2634 | cspec->dca_rcvhdr_ctrl[0] = | ||
2635 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) | | ||
2636 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) | | ||
2637 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) | | ||
2638 | (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt)); | ||
2639 | cspec->dca_rcvhdr_ctrl[1] = | ||
2640 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) | | ||
2641 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) | | ||
2642 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) | | ||
2643 | (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt)); | ||
2644 | cspec->dca_rcvhdr_ctrl[2] = | ||
2645 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) | | ||
2646 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) | | ||
2647 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) | | ||
2648 | (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt)); | ||
2649 | cspec->dca_rcvhdr_ctrl[3] = | ||
2650 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) | | ||
2651 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) | | ||
2652 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) | | ||
2653 | (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt)); | ||
2654 | cspec->dca_rcvhdr_ctrl[4] = | ||
2655 | (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) | | ||
2656 | (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt)); | ||
2657 | for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) | ||
2658 | qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, | ||
2659 | cspec->dca_rcvhdr_ctrl[i]); | ||
2660 | } | ||
2661 | |||
2662 | #endif | ||
2663 | |||
2664 | /* | ||
2665 | * Disable MSIx interrupt if enabled, call generic MSIx code | ||
2666 | * to clean up, and clear pending MSIx interrupts. | ||
2667 | * Used for fallback to INTx, after reset, and when MSIx setup fails. | ||
2668 | */ | ||
2669 | static void qib_7322_nomsix(struct qib_devdata *dd) | ||
2670 | { | ||
2671 | u64 intgranted; | ||
2672 | int n; | ||
2673 | |||
2674 | dd->cspec->main_int_mask = ~0ULL; | ||
2675 | n = dd->cspec->num_msix_entries; | ||
2676 | if (n) { | ||
2677 | int i; | ||
2678 | |||
2679 | dd->cspec->num_msix_entries = 0; | ||
2680 | for (i = 0; i < n; i++) | ||
2681 | free_irq(dd->cspec->msix_entries[i].vector, | ||
2682 | dd->cspec->msix_arg[i]); | ||
2683 | qib_nomsix(dd); | ||
2684 | } | ||
2685 | /* make sure no MSIx interrupts are left pending */ | ||
2686 | intgranted = qib_read_kreg64(dd, kr_intgranted); | ||
2687 | if (intgranted) | ||
2688 | qib_write_kreg(dd, kr_intgranted, intgranted); | ||
2689 | } | ||
2690 | |||
2691 | static void qib_7322_free_irq(struct qib_devdata *dd) | ||
2692 | { | ||
2693 | if (dd->cspec->irq) { | ||
2694 | free_irq(dd->cspec->irq, dd); | ||
2695 | dd->cspec->irq = 0; | ||
2696 | } | ||
2697 | qib_7322_nomsix(dd); | ||
2698 | } | ||
2699 | |||
2700 | static void qib_setup_7322_cleanup(struct qib_devdata *dd) | ||
2701 | { | ||
2702 | int i; | ||
2703 | |||
2704 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
2705 | if (dd->flags & QIB_DCA_ENABLED) { | ||
2706 | dca_remove_requester(&dd->pcidev->dev); | ||
2707 | dd->flags &= ~QIB_DCA_ENABLED; | ||
2708 | dd->cspec->dca_ctrl = 0; | ||
2709 | qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); | ||
2710 | } | ||
2711 | #endif | ||
2712 | |||
2713 | qib_7322_free_irq(dd); | ||
2714 | kfree(dd->cspec->cntrs); | ||
2715 | kfree(dd->cspec->sendchkenable); | ||
2716 | kfree(dd->cspec->sendgrhchk); | ||
2717 | kfree(dd->cspec->sendibchk); | ||
2718 | kfree(dd->cspec->msix_entries); | ||
2719 | kfree(dd->cspec->msix_arg); | ||
2720 | for (i = 0; i < dd->num_pports; i++) { | ||
2721 | unsigned long flags; | ||
2722 | u32 mask = QSFP_GPIO_MOD_PRS_N | | ||
2723 | (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT); | ||
2724 | |||
2725 | kfree(dd->pport[i].cpspec->portcntrs); | ||
2726 | if (dd->flags & QIB_HAS_QSFP) { | ||
2727 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
2728 | dd->cspec->gpio_mask &= ~mask; | ||
2729 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | ||
2730 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
2731 | qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data); | ||
2732 | } | ||
2733 | if (dd->pport[i].ibport_data.smi_ah) | ||
2734 | ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah); | ||
2735 | } | ||
2736 | } | ||
2737 | |||
2738 | /* handle SDMA interrupts */ | ||
2739 | static void sdma_7322_intr(struct qib_devdata *dd, u64 istat) | ||
2740 | { | ||
2741 | struct qib_pportdata *ppd0 = &dd->pport[0]; | ||
2742 | struct qib_pportdata *ppd1 = &dd->pport[1]; | ||
2743 | u64 intr0 = istat & (INT_MASK_P(SDma, 0) | | ||
2744 | INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0)); | ||
2745 | u64 intr1 = istat & (INT_MASK_P(SDma, 1) | | ||
2746 | INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1)); | ||
2747 | |||
2748 | if (intr0) | ||
2749 | qib_sdma_intr(ppd0); | ||
2750 | if (intr1) | ||
2751 | qib_sdma_intr(ppd1); | ||
2752 | |||
2753 | if (istat & INT_MASK_PM(SDmaCleanupDone, 0)) | ||
2754 | qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started); | ||
2755 | if (istat & INT_MASK_PM(SDmaCleanupDone, 1)) | ||
2756 | qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started); | ||
2757 | } | ||
2758 | |||
2759 | /* | ||
2760 | * Set or clear the Send buffer available interrupt enable bit. | ||
2761 | */ | ||
2762 | static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint) | ||
2763 | { | ||
2764 | unsigned long flags; | ||
2765 | |||
2766 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
2767 | if (needint) | ||
2768 | dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail); | ||
2769 | else | ||
2770 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail); | ||
2771 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
2772 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
2773 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
2774 | } | ||
2775 | |||
2776 | /* | ||
2777 | * Somehow got an interrupt with reserved bits set in interrupt status. | ||
2778 | * Print a message so we know it happened, then clear them. | ||
2779 | * keep mainline interrupt handler cache-friendly | ||
2780 | */ | ||
2781 | static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat) | ||
2782 | { | ||
2783 | u64 kills; | ||
2784 | |||
2785 | /* clear the offending bits from the mask so they cannot recur */ | ||
2786 | kills = istat & ~QIB_I_BITSEXTANT; | ||
2787 | qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n", | ||
2788 | (unsigned long long) kills); | ||
2789 | qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills)); | ||
2790 | } | ||
2791 | |||
2792 | /* keep mainline interrupt handler cache-friendly */ | ||
2793 | static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) | ||
2794 | { | ||
2795 | u32 gpiostatus; | ||
2796 | int handled = 0; | ||
2797 | int pidx; | ||
2798 | |||
2799 | /* | ||
2800 | * Boards for this chip currently don't use GPIO interrupts, | ||
2801 | * so clear by writing GPIOstatus to GPIOclear, and complain | ||
2802 | * to developer. To avoid endless repeats, clear | ||
2803 | * the bits in the mask, since there is some kind of | ||
2804 | * programming error or chip problem. | ||
2805 | */ | ||
2806 | gpiostatus = qib_read_kreg32(dd, kr_gpio_status); | ||
2807 | /* | ||
2808 | * In theory, writing GPIOstatus to GPIOclear could | ||
2809 | * have a bad side-effect on some diagnostic that wanted | ||
2810 | * to poll for a status-change, but the various shadows | ||
2811 | * make that problematic at best. Diags will just suppress | ||
2812 | * all GPIO interrupts during such tests. | ||
2813 | */ | ||
2814 | qib_write_kreg(dd, kr_gpio_clear, gpiostatus); | ||
2815 | /* | ||
2816 | * Check for QSFP MOD_PRS changes | ||
2817 | * only works for single port if IB1 != pidx1 | ||
2818 | */ | ||
2819 | for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP); | ||
2820 | ++pidx) { | ||
2821 | struct qib_pportdata *ppd; | ||
2822 | struct qib_qsfp_data *qd; | ||
2823 | u32 mask; | ||
2824 | if (!dd->pport[pidx].link_speed_supported) | ||
2825 | continue; | ||
2826 | mask = QSFP_GPIO_MOD_PRS_N; | ||
2827 | ppd = dd->pport + pidx; | ||
2828 | mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); | ||
2829 | if (gpiostatus & dd->cspec->gpio_mask & mask) { | ||
2830 | u64 pins; | ||
2831 | qd = &ppd->cpspec->qsfp_data; | ||
2832 | gpiostatus &= ~mask; | ||
2833 | pins = qib_read_kreg64(dd, kr_extstatus); | ||
2834 | pins >>= SYM_LSB(EXTStatus, GPIOIn); | ||
2835 | if (!(pins & mask)) { | ||
2836 | ++handled; | ||
2837 | qd->t_insert = get_jiffies_64(); | ||
2838 | schedule_work(&qd->work); | ||
2839 | } | ||
2840 | } | ||
2841 | } | ||
2842 | |||
2843 | if (gpiostatus && !handled) { | ||
2844 | const u32 mask = qib_read_kreg32(dd, kr_gpio_mask); | ||
2845 | u32 gpio_irq = mask & gpiostatus; | ||
2846 | |||
2847 | /* | ||
2848 | * Clear any troublemakers, and update chip from shadow | ||
2849 | */ | ||
2850 | dd->cspec->gpio_mask &= ~gpio_irq; | ||
2851 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | ||
2852 | } | ||
2853 | } | ||
2854 | |||
2855 | /* | ||
2856 | * Handle errors and unusual events first, separate function | ||
2857 | * to improve cache hits for fast path interrupt handling. | ||
2858 | */ | ||
2859 | static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat) | ||
2860 | { | ||
2861 | if (istat & ~QIB_I_BITSEXTANT) | ||
2862 | unknown_7322_ibits(dd, istat); | ||
2863 | if (istat & QIB_I_GPIO) | ||
2864 | unknown_7322_gpio_intr(dd); | ||
2865 | if (istat & QIB_I_C_ERROR) | ||
2866 | handle_7322_errors(dd); | ||
2867 | if (istat & INT_MASK_P(Err, 0) && dd->rcd[0]) | ||
2868 | handle_7322_p_errors(dd->rcd[0]->ppd); | ||
2869 | if (istat & INT_MASK_P(Err, 1) && dd->rcd[1]) | ||
2870 | handle_7322_p_errors(dd->rcd[1]->ppd); | ||
2871 | } | ||
2872 | |||
2873 | /* | ||
2874 | * Dynamically adjust the rcv int timeout for a context based on incoming | ||
2875 | * packet rate. | ||
2876 | */ | ||
2877 | static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts) | ||
2878 | { | ||
2879 | struct qib_devdata *dd = rcd->dd; | ||
2880 | u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt]; | ||
2881 | |||
2882 | /* | ||
2883 | * Dynamically adjust idle timeout on chip | ||
2884 | * based on number of packets processed. | ||
2885 | */ | ||
2886 | if (npkts < rcv_int_count && timeout > 2) | ||
2887 | timeout >>= 1; | ||
2888 | else if (npkts >= rcv_int_count && timeout < rcv_int_timeout) | ||
2889 | timeout = min(timeout << 1, rcv_int_timeout); | ||
2890 | else | ||
2891 | return; | ||
2892 | |||
2893 | dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout; | ||
2894 | qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout); | ||
2895 | } | ||
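A stand-alone simulation of the receive-interrupt timeout adaptation done by adjust_rcv_timeout() above. rcv_int_count and rcv_int_timeout are driver module parameters defined elsewhere; the values below are hypothetical, chosen only to show the halving/doubling behaviour.

/* Stand-alone sketch; threshold and cap values are hypothetical. */
#include <stdio.h>

static unsigned rcv_int_count = 16;      /* assumed packet-rate threshold */
static unsigned rcv_int_timeout = 375;   /* assumed maximum timeout value */

static unsigned adjust(unsigned timeout, unsigned npkts)
{
	if (npkts < rcv_int_count && timeout > 2)
		timeout >>= 1;                               /* slow context: react faster */
	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
		timeout = (timeout << 1) < rcv_int_timeout ?
			  (timeout << 1) : rcv_int_timeout;  /* busy context: batch more */
	return timeout;
}

int main(void)
{
	unsigned npkts[] = { 64, 64, 64, 2, 2, 64 };
	unsigned timeout = 64, i;

	for (i = 0; i < sizeof(npkts) / sizeof(npkts[0]); i++) {
		timeout = adjust(timeout, npkts[i]);
		printf("npkts=%2u -> timeout=%u\n", npkts[i], timeout);
	}
	return 0;
}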
2896 | |||
2897 | /* | ||
2898 | * This is the main interrupt handler. | ||
2899 | * It will normally only be used for low frequency interrupts but may | ||
2900 | * have to handle all interrupts if INTx is enabled or fewer than normal | ||
2901 | * MSIx interrupts were allocated. | ||
2902 | * This routine should ignore the interrupt bits for any of the | ||
2903 | * dedicated MSIx handlers. | ||
2904 | */ | ||
2905 | static irqreturn_t qib_7322intr(int irq, void *data) | ||
2906 | { | ||
2907 | struct qib_devdata *dd = data; | ||
2908 | irqreturn_t ret; | ||
2909 | u64 istat; | ||
2910 | u64 ctxtrbits; | ||
2911 | u64 rmask; | ||
2912 | unsigned i; | ||
2913 | u32 npkts; | ||
2914 | |||
2915 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) { | ||
2916 | /* | ||
2917 | * This return value is not great, but we do not want the | ||
2918 | * interrupt core code to remove our interrupt handler | ||
2919 | * because we don't appear to be handling an interrupt | ||
2920 | * during a chip reset. | ||
2921 | */ | ||
2922 | ret = IRQ_HANDLED; | ||
2923 | goto bail; | ||
2924 | } | ||
2925 | |||
2926 | istat = qib_read_kreg64(dd, kr_intstatus); | ||
2927 | |||
2928 | if (unlikely(istat == ~0ULL)) { | ||
2929 | qib_bad_intrstatus(dd); | ||
2930 | qib_dev_err(dd, "Interrupt status all f's, skipping\n"); | ||
2931 | /* don't know if it was our interrupt or not */ | ||
2932 | ret = IRQ_NONE; | ||
2933 | goto bail; | ||
2934 | } | ||
2935 | |||
2936 | istat &= dd->cspec->main_int_mask; | ||
2937 | if (unlikely(!istat)) { | ||
2938 | /* already handled, or shared and not us */ | ||
2939 | ret = IRQ_NONE; | ||
2940 | goto bail; | ||
2941 | } | ||
2942 | |||
2943 | qib_stats.sps_ints++; | ||
2944 | if (dd->int_counter != (u32) -1) | ||
2945 | dd->int_counter++; | ||
2946 | |||
2947 | /* handle "errors" of various kinds first, device ahead of port */ | ||
2948 | if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO | | ||
2949 | QIB_I_C_ERROR | INT_MASK_P(Err, 0) | | ||
2950 | INT_MASK_P(Err, 1)))) | ||
2951 | unlikely_7322_intr(dd, istat); | ||
2952 | |||
2953 | /* | ||
2954 | * Clear the interrupt bits we found set, relatively early, so we | ||
2955 | * "know" know the chip will have seen this by the time we process | ||
2956 | * the queue, and will re-interrupt if necessary. The processor | ||
2957 | * itself won't take the interrupt again until we return. | ||
2958 | */ | ||
2959 | qib_write_kreg(dd, kr_intclear, istat); | ||
2960 | |||
2961 | /* | ||
2962 | * Handle kernel receive queues before checking for pio buffers | ||
2963 | * available since receives can overflow; piobuf waiters can afford | ||
2964 | * a few extra cycles, since they were waiting anyway. | ||
2965 | */ | ||
2966 | ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK); | ||
2967 | if (ctxtrbits) { | ||
2968 | rmask = (1ULL << QIB_I_RCVAVAIL_LSB) | | ||
2969 | (1ULL << QIB_I_RCVURG_LSB); | ||
2970 | for (i = 0; i < dd->first_user_ctxt; i++) { | ||
2971 | if (ctxtrbits & rmask) { | ||
2972 | ctxtrbits &= ~rmask; | ||
2973 | if (dd->rcd[i]) { | ||
2974 | qib_kreceive(dd->rcd[i], NULL, &npkts); | ||
2975 | adjust_rcv_timeout(dd->rcd[i], npkts); | ||
2976 | } | ||
2977 | } | ||
2978 | rmask <<= 1; | ||
2979 | } | ||
2980 | if (ctxtrbits) { | ||
2981 | ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) | | ||
2982 | (ctxtrbits >> QIB_I_RCVURG_LSB); | ||
2983 | qib_handle_urcv(dd, ctxtrbits); | ||
2984 | } | ||
2985 | } | ||
2986 | |||
2987 | if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1))) | ||
2988 | sdma_7322_intr(dd, istat); | ||
2989 | |||
2990 | if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED)) | ||
2991 | qib_ib_piobufavail(dd); | ||
2992 | |||
2993 | ret = IRQ_HANDLED; | ||
2994 | bail: | ||
2995 | return ret; | ||
2996 | } | ||
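An illustrative, stand-alone version of the per-context scan in the main interrupt handler above. The RCVAVAIL/RCVURG LSB positions below are placeholders (the real values come from the chip register definitions); they are chosen only to show how one "avail" bit and one "urgent" bit per context are walked with a mask that shifts once per context.

/* Stand-alone sketch of the per-context bit scan; LSB values are placeholders. */
#include <stdio.h>
#include <stdint.h>

#define RCVAVAIL_LSB 0     /* hypothetical bit position of ctxt 0 "avail" */
#define RCVURG_LSB   32    /* hypothetical bit position of ctxt 0 "urgent" */
#define NUM_CTXTS    4

int main(void)
{
	/* pretend contexts 1 and 3 have packets available, context 2 urgent */
	uint64_t istat = (1ULL << (RCVAVAIL_LSB + 1)) |
			 (1ULL << (RCVAVAIL_LSB + 3)) |
			 (1ULL << (RCVURG_LSB + 2));
	uint64_t rmask = (1ULL << RCVAVAIL_LSB) | (1ULL << RCVURG_LSB);
	int i;

	for (i = 0; i < NUM_CTXTS; i++) {
		if (istat & rmask)
			printf("context %d needs servicing\n", i);
		rmask <<= 1;   /* next context's pair of bits */
	}
	return 0;
}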
2997 | |||
2998 | /* | ||
2999 | * Dedicated receive packet available interrupt handler. | ||
3000 | */ | ||
3001 | static irqreturn_t qib_7322pintr(int irq, void *data) | ||
3002 | { | ||
3003 | struct qib_ctxtdata *rcd = data; | ||
3004 | struct qib_devdata *dd = rcd->dd; | ||
3005 | u32 npkts; | ||
3006 | |||
3007 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | ||
3008 | /* | ||
3009 | * This return value is not great, but we do not want the | ||
3010 | * interrupt core code to remove our interrupt handler | ||
3011 | * because we don't appear to be handling an interrupt | ||
3012 | * during a chip reset. | ||
3013 | */ | ||
3014 | return IRQ_HANDLED; | ||
3015 | |||
3016 | qib_stats.sps_ints++; | ||
3017 | if (dd->int_counter != (u32) -1) | ||
3018 | dd->int_counter++; | ||
3019 | |||
3020 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
3021 | if (dd->flags & QIB_DCA_ENABLED) | ||
3022 | qib_update_rhdrq_dca(rcd); | ||
3023 | #endif | ||
3024 | |||
3025 | /* Clear the interrupt bit we expect to be set. */ | ||
3026 | qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | | ||
3027 | (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); | ||
3028 | |||
3029 | qib_kreceive(rcd, NULL, &npkts); | ||
3030 | adjust_rcv_timeout(rcd, npkts); | ||
3031 | |||
3032 | return IRQ_HANDLED; | ||
3033 | } | ||
3034 | |||
3035 | /* | ||
3036 | * Dedicated Send buffer available interrupt handler. | ||
3037 | */ | ||
3038 | static irqreturn_t qib_7322bufavail(int irq, void *data) | ||
3039 | { | ||
3040 | struct qib_devdata *dd = data; | ||
3041 | |||
3042 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | ||
3043 | /* | ||
3044 | * This return value is not great, but we do not want the | ||
3045 | * interrupt core code to remove our interrupt handler | ||
3046 | * because we don't appear to be handling an interrupt | ||
3047 | * during a chip reset. | ||
3048 | */ | ||
3049 | return IRQ_HANDLED; | ||
3050 | |||
3051 | qib_stats.sps_ints++; | ||
3052 | if (dd->int_counter != (u32) -1) | ||
3053 | dd->int_counter++; | ||
3054 | |||
3055 | /* Clear the interrupt bit we expect to be set. */ | ||
3056 | qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); | ||
3057 | |||
3058 | /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */ | ||
3059 | if (dd->flags & QIB_INITTED) | ||
3060 | qib_ib_piobufavail(dd); | ||
3061 | else | ||
3062 | qib_wantpiobuf_7322_intr(dd, 0); | ||
3063 | |||
3064 | return IRQ_HANDLED; | ||
3065 | } | ||
3066 | |||
3067 | /* | ||
3068 | * Dedicated Send DMA interrupt handler. | ||
3069 | */ | ||
3070 | static irqreturn_t sdma_intr(int irq, void *data) | ||
3071 | { | ||
3072 | struct qib_pportdata *ppd = data; | ||
3073 | struct qib_devdata *dd = ppd->dd; | ||
3074 | |||
3075 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | ||
3076 | /* | ||
3077 | * This return value is not great, but we do not want the | ||
3078 | * interrupt core code to remove our interrupt handler | ||
3079 | * because we don't appear to be handling an interrupt | ||
3080 | * during a chip reset. | ||
3081 | */ | ||
3082 | return IRQ_HANDLED; | ||
3083 | |||
3084 | qib_stats.sps_ints++; | ||
3085 | if (dd->int_counter != (u32) -1) | ||
3086 | dd->int_counter++; | ||
3087 | |||
3088 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
3089 | if (dd->flags & QIB_DCA_ENABLED) | ||
3090 | qib_update_sdma_dca(ppd); | ||
3091 | #endif | ||
3092 | |||
3093 | /* Clear the interrupt bit we expect to be set. */ | ||
3094 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | ||
3095 | INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0)); | ||
3096 | qib_sdma_intr(ppd); | ||
3097 | |||
3098 | return IRQ_HANDLED; | ||
3099 | } | ||
3100 | |||
3101 | /* | ||
3102 | * Dedicated Send DMA idle interrupt handler. | ||
3103 | */ | ||
3104 | static irqreturn_t sdma_idle_intr(int irq, void *data) | ||
3105 | { | ||
3106 | struct qib_pportdata *ppd = data; | ||
3107 | struct qib_devdata *dd = ppd->dd; | ||
3108 | |||
3109 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | ||
3110 | /* | ||
3111 | * This return value is not great, but we do not want the | ||
3112 | * interrupt core code to remove our interrupt handler | ||
3113 | * because we don't appear to be handling an interrupt | ||
3114 | * during a chip reset. | ||
3115 | */ | ||
3116 | return IRQ_HANDLED; | ||
3117 | |||
3118 | qib_stats.sps_ints++; | ||
3119 | if (dd->int_counter != (u32) -1) | ||
3120 | dd->int_counter++; | ||
3121 | |||
3122 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
3123 | if (dd->flags & QIB_DCA_ENABLED) | ||
3124 | qib_update_sdma_dca(ppd); | ||
3125 | #endif | ||
3126 | |||
3127 | /* Clear the interrupt bit we expect to be set. */ | ||
3128 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | ||
3129 | INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0)); | ||
3130 | qib_sdma_intr(ppd); | ||
3131 | |||
3132 | return IRQ_HANDLED; | ||
3133 | } | ||
3134 | |||
3135 | /* | ||
3136 | * Dedicated Send DMA progress interrupt handler. | ||
3137 | */ | ||
3138 | static irqreturn_t sdma_progress_intr(int irq, void *data) | ||
3139 | { | ||
3140 | struct qib_pportdata *ppd = data; | ||
3141 | struct qib_devdata *dd = ppd->dd; | ||
3142 | |||
3143 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | ||
3144 | /* | ||
3145 | * This return value is not great, but we do not want the | ||
3146 | * interrupt core code to remove our interrupt handler | ||
3147 | * because we don't appear to be handling an interrupt | ||
3148 | * during a chip reset. | ||
3149 | */ | ||
3150 | return IRQ_HANDLED; | ||
3151 | |||
3152 | qib_stats.sps_ints++; | ||
3153 | if (dd->int_counter != (u32) -1) | ||
3154 | dd->int_counter++; | ||
3155 | |||
3156 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
3157 | if (dd->flags & QIB_DCA_ENABLED) | ||
3158 | qib_update_sdma_dca(ppd); | ||
3159 | #endif | ||
3160 | |||
3161 | /* Clear the interrupt bit we expect to be set. */ | ||
3162 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | ||
3163 | INT_MASK_P(SDmaProgress, 1) : | ||
3164 | INT_MASK_P(SDmaProgress, 0)); | ||
3165 | qib_sdma_intr(ppd); | ||
3166 | |||
3167 | return IRQ_HANDLED; | ||
3168 | } | ||
3169 | |||
3170 | /* | ||
3171 | * Dedicated Send DMA cleanup interrupt handler. | ||
3172 | */ | ||
3173 | static irqreturn_t sdma_cleanup_intr(int irq, void *data) | ||
3174 | { | ||
3175 | struct qib_pportdata *ppd = data; | ||
3176 | struct qib_devdata *dd = ppd->dd; | ||
3177 | |||
3178 | if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) | ||
3179 | /* | ||
3180 | * This return value is not great, but we do not want the | ||
3181 | * interrupt core code to remove our interrupt handler | ||
3182 | * because we don't appear to be handling an interrupt | ||
3183 | * during a chip reset. | ||
3184 | */ | ||
3185 | return IRQ_HANDLED; | ||
3186 | |||
3187 | qib_stats.sps_ints++; | ||
3188 | if (dd->int_counter != (u32) -1) | ||
3189 | dd->int_counter++; | ||
3190 | |||
3191 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
3192 | if (dd->flags & QIB_DCA_ENABLED) | ||
3193 | qib_update_sdma_dca(ppd); | ||
3194 | #endif | ||
3195 | |||
3196 | /* Clear the interrupt bit we expect to be set. */ | ||
3197 | qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? | ||
3198 | INT_MASK_PM(SDmaCleanupDone, 1) : | ||
3199 | INT_MASK_PM(SDmaCleanupDone, 0)); | ||
3200 | qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started); | ||
3201 | |||
3202 | return IRQ_HANDLED; | ||
3203 | } | ||
3204 | |||
3205 | /* | ||
3206 | * Set up our chip-specific interrupt handler. | ||
3207 | * The interrupt type has already been set up, so | ||
3208 | * we just need to do the registration and error checking. | ||
3209 | * If we are using MSIx interrupts, we may fall back to | ||
3210 | * INTx later, if the interrupt handler doesn't get called | ||
3211 | * within 1/2 second (see verify_interrupt()). | ||
3212 | */ | ||
3213 | static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend) | ||
3214 | { | ||
3215 | int ret, i, msixnum; | ||
3216 | u64 redirect[6]; | ||
3217 | u64 mask; | ||
3218 | |||
3219 | if (!dd->num_pports) | ||
3220 | return; | ||
3221 | |||
3222 | if (clearpend) { | ||
3223 | /* | ||
3224 | * if not switching interrupt types, be sure interrupts are | ||
3225 | * disabled, and then clear anything pending at this point, | ||
3226 | * because we are starting clean. | ||
3227 | */ | ||
3228 | qib_7322_set_intr_state(dd, 0); | ||
3229 | |||
3230 | /* clear the reset error, init error/hwerror mask */ | ||
3231 | qib_7322_init_hwerrors(dd); | ||
3232 | |||
3233 | /* clear any interrupt bits that might be set */ | ||
3234 | qib_write_kreg(dd, kr_intclear, ~0ULL); | ||
3235 | |||
3236 | /* make sure no pending MSIx intr, and clear diag reg */ | ||
3237 | qib_write_kreg(dd, kr_intgranted, ~0ULL); | ||
3238 | qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL); | ||
3239 | } | ||
3240 | |||
3241 | if (!dd->cspec->num_msix_entries) { | ||
3242 | /* Try to get INTx interrupt */ | ||
3243 | try_intx: | ||
3244 | if (!dd->pcidev->irq) { | ||
3245 | qib_dev_err(dd, "irq is 0, BIOS error? " | ||
3246 | "Interrupts won't work\n"); | ||
3247 | goto bail; | ||
3248 | } | ||
3249 | ret = request_irq(dd->pcidev->irq, qib_7322intr, | ||
3250 | IRQF_SHARED, QIB_DRV_NAME, dd); | ||
3251 | if (ret) { | ||
3252 | qib_dev_err(dd, "Couldn't setup INTx " | ||
3253 | "interrupt (irq=%d): %d\n", | ||
3254 | dd->pcidev->irq, ret); | ||
3255 | goto bail; | ||
3256 | } | ||
3257 | dd->cspec->irq = dd->pcidev->irq; | ||
3258 | dd->cspec->main_int_mask = ~0ULL; | ||
3259 | goto bail; | ||
3260 | } | ||
3261 | |||
3262 | /* Try to get MSIx interrupts */ | ||
3263 | memset(redirect, 0, sizeof redirect); | ||
3264 | mask = ~0ULL; | ||
3265 | msixnum = 0; | ||
3266 | for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) { | ||
3267 | irq_handler_t handler; | ||
3268 | const char *name; | ||
3269 | void *arg; | ||
3270 | u64 val; | ||
3271 | int lsb, reg, sh; | ||
3272 | |||
3273 | if (i < ARRAY_SIZE(irq_table)) { | ||
3274 | if (irq_table[i].port) { | ||
3275 | /* skip if for a non-configured port */ | ||
3276 | if (irq_table[i].port > dd->num_pports) | ||
3277 | continue; | ||
3278 | arg = dd->pport + irq_table[i].port - 1; | ||
3279 | } else | ||
3280 | arg = dd; | ||
3281 | lsb = irq_table[i].lsb; | ||
3282 | handler = irq_table[i].handler; | ||
3283 | name = irq_table[i].name; | ||
3284 | } else { | ||
3285 | unsigned ctxt; | ||
3286 | |||
3287 | ctxt = i - ARRAY_SIZE(irq_table); | ||
3288 | /* per krcvq context receive interrupt */ | ||
3289 | arg = dd->rcd[ctxt]; | ||
3290 | if (!arg) | ||
3291 | continue; | ||
3292 | lsb = QIB_I_RCVAVAIL_LSB + ctxt; | ||
3293 | handler = qib_7322pintr; | ||
3294 | name = QIB_DRV_NAME " (kctx)"; | ||
3295 | } | ||
3296 | ret = request_irq(dd->cspec->msix_entries[msixnum].vector, | ||
3297 | handler, 0, name, arg); | ||
3298 | if (ret) { | ||
3299 | /* | ||
3300 | * Shouldn't happen since the enable said we could | ||
3301 | * have as many as we are trying to set up here. | ||
3302 | */ | ||
3303 | qib_dev_err(dd, "Couldn't setup MSIx " | ||
3304 | "interrupt (vec=%d, irq=%d): %d\n", msixnum, | ||
3305 | dd->cspec->msix_entries[msixnum].vector, | ||
3306 | ret); | ||
3307 | qib_7322_nomsix(dd); | ||
3308 | goto try_intx; | ||
3309 | } | ||
3310 | dd->cspec->msix_arg[msixnum] = arg; | ||
3311 | if (lsb >= 0) { | ||
3312 | reg = lsb / IBA7322_REDIRECT_VEC_PER_REG; | ||
3313 | sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * | ||
3314 | SYM_LSB(IntRedirect0, vec1); | ||
3315 | mask &= ~(1ULL << lsb); | ||
3316 | redirect[reg] |= ((u64) msixnum) << sh; | ||
3317 | } | ||
3318 | val = qib_read_kreg64(dd, 2 * msixnum + 1 + | ||
3319 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | ||
3320 | msixnum++; | ||
3321 | } | ||
3322 | /* Initialize the vector mapping */ | ||
3323 | for (i = 0; i < ARRAY_SIZE(redirect); i++) | ||
3324 | qib_write_kreg(dd, kr_intredirect + i, redirect[i]); | ||
3325 | dd->cspec->main_int_mask = mask; | ||
3326 | bail:; | ||
3327 | } | ||
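As a side note on the redirect[] bookkeeping in the MSI-X loop above: each interrupt-status bit (lsb) selects one small field in the array of 64-bit IntRedirect registers, and that field is programmed with the MSI-X vector number. A minimal user-space sketch of the same index/shift arithmetic follows; the field count per register and field width are illustrative assumptions, since the driver takes the real values from IBA7322_REDIRECT_VEC_PER_REG and the generated SYM_LSB() register definitions.

#include <stdint.h>
#include <stdio.h>

#define VEC_PER_REG	12	/* assumed fields per 64-bit redirect register */
#define VEC_FIELD_BITS	 5	/* assumed width of each vector field */

static void map_lsb_to_vector(uint64_t redirect[6], unsigned lsb,
			      unsigned msixnum)
{
	unsigned reg = lsb / VEC_PER_REG;
	unsigned sh = (lsb % VEC_PER_REG) * VEC_FIELD_BITS;

	/* program "status bit lsb is delivered on MSI-X vector msixnum" */
	redirect[reg] |= (uint64_t)msixnum << sh;
}

int main(void)
{
	uint64_t redirect[6] = { 0 };

	map_lsb_to_vector(redirect, 17, 3);	/* status bit 17 -> vector 3 */
	printf("redirect[%u] = %#llx\n", 17 / VEC_PER_REG,
	       (unsigned long long)redirect[17 / VEC_PER_REG]);
	return 0;
}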
3328 | |||
3329 | /** | ||
3330 | * qib_7322_boardname - fill in the board name and note features | ||
3331 | * @dd: the qlogic_ib device | ||
3332 | * | ||
3333 | * info will be based on the board revision register | ||
3334 | */ | ||
3335 | static unsigned qib_7322_boardname(struct qib_devdata *dd) | ||
3336 | { | ||
3337 | /* Will need enumeration of board-types here */ | ||
3338 | char *n; | ||
3339 | u32 boardid, namelen; | ||
3340 | unsigned features = DUAL_PORT_CAP; | ||
3341 | |||
3342 | boardid = SYM_FIELD(dd->revision, Revision, BoardID); | ||
3343 | |||
3344 | switch (boardid) { | ||
3345 | case 0: | ||
3346 | n = "InfiniPath_QLE7342_Emulation"; | ||
3347 | break; | ||
3348 | case 1: | ||
3349 | n = "InfiniPath_QLE7340"; | ||
3350 | dd->flags |= QIB_HAS_QSFP; | ||
3351 | features = PORT_SPD_CAP; | ||
3352 | break; | ||
3353 | case 2: | ||
3354 | n = "InfiniPath_QLE7342"; | ||
3355 | dd->flags |= QIB_HAS_QSFP; | ||
3356 | break; | ||
3357 | case 3: | ||
3358 | n = "InfiniPath_QMI7342"; | ||
3359 | break; | ||
3360 | case 4: | ||
3361 | n = "InfiniPath_Unsupported7342"; | ||
3362 | qib_dev_err(dd, "Unsupported version of QMH7342\n"); | ||
3363 | features = 0; | ||
3364 | break; | ||
3365 | case BOARD_QMH7342: | ||
3366 | n = "InfiniPath_QMH7342"; | ||
3367 | features = 0x24; | ||
3368 | break; | ||
3369 | case BOARD_QME7342: | ||
3370 | n = "InfiniPath_QME7342"; | ||
3371 | break; | ||
3372 | case 15: | ||
3373 | n = "InfiniPath_QLE7342_TEST"; | ||
3374 | dd->flags |= QIB_HAS_QSFP; | ||
3375 | break; | ||
3376 | default: | ||
3377 | n = "InfiniPath_QLE73xy_UNKNOWN"; | ||
3378 | qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid); | ||
3379 | break; | ||
3380 | } | ||
3381 | dd->board_atten = 1; /* index into txdds_Xdr */ | ||
3382 | |||
3383 | namelen = strlen(n) + 1; | ||
3384 | dd->boardname = kmalloc(namelen, GFP_KERNEL); | ||
3385 | if (!dd->boardname) | ||
3386 | qib_dev_err(dd, "Failed allocation for board name: %s\n", n); | ||
3387 | else | ||
3388 | snprintf(dd->boardname, namelen, "%s", n); | ||
3389 | |||
3390 | snprintf(dd->boardversion, sizeof(dd->boardversion), | ||
3391 | "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n", | ||
3392 | QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname, | ||
3393 | (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch), | ||
3394 | dd->majrev, dd->minrev, | ||
3395 | (unsigned)SYM_FIELD(dd->revision, Revision_R, SW)); | ||
3396 | |||
3397 | if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) { | ||
3398 | qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode" | ||
3399 | " by module parameter\n", dd->unit); | ||
3400 | features &= PORT_SPD_CAP; | ||
3401 | } | ||
3402 | |||
3403 | return features; | ||
3404 | } | ||
3405 | |||
3406 | /* | ||
3407 | * This routine sleeps, so it can only be called from user context, not | ||
3408 | * from interrupt context. | ||
3409 | */ | ||
3410 | static int qib_do_7322_reset(struct qib_devdata *dd) | ||
3411 | { | ||
3412 | u64 val; | ||
3413 | u64 *msix_vecsave; | ||
3414 | int i, msix_entries, ret = 1; | ||
3415 | u16 cmdval; | ||
3416 | u8 int_line, clinesz; | ||
3417 | unsigned long flags; | ||
3418 | |||
3419 | /* Use dev_err so it shows up in logs, etc. */ | ||
3420 | qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit); | ||
3421 | |||
3422 | qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz); | ||
3423 | |||
3424 | msix_entries = dd->cspec->num_msix_entries; | ||
3425 | |||
3426 | /* no interrupts till re-initted */ | ||
3427 | qib_7322_set_intr_state(dd, 0); | ||
3428 | |||
3429 | if (msix_entries) { | ||
3430 | qib_7322_nomsix(dd); | ||
3431 | /* can be up to 512 bytes, too big for stack */ | ||
3432 | msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries * | ||
3433 | sizeof(u64), GFP_KERNEL); | ||
3434 | if (!msix_vecsave) | ||
3435 | qib_dev_err(dd, "No mem to save MSIx data\n"); | ||
3436 | } else | ||
3437 | msix_vecsave = NULL; | ||
3438 | |||
3439 | /* | ||
3440 | * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector | ||
3441 | * info that is set up by the BIOS, so we have to save and restore | ||
3442 | * it ourselves. There is some risk something could change it, | ||
3443 | * after we save it, but since we have disabled the MSIx, it | ||
3444 | * shouldn't be touched... | ||
3445 | */ | ||
3446 | for (i = 0; i < msix_entries; i++) { | ||
3447 | u64 vecaddr, vecdata; | ||
3448 | vecaddr = qib_read_kreg64(dd, 2 * i + | ||
3449 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | ||
3450 | vecdata = qib_read_kreg64(dd, 1 + 2 * i + | ||
3451 | (QIB_7322_MsixTable_OFFS / sizeof(u64))); | ||
3452 | if (msix_vecsave) { | ||
3453 | msix_vecsave[2 * i] = vecaddr; | ||
3454 | /* save it without the masked bit set */ | ||
3455 | msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL; | ||
3456 | } | ||
3457 | } | ||
3458 | |||
3459 | dd->pport->cpspec->ibdeltainprog = 0; | ||
3460 | dd->pport->cpspec->ibsymdelta = 0; | ||
3461 | dd->pport->cpspec->iblnkerrdelta = 0; | ||
3462 | dd->pport->cpspec->ibmalfdelta = 0; | ||
3463 | dd->int_counter = 0; /* so we check interrupts work again */ | ||
3464 | |||
3465 | /* | ||
3466 | * Keep chip from being accessed until we are ready. Use | ||
3467 | * writeq() directly, to allow the write even though QIB_PRESENT | ||
3468 | * isn't set. | ||
3469 | */ | ||
3470 | dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR); | ||
3471 | dd->flags |= QIB_DOING_RESET; | ||
3472 | val = dd->control | QLOGIC_IB_C_RESET; | ||
3473 | writeq(val, &dd->kregbase[kr_control]); | ||
3474 | |||
3475 | for (i = 1; i <= 5; i++) { | ||
3476 | /* | ||
3477 | * Allow MBIST, etc. to complete; longer on each retry. | ||
3478 | * We sometimes get machine checks from bus timeout if no | ||
3479 | * response, so for now, make it *really* long. | ||
3480 | */ | ||
3481 | msleep(1000 + (1 + i) * 3000); | ||
3482 | |||
3483 | qib_pcie_reenable(dd, cmdval, int_line, clinesz); | ||
3484 | |||
3485 | /* | ||
3486 | * Use readq directly, so we don't need to mark it as PRESENT | ||
3487 | * until we get a successful indication that all is well. | ||
3488 | */ | ||
3489 | val = readq(&dd->kregbase[kr_revision]); | ||
3490 | if (val == dd->revision) | ||
3491 | break; | ||
3492 | if (i == 5) { | ||
3493 | qib_dev_err(dd, "Failed to initialize after reset, " | ||
3494 | "unusable\n"); | ||
3495 | ret = 0; | ||
3496 | goto bail; | ||
3497 | } | ||
3498 | } | ||
3499 | |||
3500 | dd->flags |= QIB_PRESENT; /* it's back */ | ||
3501 | |||
3502 | if (msix_entries) { | ||
3503 | /* restore the MSIx vector address and data if saved above */ | ||
3504 | for (i = 0; i < msix_entries; i++) { | ||
3505 | dd->cspec->msix_entries[i].entry = i; | ||
3506 | if (!msix_vecsave || !msix_vecsave[2 * i]) | ||
3507 | continue; | ||
3508 | qib_write_kreg(dd, 2 * i + | ||
3509 | (QIB_7322_MsixTable_OFFS / sizeof(u64)), | ||
3510 | msix_vecsave[2 * i]); | ||
3511 | qib_write_kreg(dd, 1 + 2 * i + | ||
3512 | (QIB_7322_MsixTable_OFFS / sizeof(u64)), | ||
3513 | msix_vecsave[1 + 2 * i]); | ||
3514 | } | ||
3515 | } | ||
3516 | |||
3517 | /* initialize the remaining registers. */ | ||
3518 | for (i = 0; i < dd->num_pports; ++i) | ||
3519 | write_7322_init_portregs(&dd->pport[i]); | ||
3520 | write_7322_initregs(dd); | ||
3521 | |||
3522 | if (qib_pcie_params(dd, dd->lbus_width, | ||
3523 | &dd->cspec->num_msix_entries, | ||
3524 | dd->cspec->msix_entries)) | ||
3525 | qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; " | ||
3526 | "continuing anyway\n"); | ||
3527 | |||
3528 | qib_setup_7322_interrupt(dd, 1); | ||
3529 | |||
3530 | for (i = 0; i < dd->num_pports; ++i) { | ||
3531 | struct qib_pportdata *ppd = &dd->pport[i]; | ||
3532 | |||
3533 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3534 | ppd->lflags |= QIBL_IB_FORCE_NOTIFY; | ||
3535 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
3536 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3537 | } | ||
3538 | |||
3539 | bail: | ||
3540 | dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */ | ||
3541 | kfree(msix_vecsave); | ||
3542 | return ret; | ||
3543 | } | ||
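For clarity on the MSI-X table save/restore done in the reset path above, here is a small sketch of the same layout: the table is treated as two 64-bit words per vector (address at index 2*i, data at 2*i+1), and the per-vector mask bit is cleared from the data word before saving so the restored vector comes back unmasked. The helper name is hypothetical; only the 0x100000000ULL mask bit is taken from the code above.

#include <stdint.h>
#include <stdlib.h>

#define MSIX_VEC_MASKED	0x100000000ULL	/* per-vector mask bit, as above */

/* Save a shadow copy of an MSI-X table laid out as addr/data word pairs. */
static uint64_t *msix_save_table(const uint64_t *table, unsigned nvec)
{
	uint64_t *save;
	unsigned i;

	save = calloc(2 * nvec, sizeof(*save));
	if (!save)
		return NULL;
	for (i = 0; i < nvec; i++) {
		save[2 * i] = table[2 * i];	/* message address */
		/* save the data word without the mask bit set */
		save[2 * i + 1] = table[2 * i + 1] & ~MSIX_VEC_MASKED;
	}
	return save;
}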
3544 | |||
3545 | /** | ||
3546 | * qib_7322_put_tid - write a TID to the chip | ||
3547 | * @dd: the qlogic_ib device | ||
3548 | * @tidptr: pointer to the expected TID (in chip) to update | ||
3549 | * @type: 0 for eager, 1 for expected | ||
3550 | * @pa: physical address of in memory buffer; tidinvalid if freeing | ||
3551 | */ | ||
3552 | static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr, | ||
3553 | u32 type, unsigned long pa) | ||
3554 | { | ||
3555 | if (!(dd->flags & QIB_PRESENT)) | ||
3556 | return; | ||
3557 | if (pa != dd->tidinvalid) { | ||
3558 | u64 chippa = pa >> IBA7322_TID_PA_SHIFT; | ||
3559 | |||
3560 | /* paranoia checks */ | ||
3561 | if (pa != (chippa << IBA7322_TID_PA_SHIFT)) { | ||
3562 | qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n", | ||
3563 | pa); | ||
3564 | return; | ||
3565 | } | ||
3566 | if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) { | ||
3567 | qib_dev_err(dd, "Physical page address 0x%lx " | ||
3568 | "larger than supported\n", pa); | ||
3569 | return; | ||
3570 | } | ||
3571 | |||
3572 | if (type == RCVHQ_RCV_TYPE_EAGER) | ||
3573 | chippa |= dd->tidtemplate; | ||
3574 | else /* for now, always full 4KB page */ | ||
3575 | chippa |= IBA7322_TID_SZ_4K; | ||
3576 | pa = chippa; | ||
3577 | } | ||
3578 | writeq(pa, tidptr); | ||
3579 | mmiowb(); | ||
3580 | } | ||
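The paranoia checks in qib_7322_put_tid() amount to the following packing rule: the buffer physical address is stored right-shifted (2 KB alignment, per the error message), the shift must lose no bits, the shifted value must fit below the size-bit field, and the buffer-size bits are then OR'd in. A hedged standalone sketch, with assumed shift positions (the driver uses IBA7322_TID_PA_SHIFT and IBA7322_TID_SZ_SHIFT):

#include <stdint.h>

#define TID_PA_SHIFT	11	/* 2 KB alignment, per the error message */
#define TID_SZ_SHIFT	37	/* assumed position of the buffer-size bits */
#define TID_SZ_4K	(1ULL << TID_SZ_SHIFT)	/* example szbits value */

/* Pack a buffer physical address and size bits into one TID word. */
static int pack_tid(uint64_t pa, uint64_t szbits, uint64_t *tidword)
{
	uint64_t chippa = pa >> TID_PA_SHIFT;

	if (pa != (chippa << TID_PA_SHIFT))
		return -1;	/* not 2 KB aligned */
	if (chippa >= (1ULL << TID_SZ_SHIFT))
		return -1;	/* address larger than the field can hold */
	*tidword = chippa | szbits;
	return 0;
}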
3581 | |||
3582 | /** | ||
3583 | * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager | ||
3584 | * @dd: the qlogic_ib device | ||
3585 | * @rcd: the ctxt | ||
3586 | * | ||
3587 | * clear all TID entries for a ctxt, expected and eager. | ||
3588 | * Used from qib_close(). | ||
3589 | */ | ||
3590 | static void qib_7322_clear_tids(struct qib_devdata *dd, | ||
3591 | struct qib_ctxtdata *rcd) | ||
3592 | { | ||
3593 | u64 __iomem *tidbase; | ||
3594 | unsigned long tidinv; | ||
3595 | u32 ctxt; | ||
3596 | int i; | ||
3597 | |||
3598 | if (!dd->kregbase || !rcd) | ||
3599 | return; | ||
3600 | |||
3601 | ctxt = rcd->ctxt; | ||
3602 | |||
3603 | tidinv = dd->tidinvalid; | ||
3604 | tidbase = (u64 __iomem *) | ||
3605 | ((char __iomem *) dd->kregbase + | ||
3606 | dd->rcvtidbase + | ||
3607 | ctxt * dd->rcvtidcnt * sizeof(*tidbase)); | ||
3608 | |||
3609 | for (i = 0; i < dd->rcvtidcnt; i++) | ||
3610 | qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, | ||
3611 | tidinv); | ||
3612 | |||
3613 | tidbase = (u64 __iomem *) | ||
3614 | ((char __iomem *) dd->kregbase + | ||
3615 | dd->rcvegrbase + | ||
3616 | rcd->rcvegr_tid_base * sizeof(*tidbase)); | ||
3617 | |||
3618 | for (i = 0; i < rcd->rcvegrcnt; i++) | ||
3619 | qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, | ||
3620 | tidinv); | ||
3621 | } | ||
3622 | |||
3623 | /** | ||
3624 | * qib_7322_tidtemplate - setup constants for TID updates | ||
3625 | * @dd: the qlogic_ib device | ||
3626 | * | ||
3627 | * We set up constants that we use a lot, to avoid calculating them each time | ||
3628 | */ | ||
3629 | static void qib_7322_tidtemplate(struct qib_devdata *dd) | ||
3630 | { | ||
3631 | /* | ||
3632 | * For now, we always allocate 4KB buffers (at init) so we can | ||
3633 | * receive max size packets. We may want a module parameter to | ||
3634 | * specify 2KB or 4KB and/or make it per port instead of per device | ||
3635 | * for those who want to reduce memory footprint. Note that the | ||
3636 | * rcvhdrentsize size must be large enough to hold the largest | ||
3637 | * IB header (currently 96 bytes) that we expect to handle (plus of | ||
3638 | * course the 2 dwords of RHF). | ||
3639 | */ | ||
3640 | if (dd->rcvegrbufsize == 2048) | ||
3641 | dd->tidtemplate = IBA7322_TID_SZ_2K; | ||
3642 | else if (dd->rcvegrbufsize == 4096) | ||
3643 | dd->tidtemplate = IBA7322_TID_SZ_4K; | ||
3644 | dd->tidinvalid = 0; | ||
3645 | } | ||
3646 | |||
3647 | /** | ||
3648 | * qib_init_7322_get_base_info - set chip-specific flags for user code | ||
3649 | * @rcd: the qlogic_ib ctxt | ||
3650 | * @kbase: qib_base_info pointer | ||
3651 | * | ||
3652 | * We set the PCIE flag because the lower bandwidth on PCIe vs | ||
3653 | * HyperTransport can affect some user packet algorithms. | ||
3654 | */ | ||
3655 | |||
3656 | static int qib_7322_get_base_info(struct qib_ctxtdata *rcd, | ||
3657 | struct qib_base_info *kinfo) | ||
3658 | { | ||
3659 | kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP | | ||
3660 | QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL | | ||
3661 | QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA; | ||
3662 | if (rcd->dd->cspec->r1) | ||
3663 | kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK; | ||
3664 | if (rcd->dd->flags & QIB_USE_SPCL_TRIG) | ||
3665 | kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER; | ||
3666 | |||
3667 | return 0; | ||
3668 | } | ||
3669 | |||
3670 | static struct qib_message_header * | ||
3671 | qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr) | ||
3672 | { | ||
3673 | u32 offset = qib_hdrget_offset(rhf_addr); | ||
3674 | |||
3675 | return (struct qib_message_header *) | ||
3676 | (rhf_addr - dd->rhf_offset + offset); | ||
3677 | } | ||
3678 | |||
3679 | /* | ||
3680 | * Configure number of contexts. | ||
3681 | */ | ||
3682 | static void qib_7322_config_ctxts(struct qib_devdata *dd) | ||
3683 | { | ||
3684 | unsigned long flags; | ||
3685 | u32 nchipctxts; | ||
3686 | |||
3687 | nchipctxts = qib_read_kreg32(dd, kr_contextcnt); | ||
3688 | dd->cspec->numctxts = nchipctxts; | ||
3689 | if (qib_n_krcv_queues > 1 && dd->num_pports) { | ||
3690 | /* | ||
3691 | * Set the mask for which bits from the QPN are used | ||
3692 | * to select a context number. | ||
3693 | */ | ||
3694 | dd->qpn_mask = 0x3f; | ||
3695 | dd->first_user_ctxt = NUM_IB_PORTS + | ||
3696 | (qib_n_krcv_queues - 1) * dd->num_pports; | ||
3697 | if (dd->first_user_ctxt > nchipctxts) | ||
3698 | dd->first_user_ctxt = nchipctxts; | ||
3699 | dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports; | ||
3700 | } else { | ||
3701 | dd->first_user_ctxt = NUM_IB_PORTS; | ||
3702 | dd->n_krcv_queues = 1; | ||
3703 | } | ||
3704 | |||
3705 | if (!qib_cfgctxts) { | ||
3706 | int nctxts = dd->first_user_ctxt + num_online_cpus(); | ||
3707 | |||
3708 | if (nctxts <= 6) | ||
3709 | dd->ctxtcnt = 6; | ||
3710 | else if (nctxts <= 10) | ||
3711 | dd->ctxtcnt = 10; | ||
3712 | else if (nctxts <= nchipctxts) | ||
3713 | dd->ctxtcnt = nchipctxts; | ||
3714 | } else if (qib_cfgctxts < dd->num_pports) | ||
3715 | dd->ctxtcnt = dd->num_pports; | ||
3716 | else if (qib_cfgctxts <= nchipctxts) | ||
3717 | dd->ctxtcnt = qib_cfgctxts; | ||
3718 | if (!dd->ctxtcnt) /* none of the above, set to max */ | ||
3719 | dd->ctxtcnt = nchipctxts; | ||
3720 | |||
3721 | /* | ||
3722 | * Chip can be configured for 6, 10, or 18 ctxts, and choice | ||
3723 | * affects number of eager TIDs per ctxt (1K, 2K, 4K). | ||
3724 | * Lock to be paranoid about later motion, etc. | ||
3725 | */ | ||
3726 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
3727 | if (dd->ctxtcnt > 10) | ||
3728 | dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg); | ||
3729 | else if (dd->ctxtcnt > 6) | ||
3730 | dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg); | ||
3731 | /* else configure for default 6 receive ctxts */ | ||
3732 | |||
3733 | /* The XRC opcode is 5. */ | ||
3734 | dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode); | ||
3735 | |||
3736 | /* | ||
3737 | * RcvCtrl *must* be written here so that the | ||
3738 | * chip understands how to change rcvegrcnt below. | ||
3739 | */ | ||
3740 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | ||
3741 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
3742 | |||
3743 | /* kr_rcvegrcnt changes based on the number of contexts enabled */ | ||
3744 | dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); | ||
3745 | dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, | ||
3746 | dd->num_pports > 1 ? 1024U : 2048U); | ||
3747 | } | ||
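To summarize the context-count selection above: the chip supports 6, 10, or 18 receive contexts, and the ContextCfg field is written as 0, 1, or 2 accordingly (a larger context count means fewer eager TIDs per context). A compact sketch of that mapping, with pick_ctxtcnt() as a hypothetical helper name:

/* Map a requested context count onto what the chip supports. */
static unsigned pick_ctxtcnt(unsigned wanted, unsigned nchipctxts,
			     unsigned *contextcfg)
{
	unsigned cnt;

	if (wanted <= 6)
		cnt = 6;
	else if (wanted <= 10)
		cnt = 10;
	else
		cnt = nchipctxts;	/* typically 18 on this chip */

	*contextcfg = (cnt > 10) ? 2 : (cnt > 6) ? 1 : 0;
	return cnt;
}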
3748 | |||
3749 | static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) | ||
3750 | { | ||
3751 | |||
3752 | int lsb, ret = 0; | ||
3753 | u64 maskr; /* right-justified mask */ | ||
3754 | |||
3755 | switch (which) { | ||
3756 | |||
3757 | case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */ | ||
3758 | ret = ppd->link_width_enabled; | ||
3759 | goto done; | ||
3760 | |||
3761 | case QIB_IB_CFG_LWID: /* Get currently active Link-width */ | ||
3762 | ret = ppd->link_width_active; | ||
3763 | goto done; | ||
3764 | |||
3765 | case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */ | ||
3766 | ret = ppd->link_speed_enabled; | ||
3767 | goto done; | ||
3768 | |||
3769 | case QIB_IB_CFG_SPD: /* Get current Link spd */ | ||
3770 | ret = ppd->link_speed_active; | ||
3771 | goto done; | ||
3772 | |||
3773 | case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */ | ||
3774 | lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | ||
3775 | maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | ||
3776 | break; | ||
3777 | |||
3778 | case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */ | ||
3779 | lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | ||
3780 | maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | ||
3781 | break; | ||
3782 | |||
3783 | case QIB_IB_CFG_LINKLATENCY: | ||
3784 | ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) & | ||
3785 | SYM_MASK(IBCStatusB_0, LinkRoundTripLatency); | ||
3786 | goto done; | ||
3787 | |||
3788 | case QIB_IB_CFG_OP_VLS: | ||
3789 | ret = ppd->vls_operational; | ||
3790 | goto done; | ||
3791 | |||
3792 | case QIB_IB_CFG_VL_HIGH_CAP: | ||
3793 | ret = 16; | ||
3794 | goto done; | ||
3795 | |||
3796 | case QIB_IB_CFG_VL_LOW_CAP: | ||
3797 | ret = 16; | ||
3798 | goto done; | ||
3799 | |||
3800 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | ||
3801 | ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | ||
3802 | OverrunThreshold); | ||
3803 | goto done; | ||
3804 | |||
3805 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | ||
3806 | ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | ||
3807 | PhyerrThreshold); | ||
3808 | goto done; | ||
3809 | |||
3810 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | ||
3811 | /* will only take effect when the link state changes */ | ||
3812 | ret = (ppd->cpspec->ibcctrl_a & | ||
3813 | SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ? | ||
3814 | IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL; | ||
3815 | goto done; | ||
3816 | |||
3817 | case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */ | ||
3818 | lsb = IBA7322_IBC_HRTBT_LSB; | ||
3819 | maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */ | ||
3820 | break; | ||
3821 | |||
3822 | case QIB_IB_CFG_PMA_TICKS: | ||
3823 | /* | ||
3824 | * 0x00 = 10x link transfer rate, or 4 ns for 2.5 Gb/s. | ||
3825 | * Since the clock is always 250 MHz, the value is 3, 1 or 0. | ||
3826 | */ | ||
3827 | if (ppd->link_speed_active == QIB_IB_QDR) | ||
3828 | ret = 3; | ||
3829 | else if (ppd->link_speed_active == QIB_IB_DDR) | ||
3830 | ret = 1; | ||
3831 | else | ||
3832 | ret = 0; | ||
3833 | goto done; | ||
3834 | |||
3835 | default: | ||
3836 | ret = -EINVAL; | ||
3837 | goto done; | ||
3838 | } | ||
3839 | ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr); | ||
3840 | done: | ||
3841 | return ret; | ||
3842 | } | ||
3843 | |||
3844 | /* | ||
3845 | * Below again cribbed liberally from older version. Do not lean | ||
3846 | * heavily on it. | ||
3847 | */ | ||
3848 | #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB | ||
3849 | #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \ | ||
3850 | | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16)) | ||
3851 | |||
3852 | static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val) | ||
3853 | { | ||
3854 | struct qib_devdata *dd = ppd->dd; | ||
3855 | u64 maskr; /* right-justified mask */ | ||
3856 | int lsb, ret = 0; | ||
3857 | u16 lcmd, licmd; | ||
3858 | unsigned long flags; | ||
3859 | |||
3860 | switch (which) { | ||
3861 | case QIB_IB_CFG_LIDLMC: | ||
3862 | /* | ||
3863 | * Set LID and LMC. Combined to avoid possible hazard; | ||
3864 | * caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val. | ||
3865 | */ | ||
3866 | lsb = IBA7322_IBC_DLIDLMC_SHIFT; | ||
3867 | maskr = IBA7322_IBC_DLIDLMC_MASK; | ||
3868 | /* | ||
3869 | * For header-checking, the SLID in the packet will | ||
3870 | * be masked with SendIBSLMCMask, and compared | ||
3871 | * with SendIBSLIDAssignMask. Make sure we do not | ||
3872 | * set any bits not covered by the mask, or we get | ||
3873 | * false-positives. | ||
3874 | */ | ||
3875 | qib_write_kreg_port(ppd, krp_sendslid, | ||
3876 | val & (val >> 16) & SendIBSLIDAssignMask); | ||
3877 | qib_write_kreg_port(ppd, krp_sendslidmask, | ||
3878 | (val >> 16) & SendIBSLMCMask); | ||
3879 | break; | ||
3880 | |||
3881 | case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */ | ||
3882 | ppd->link_width_enabled = val; | ||
3883 | /* convert IB value to chip register value */ | ||
3884 | if (val == IB_WIDTH_1X) | ||
3885 | val = 0; | ||
3886 | else if (val == IB_WIDTH_4X) | ||
3887 | val = 1; | ||
3888 | else | ||
3889 | val = 3; | ||
3890 | maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS); | ||
3891 | lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS); | ||
3892 | break; | ||
3893 | |||
3894 | case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */ | ||
3895 | /* | ||
3896 | * As with width, only write the actual register if the | ||
3897 | * link is currently down, otherwise takes effect on next | ||
3898 | * link change. Since setting is being explicitly requested | ||
3899 | * (via MAD or sysfs), clear autoneg failure status if speed | ||
3900 | * autoneg is enabled. | ||
3901 | */ | ||
3902 | ppd->link_speed_enabled = val; | ||
3903 | val <<= IBA7322_IBC_SPEED_LSB; | ||
3904 | maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK | | ||
3905 | IBA7322_IBC_MAX_SPEED_MASK; | ||
3906 | if (val & (val - 1)) { | ||
3907 | /* Multiple speeds enabled */ | ||
3908 | val |= IBA7322_IBC_IBTA_1_2_MASK | | ||
3909 | IBA7322_IBC_MAX_SPEED_MASK; | ||
3910 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
3911 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
3912 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
3913 | } else if (val & IBA7322_IBC_SPEED_QDR) | ||
3914 | val |= IBA7322_IBC_IBTA_1_2_MASK; | ||
3915 | /* IBTA 1.2 mode + min/max + speed bits are contiguous */ | ||
3916 | lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE); | ||
3917 | break; | ||
3918 | |||
3919 | case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */ | ||
3920 | lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | ||
3921 | maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP); | ||
3922 | break; | ||
3923 | |||
3924 | case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */ | ||
3925 | lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | ||
3926 | maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED); | ||
3927 | break; | ||
3928 | |||
3929 | case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ | ||
3930 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | ||
3931 | OverrunThreshold); | ||
3932 | if (maskr != val) { | ||
3933 | ppd->cpspec->ibcctrl_a &= | ||
3934 | ~SYM_MASK(IBCCtrlA_0, OverrunThreshold); | ||
3935 | ppd->cpspec->ibcctrl_a |= (u64) val << | ||
3936 | SYM_LSB(IBCCtrlA_0, OverrunThreshold); | ||
3937 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | ||
3938 | ppd->cpspec->ibcctrl_a); | ||
3939 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
3940 | } | ||
3941 | goto bail; | ||
3942 | |||
3943 | case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ | ||
3944 | maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0, | ||
3945 | PhyerrThreshold); | ||
3946 | if (maskr != val) { | ||
3947 | ppd->cpspec->ibcctrl_a &= | ||
3948 | ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold); | ||
3949 | ppd->cpspec->ibcctrl_a |= (u64) val << | ||
3950 | SYM_LSB(IBCCtrlA_0, PhyerrThreshold); | ||
3951 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | ||
3952 | ppd->cpspec->ibcctrl_a); | ||
3953 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
3954 | } | ||
3955 | goto bail; | ||
3956 | |||
3957 | case QIB_IB_CFG_PKEYS: /* update pkeys */ | ||
3958 | maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) | | ||
3959 | ((u64) ppd->pkeys[2] << 32) | | ||
3960 | ((u64) ppd->pkeys[3] << 48); | ||
3961 | qib_write_kreg_port(ppd, krp_partitionkey, maskr); | ||
3962 | goto bail; | ||
3963 | |||
3964 | case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ | ||
3965 | /* will only take effect when the link state changes */ | ||
3966 | if (val == IB_LINKINITCMD_POLL) | ||
3967 | ppd->cpspec->ibcctrl_a &= | ||
3968 | ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState); | ||
3969 | else /* SLEEP */ | ||
3970 | ppd->cpspec->ibcctrl_a |= | ||
3971 | SYM_MASK(IBCCtrlA_0, LinkDownDefaultState); | ||
3972 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | ||
3973 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
3974 | goto bail; | ||
3975 | |||
3976 | case QIB_IB_CFG_MTU: /* update the MTU in IBC */ | ||
3977 | /* | ||
3978 | * Update our housekeeping variables, and set IBC max | ||
3979 | * size, same as init code; max IBC is max we allow in | ||
3980 | * buffer, less the qword pbc, plus 1 for ICRC, in dwords | ||
3981 | * Set even if it's unchanged, print debug message only | ||
3982 | * on changes. | ||
3983 | */ | ||
3984 | val = (ppd->ibmaxlen >> 2) + 1; | ||
3985 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen); | ||
3986 | ppd->cpspec->ibcctrl_a |= (u64)val << | ||
3987 | SYM_LSB(IBCCtrlA_0, MaxPktLen); | ||
3988 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | ||
3989 | ppd->cpspec->ibcctrl_a); | ||
3990 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
3991 | goto bail; | ||
3992 | |||
3993 | case QIB_IB_CFG_LSTATE: /* set the IB link state */ | ||
3994 | switch (val & 0xffff0000) { | ||
3995 | case IB_LINKCMD_DOWN: | ||
3996 | lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN; | ||
3997 | ppd->cpspec->ibmalfusesnap = 1; | ||
3998 | ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd, | ||
3999 | crp_errlink); | ||
4000 | if (!ppd->cpspec->ibdeltainprog && | ||
4001 | qib_compat_ddr_negotiate) { | ||
4002 | ppd->cpspec->ibdeltainprog = 1; | ||
4003 | ppd->cpspec->ibsymsnap = | ||
4004 | read_7322_creg32_port(ppd, | ||
4005 | crp_ibsymbolerr); | ||
4006 | ppd->cpspec->iblnkerrsnap = | ||
4007 | read_7322_creg32_port(ppd, | ||
4008 | crp_iblinkerrrecov); | ||
4009 | } | ||
4010 | break; | ||
4011 | |||
4012 | case IB_LINKCMD_ARMED: | ||
4013 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED; | ||
4014 | if (ppd->cpspec->ibmalfusesnap) { | ||
4015 | ppd->cpspec->ibmalfusesnap = 0; | ||
4016 | ppd->cpspec->ibmalfdelta += | ||
4017 | read_7322_creg32_port(ppd, | ||
4018 | crp_errlink) - | ||
4019 | ppd->cpspec->ibmalfsnap; | ||
4020 | } | ||
4021 | break; | ||
4022 | |||
4023 | case IB_LINKCMD_ACTIVE: | ||
4024 | lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE; | ||
4025 | break; | ||
4026 | |||
4027 | default: | ||
4028 | ret = -EINVAL; | ||
4029 | qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16); | ||
4030 | goto bail; | ||
4031 | } | ||
4032 | switch (val & 0xffff) { | ||
4033 | case IB_LINKINITCMD_NOP: | ||
4034 | licmd = 0; | ||
4035 | break; | ||
4036 | |||
4037 | case IB_LINKINITCMD_POLL: | ||
4038 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL; | ||
4039 | break; | ||
4040 | |||
4041 | case IB_LINKINITCMD_SLEEP: | ||
4042 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP; | ||
4043 | break; | ||
4044 | |||
4045 | case IB_LINKINITCMD_DISABLE: | ||
4046 | licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE; | ||
4047 | ppd->cpspec->chase_end = 0; | ||
4048 | /* | ||
4049 | * stop state chase counter and timer, if running. | ||
4050 | * wait for pending timer, but don't clear .data (ppd)! | ||
4051 | */ | ||
4052 | if (ppd->cpspec->chase_timer.expires) { | ||
4053 | del_timer_sync(&ppd->cpspec->chase_timer); | ||
4054 | ppd->cpspec->chase_timer.expires = 0; | ||
4055 | } | ||
4056 | break; | ||
4057 | |||
4058 | default: | ||
4059 | ret = -EINVAL; | ||
4060 | qib_dev_err(dd, "bad linkinitcmd req 0x%x\n", | ||
4061 | val & 0xffff); | ||
4062 | goto bail; | ||
4063 | } | ||
4064 | qib_set_ib_7322_lstate(ppd, lcmd, licmd); | ||
4065 | goto bail; | ||
4066 | |||
4067 | case QIB_IB_CFG_OP_VLS: | ||
4068 | if (ppd->vls_operational != val) { | ||
4069 | ppd->vls_operational = val; | ||
4070 | set_vls(ppd); | ||
4071 | } | ||
4072 | goto bail; | ||
4073 | |||
4074 | case QIB_IB_CFG_VL_HIGH_LIMIT: | ||
4075 | qib_write_kreg_port(ppd, krp_highprio_limit, val); | ||
4076 | goto bail; | ||
4077 | |||
4078 | case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ | ||
4079 | if (val > 3) { | ||
4080 | ret = -EINVAL; | ||
4081 | goto bail; | ||
4082 | } | ||
4083 | lsb = IBA7322_IBC_HRTBT_LSB; | ||
4084 | maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */ | ||
4085 | break; | ||
4086 | |||
4087 | case QIB_IB_CFG_PORT: | ||
4088 | /* val is the port number of the switch we are connected to. */ | ||
4089 | if (ppd->dd->cspec->r1) { | ||
4090 | cancel_delayed_work(&ppd->cpspec->ipg_work); | ||
4091 | ppd->cpspec->ipg_tries = 0; | ||
4092 | } | ||
4093 | goto bail; | ||
4094 | |||
4095 | default: | ||
4096 | ret = -EINVAL; | ||
4097 | goto bail; | ||
4098 | } | ||
4099 | ppd->cpspec->ibcctrl_b &= ~(maskr << lsb); | ||
4100 | ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb); | ||
4101 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | ||
4102 | qib_write_kreg(dd, kr_scratch, 0); | ||
4103 | bail: | ||
4104 | return ret; | ||
4105 | } | ||
4106 | |||
4107 | static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what) | ||
4108 | { | ||
4109 | int ret = 0; | ||
4110 | u64 val, ctrlb; | ||
4111 | |||
4112 | /* only IBC loopback, may add serdes and xgxs loopbacks later */ | ||
4113 | if (!strncmp(what, "ibc", 3)) { | ||
4114 | ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, | ||
4115 | Loopback); | ||
4116 | val = 0; /* disable heart beat, so link will come up */ | ||
4117 | qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", | ||
4118 | ppd->dd->unit, ppd->port); | ||
4119 | } else if (!strncmp(what, "off", 3)) { | ||
4120 | ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, | ||
4121 | Loopback); | ||
4122 | /* enable heart beat again */ | ||
4123 | val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB; | ||
4124 | qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback " | ||
4125 | "(normal)\n", ppd->dd->unit, ppd->port); | ||
4126 | } else | ||
4127 | ret = -EINVAL; | ||
4128 | if (!ret) { | ||
4129 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | ||
4130 | ppd->cpspec->ibcctrl_a); | ||
4131 | ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK | ||
4132 | << IBA7322_IBC_HRTBT_LSB); | ||
4133 | ppd->cpspec->ibcctrl_b = ctrlb | val; | ||
4134 | qib_write_kreg_port(ppd, krp_ibcctrl_b, | ||
4135 | ppd->cpspec->ibcctrl_b); | ||
4136 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
4137 | } | ||
4138 | return ret; | ||
4139 | } | ||
4140 | |||
4141 | static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno, | ||
4142 | struct ib_vl_weight_elem *vl) | ||
4143 | { | ||
4144 | unsigned i; | ||
4145 | |||
4146 | for (i = 0; i < 16; i++, regno++, vl++) { | ||
4147 | u32 val = qib_read_kreg_port(ppd, regno); | ||
4148 | |||
4149 | vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) & | ||
4150 | SYM_RMASK(LowPriority0_0, VirtualLane); | ||
4151 | vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) & | ||
4152 | SYM_RMASK(LowPriority0_0, Weight); | ||
4153 | } | ||
4154 | } | ||
4155 | |||
4156 | static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno, | ||
4157 | struct ib_vl_weight_elem *vl) | ||
4158 | { | ||
4159 | unsigned i; | ||
4160 | |||
4161 | for (i = 0; i < 16; i++, regno++, vl++) { | ||
4162 | u64 val; | ||
4163 | |||
4164 | val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) << | ||
4165 | SYM_LSB(LowPriority0_0, VirtualLane)) | | ||
4166 | ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) << | ||
4167 | SYM_LSB(LowPriority0_0, Weight)); | ||
4168 | qib_write_kreg_port(ppd, regno, val); | ||
4169 | } | ||
4170 | if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) { | ||
4171 | struct qib_devdata *dd = ppd->dd; | ||
4172 | unsigned long flags; | ||
4173 | |||
4174 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
4175 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn); | ||
4176 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | ||
4177 | qib_write_kreg(dd, kr_scratch, 0); | ||
4178 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
4179 | } | ||
4180 | } | ||
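Both VL-arbitration helpers above treat each of the 16 table entries as one per-port register holding a virtual-lane field and a weight field. The pack/unpack sketch below uses assumed field positions and widths; the driver derives the real ones from the SYM_LSB()/SYM_RMASK() macros for LowPriority0_0.

#include <stdint.h>

#define VLARB_VL_LSB	0
#define VLARB_VL_MASK	0x7	/* assumed virtual-lane field width */
#define VLARB_WT_LSB	3	/* assumed weight field position */
#define VLARB_WT_MASK	0xff

struct vl_weight_entry {
	uint8_t vl;
	uint8_t weight;
};

static uint32_t vlarb_pack(const struct vl_weight_entry *e)
{
	return ((uint32_t)(e->vl & VLARB_VL_MASK) << VLARB_VL_LSB) |
	       ((uint32_t)(e->weight & VLARB_WT_MASK) << VLARB_WT_LSB);
}

static void vlarb_unpack(uint32_t val, struct vl_weight_entry *e)
{
	e->vl = (val >> VLARB_VL_LSB) & VLARB_VL_MASK;
	e->weight = (val >> VLARB_WT_LSB) & VLARB_WT_MASK;
}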
4181 | |||
4182 | static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t) | ||
4183 | { | ||
4184 | switch (which) { | ||
4185 | case QIB_IB_TBL_VL_HIGH_ARB: | ||
4186 | get_vl_weights(ppd, krp_highprio_0, t); | ||
4187 | break; | ||
4188 | |||
4189 | case QIB_IB_TBL_VL_LOW_ARB: | ||
4190 | get_vl_weights(ppd, krp_lowprio_0, t); | ||
4191 | break; | ||
4192 | |||
4193 | default: | ||
4194 | return -EINVAL; | ||
4195 | } | ||
4196 | return 0; | ||
4197 | } | ||
4198 | |||
4199 | static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t) | ||
4200 | { | ||
4201 | switch (which) { | ||
4202 | case QIB_IB_TBL_VL_HIGH_ARB: | ||
4203 | set_vl_weights(ppd, krp_highprio_0, t); | ||
4204 | break; | ||
4205 | |||
4206 | case QIB_IB_TBL_VL_LOW_ARB: | ||
4207 | set_vl_weights(ppd, krp_lowprio_0, t); | ||
4208 | break; | ||
4209 | |||
4210 | default: | ||
4211 | return -EINVAL; | ||
4212 | } | ||
4213 | return 0; | ||
4214 | } | ||
4215 | |||
4216 | static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, | ||
4217 | u32 updegr, u32 egrhd) | ||
4218 | { | ||
4219 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | ||
4220 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | ||
4221 | if (updegr) | ||
4222 | qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); | ||
4223 | } | ||
4224 | |||
4225 | static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd) | ||
4226 | { | ||
4227 | u32 head, tail; | ||
4228 | |||
4229 | head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); | ||
4230 | if (rcd->rcvhdrtail_kvaddr) | ||
4231 | tail = qib_get_rcvhdrtail(rcd); | ||
4232 | else | ||
4233 | tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); | ||
4234 | return head == tail; | ||
4235 | } | ||
4236 | |||
4237 | #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \ | ||
4238 | QIB_RCVCTRL_CTXT_DIS | \ | ||
4239 | QIB_RCVCTRL_TIDFLOW_ENB | \ | ||
4240 | QIB_RCVCTRL_TIDFLOW_DIS | \ | ||
4241 | QIB_RCVCTRL_TAILUPD_ENB | \ | ||
4242 | QIB_RCVCTRL_TAILUPD_DIS | \ | ||
4243 | QIB_RCVCTRL_INTRAVAIL_ENB | \ | ||
4244 | QIB_RCVCTRL_INTRAVAIL_DIS | \ | ||
4245 | QIB_RCVCTRL_BP_ENB | \ | ||
4246 | QIB_RCVCTRL_BP_DIS) | ||
4247 | |||
4248 | #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \ | ||
4249 | QIB_RCVCTRL_CTXT_DIS | \ | ||
4250 | QIB_RCVCTRL_PKEY_DIS | \ | ||
4251 | QIB_RCVCTRL_PKEY_ENB) | ||
4252 | |||
4253 | /* | ||
4254 | * Modify the RCVCTRL register in chip-specific way. This | ||
4255 | * is a function because bit positions and (future) register | ||
4256 | * location is chip-specific, but the needed operations are | ||
4257 | * generic. <op> is a bit-mask because we often want to | ||
4258 | * do multiple modifications. | ||
4259 | */ | ||
4260 | static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op, | ||
4261 | int ctxt) | ||
4262 | { | ||
4263 | struct qib_devdata *dd = ppd->dd; | ||
4264 | struct qib_ctxtdata *rcd; | ||
4265 | u64 mask, val; | ||
4266 | unsigned long flags; | ||
4267 | |||
4268 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
4269 | |||
4270 | if (op & QIB_RCVCTRL_TIDFLOW_ENB) | ||
4271 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable); | ||
4272 | if (op & QIB_RCVCTRL_TIDFLOW_DIS) | ||
4273 | dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable); | ||
4274 | if (op & QIB_RCVCTRL_TAILUPD_ENB) | ||
4275 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); | ||
4276 | if (op & QIB_RCVCTRL_TAILUPD_DIS) | ||
4277 | dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd); | ||
4278 | if (op & QIB_RCVCTRL_PKEY_ENB) | ||
4279 | ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); | ||
4280 | if (op & QIB_RCVCTRL_PKEY_DIS) | ||
4281 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable); | ||
4282 | if (ctxt < 0) { | ||
4283 | mask = (1ULL << dd->ctxtcnt) - 1; | ||
4284 | rcd = NULL; | ||
4285 | } else { | ||
4286 | mask = (1ULL << ctxt); | ||
4287 | rcd = dd->rcd[ctxt]; | ||
4288 | } | ||
4289 | if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) { | ||
4290 | ppd->p_rcvctrl |= | ||
4291 | (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel)); | ||
4292 | if (!(dd->flags & QIB_NODMA_RTAIL)) { | ||
4293 | op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */ | ||
4294 | dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd); | ||
4295 | } | ||
4296 | /* Write these registers before the context is enabled. */ | ||
4297 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, | ||
4298 | rcd->rcvhdrqtailaddr_phys); | ||
4299 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, | ||
4300 | rcd->rcvhdrq_phys); | ||
4301 | rcd->seq_cnt = 1; | ||
4302 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
4303 | if (dd->flags & QIB_DCA_ENABLED) | ||
4304 | qib_update_rhdrq_dca(rcd); | ||
4305 | #endif | ||
4306 | } | ||
4307 | if (op & QIB_RCVCTRL_CTXT_DIS) | ||
4308 | ppd->p_rcvctrl &= | ||
4309 | ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel)); | ||
4310 | if (op & QIB_RCVCTRL_BP_ENB) | ||
4311 | dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull); | ||
4312 | if (op & QIB_RCVCTRL_BP_DIS) | ||
4313 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull)); | ||
4314 | if (op & QIB_RCVCTRL_INTRAVAIL_ENB) | ||
4315 | dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail)); | ||
4316 | if (op & QIB_RCVCTRL_INTRAVAIL_DIS) | ||
4317 | dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail)); | ||
4318 | /* | ||
4319 | * Decide which registers to write depending on the ops enabled. | ||
4320 | * Special case is "flush" (no bits set at all) | ||
4321 | * which needs to write both. | ||
4322 | */ | ||
4323 | if (op == 0 || (op & RCVCTRL_COMMON_MODS)) | ||
4324 | qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl); | ||
4325 | if (op == 0 || (op & RCVCTRL_PORT_MODS)) | ||
4326 | qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl); | ||
4327 | if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) { | ||
4328 | /* | ||
4329 | * Init the context registers also; if we were | ||
4330 | * disabled, tail and head should both be zero | ||
4331 | * already from the enable, but since we don't | ||
4332 | * know, we have to do it explicitly. | ||
4333 | */ | ||
4334 | val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); | ||
4335 | qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); | ||
4336 | |||
4337 | /* be sure enabling write seen; hd/tl should be 0 */ | ||
4338 | (void) qib_read_kreg32(dd, kr_scratch); | ||
4339 | val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); | ||
4340 | dd->rcd[ctxt]->head = val; | ||
4341 | /* If kctxt, interrupt on next receive. */ | ||
4342 | if (ctxt < dd->first_user_ctxt) | ||
4343 | val |= dd->rhdrhead_intr_off; | ||
4344 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | ||
4345 | } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && | ||
4346 | dd->rcd[ctxt] && dd->rhdrhead_intr_off) { | ||
4347 | /* arm rcv interrupt */ | ||
4348 | val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; | ||
4349 | qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); | ||
4350 | } | ||
4351 | if (op & QIB_RCVCTRL_CTXT_DIS) { | ||
4352 | unsigned f; | ||
4353 | |||
4354 | /* Now that the context is disabled, clear these registers. */ | ||
4355 | if (ctxt >= 0) { | ||
4356 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0); | ||
4357 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0); | ||
4358 | for (f = 0; f < NUM_TIDFLOWS_CTXT; f++) | ||
4359 | qib_write_ureg(dd, ur_rcvflowtable + f, | ||
4360 | TIDFLOW_ERRBITS, ctxt); | ||
4361 | } else { | ||
4362 | unsigned i; | ||
4363 | |||
4364 | for (i = 0; i < dd->cfgctxts; i++) { | ||
4365 | qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, | ||
4366 | i, 0); | ||
4367 | qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0); | ||
4368 | for (f = 0; f < NUM_TIDFLOWS_CTXT; f++) | ||
4369 | qib_write_ureg(dd, ur_rcvflowtable + f, | ||
4370 | TIDFLOW_ERRBITS, i); | ||
4371 | } | ||
4372 | } | ||
4373 | } | ||
4374 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
4375 | } | ||
4376 | |||
4377 | /* | ||
4378 | * Modify the SENDCTRL register in chip-specific way. This | ||
4379 | * is a function where there are multiple such registers with | ||
4380 | * slightly different layouts. | ||
4381 | * The chip doesn't allow back-to-back sendctrl writes, so write | ||
4382 | * the scratch register after writing sendctrl. | ||
4383 | * | ||
4384 | * Which register is written depends on the operation. | ||
4385 | * Most operate on the common register, while | ||
4386 | * SEND_ENB and SEND_DIS operate on the per-port ones. | ||
4387 | * SEND_ENB is included in common because it can change SPCL_TRIG | ||
4388 | */ | ||
4389 | #define SENDCTRL_COMMON_MODS (\ | ||
4390 | QIB_SENDCTRL_CLEAR | \ | ||
4391 | QIB_SENDCTRL_AVAIL_DIS | \ | ||
4392 | QIB_SENDCTRL_AVAIL_ENB | \ | ||
4393 | QIB_SENDCTRL_AVAIL_BLIP | \ | ||
4394 | QIB_SENDCTRL_DISARM | \ | ||
4395 | QIB_SENDCTRL_DISARM_ALL | \ | ||
4396 | QIB_SENDCTRL_SEND_ENB) | ||
4397 | |||
4398 | #define SENDCTRL_PORT_MODS (\ | ||
4399 | QIB_SENDCTRL_CLEAR | \ | ||
4400 | QIB_SENDCTRL_SEND_ENB | \ | ||
4401 | QIB_SENDCTRL_SEND_DIS | \ | ||
4402 | QIB_SENDCTRL_FLUSH) | ||
4403 | |||
4404 | static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op) | ||
4405 | { | ||
4406 | struct qib_devdata *dd = ppd->dd; | ||
4407 | u64 tmp_dd_sendctrl; | ||
4408 | unsigned long flags; | ||
4409 | |||
4410 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
4411 | |||
4412 | /* First the dd ones that are "sticky", saved in shadow */ | ||
4413 | if (op & QIB_SENDCTRL_CLEAR) | ||
4414 | dd->sendctrl = 0; | ||
4415 | if (op & QIB_SENDCTRL_AVAIL_DIS) | ||
4416 | dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
4417 | else if (op & QIB_SENDCTRL_AVAIL_ENB) { | ||
4418 | dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
4419 | if (dd->flags & QIB_USE_SPCL_TRIG) | ||
4420 | dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn); | ||
4421 | } | ||
4422 | |||
4423 | /* Then the ppd ones that are "sticky", saved in shadow */ | ||
4424 | if (op & QIB_SENDCTRL_SEND_DIS) | ||
4425 | ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable); | ||
4426 | else if (op & QIB_SENDCTRL_SEND_ENB) | ||
4427 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable); | ||
4428 | |||
4429 | if (op & QIB_SENDCTRL_DISARM_ALL) { | ||
4430 | u32 i, last; | ||
4431 | |||
4432 | tmp_dd_sendctrl = dd->sendctrl; | ||
4433 | last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | ||
4434 | /* | ||
4435 | * Disarm any buffers that are not yet launched, | ||
4436 | * disabling updates until done. | ||
4437 | */ | ||
4438 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
4439 | for (i = 0; i < last; i++) { | ||
4440 | qib_write_kreg(dd, kr_sendctrl, | ||
4441 | tmp_dd_sendctrl | | ||
4442 | SYM_MASK(SendCtrl, Disarm) | i); | ||
4443 | qib_write_kreg(dd, kr_scratch, 0); | ||
4444 | } | ||
4445 | } | ||
4446 | |||
4447 | if (op & QIB_SENDCTRL_FLUSH) { | ||
4448 | u64 tmp_ppd_sendctrl = ppd->p_sendctrl; | ||
4449 | |||
4450 | /* | ||
4451 | * Now drain all the fifos. The Abort bit should never be | ||
4452 | * needed, so for now, at least, we don't use it. | ||
4453 | */ | ||
4454 | tmp_ppd_sendctrl |= | ||
4455 | SYM_MASK(SendCtrl_0, TxeDrainRmFifo) | | ||
4456 | SYM_MASK(SendCtrl_0, TxeDrainLaFifo) | | ||
4457 | SYM_MASK(SendCtrl_0, TxeBypassIbc); | ||
4458 | qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl); | ||
4459 | qib_write_kreg(dd, kr_scratch, 0); | ||
4460 | } | ||
4461 | |||
4462 | tmp_dd_sendctrl = dd->sendctrl; | ||
4463 | |||
4464 | if (op & QIB_SENDCTRL_DISARM) | ||
4465 | tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) | | ||
4466 | ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) << | ||
4467 | SYM_LSB(SendCtrl, DisarmSendBuf)); | ||
4468 | if ((op & QIB_SENDCTRL_AVAIL_BLIP) && | ||
4469 | (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd))) | ||
4470 | tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd); | ||
4471 | |||
4472 | if (op == 0 || (op & SENDCTRL_COMMON_MODS)) { | ||
4473 | qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl); | ||
4474 | qib_write_kreg(dd, kr_scratch, 0); | ||
4475 | } | ||
4476 | |||
4477 | if (op == 0 || (op & SENDCTRL_PORT_MODS)) { | ||
4478 | qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl); | ||
4479 | qib_write_kreg(dd, kr_scratch, 0); | ||
4480 | } | ||
4481 | |||
4482 | if (op & QIB_SENDCTRL_AVAIL_BLIP) { | ||
4483 | qib_write_kreg(dd, kr_sendctrl, dd->sendctrl); | ||
4484 | qib_write_kreg(dd, kr_scratch, 0); | ||
4485 | } | ||
4486 | |||
4487 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
4488 | |||
4489 | if (op & QIB_SENDCTRL_FLUSH) { | ||
4490 | u32 v; | ||
4491 | /* | ||
4492 | * ensure writes have hit chip, then do a few | ||
4493 | * more reads, to allow DMA of pioavail registers | ||
4494 | * to occur, so in-memory copy is in sync with | ||
4495 | * the chip. Not always safe to sleep. | ||
4496 | */ | ||
4497 | v = qib_read_kreg32(dd, kr_scratch); | ||
4498 | qib_write_kreg(dd, kr_scratch, v); | ||
4499 | v = qib_read_kreg32(dd, kr_scratch); | ||
4500 | qib_write_kreg(dd, kr_scratch, v); | ||
4501 | qib_read_kreg32(dd, kr_scratch); | ||
4502 | } | ||
4503 | } | ||
4504 | |||
4505 | #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */ | ||
4506 | #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */ | ||
4507 | #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */ | ||
4508 | |||
4509 | /** | ||
4510 | * qib_portcntr_7322 - read a per-port chip counter | ||
4511 | * @ppd: the qlogic_ib pport | ||
4512 | * @reg: the counter to read (not a chip offset) | ||
4513 | */ | ||
4514 | static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg) | ||
4515 | { | ||
4516 | struct qib_devdata *dd = ppd->dd; | ||
4517 | u64 ret = 0ULL; | ||
4518 | u16 creg; | ||
4519 | /* 0xffff for unimplemented or synthesized counters */ | ||
4520 | static const u32 xlator[] = { | ||
4521 | [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG, | ||
4522 | [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG, | ||
4523 | [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount, | ||
4524 | [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount, | ||
4525 | [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount, | ||
4526 | [QIBPORTCNTR_SENDSTALL] = crp_sendstall, | ||
4527 | [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG, | ||
4528 | [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount, | ||
4529 | [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount, | ||
4530 | [QIBPORTCNTR_RCVEBP] = crp_rcvebp, | ||
4531 | [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl, | ||
4532 | [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG, | ||
4533 | [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */ | ||
4534 | [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr, | ||
4535 | [QIBPORTCNTR_RXVLERR] = crp_rxvlerr, | ||
4536 | [QIBPORTCNTR_ERRICRC] = crp_erricrc, | ||
4537 | [QIBPORTCNTR_ERRVCRC] = crp_errvcrc, | ||
4538 | [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc, | ||
4539 | [QIBPORTCNTR_BADFORMAT] = crp_badformat, | ||
4540 | [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen, | ||
4541 | [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr, | ||
4542 | [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen, | ||
4543 | [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl, | ||
4544 | [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl, | ||
4545 | [QIBPORTCNTR_ERRLINK] = crp_errlink, | ||
4546 | [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown, | ||
4547 | [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov, | ||
4548 | [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr, | ||
4549 | [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt, | ||
4550 | [QIBPORTCNTR_ERRPKEY] = crp_errpkey, | ||
4551 | /* | ||
4552 | * the next 3 aren't really counters, but were implemented | ||
4553 | * as counters in older chips, so still get accessed as | ||
4554 | * though they were counters from this code. | ||
4555 | */ | ||
4556 | [QIBPORTCNTR_PSINTERVAL] = krp_psinterval, | ||
4557 | [QIBPORTCNTR_PSSTART] = krp_psstart, | ||
4558 | [QIBPORTCNTR_PSSTAT] = krp_psstat, | ||
4559 | /* pseudo-counter, summed for all ports */ | ||
4560 | [QIBPORTCNTR_KHDROVFL] = 0xffff, | ||
4561 | }; | ||
4562 | |||
4563 | if (reg >= ARRAY_SIZE(xlator)) { | ||
4564 | qib_devinfo(ppd->dd->pcidev, | ||
4565 | "Unimplemented portcounter %u\n", reg); | ||
4566 | goto done; | ||
4567 | } | ||
4568 | creg = xlator[reg] & _PORT_CNTR_IDXMASK; | ||
4569 | |||
4570 | /* handle non-counters and special cases first */ | ||
4571 | if (reg == QIBPORTCNTR_KHDROVFL) { | ||
4572 | int i; | ||
4573 | |||
4574 | /* sum over all kernel contexts (skip if mini_init) */ | ||
4575 | for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) { | ||
4576 | struct qib_ctxtdata *rcd = dd->rcd[i]; | ||
4577 | |||
4578 | if (!rcd || rcd->ppd != ppd) | ||
4579 | continue; | ||
4580 | ret += read_7322_creg32(dd, cr_base_egrovfl + i); | ||
4581 | } | ||
4582 | goto done; | ||
4583 | } else if (reg == QIBPORTCNTR_RXDROPPKT) { | ||
4584 | /* | ||
4585 | * Used as part of the synthesis of port_rcv_errors | ||
4586 | * in the verbs code for IBTA counters. Not needed for 7322, | ||
4587 | * because all the errors are already counted by other cntrs. | ||
4588 | */ | ||
4589 | goto done; | ||
4590 | } else if (reg == QIBPORTCNTR_PSINTERVAL || | ||
4591 | reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) { | ||
4592 | /* were counters in older chips, now per-port kernel regs */ | ||
4593 | ret = qib_read_kreg_port(ppd, creg); | ||
4594 | goto done; | ||
4595 | } | ||
4596 | |||
4597 | /* | ||
4598 | * Only fast increment counters are 64 bits; use 32 bit reads to | ||
4599 | * avoid two independent reads when on Opteron. | ||
4600 | */ | ||
4601 | if (xlator[reg] & _PORT_64BIT_FLAG) | ||
4602 | ret = read_7322_creg_port(ppd, creg); | ||
4603 | else | ||
4604 | ret = read_7322_creg32_port(ppd, creg); | ||
4605 | if (creg == crp_ibsymbolerr) { | ||
4606 | if (ppd->cpspec->ibdeltainprog) | ||
4607 | ret -= ret - ppd->cpspec->ibsymsnap; | ||
4608 | ret -= ppd->cpspec->ibsymdelta; | ||
4609 | } else if (creg == crp_iblinkerrrecov) { | ||
4610 | if (ppd->cpspec->ibdeltainprog) | ||
4611 | ret -= ret - ppd->cpspec->iblnkerrsnap; | ||
4612 | ret -= ppd->cpspec->iblnkerrdelta; | ||
4613 | } else if (creg == crp_errlink) | ||
4614 | ret -= ppd->cpspec->ibmalfdelta; | ||
4615 | else if (creg == crp_iblinkdown) | ||
4616 | ret += ppd->cpspec->iblnkdowndelta; | ||
4617 | done: | ||
4618 | return ret; | ||
4619 | } | ||
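The tail of qib_portcntr_7322() corrects the symbol-error and link-error-recovery counts so that errors generated by the driver's own speed-negotiation windows never reach the user: while a window is in progress the value is pinned to a snapshot, and errors attributed to completed windows are subtracted as a delta. A standalone sketch of that correction with invented numbers (not driver code):

#include <stdio.h>
#include <stdint.h>

static uint64_t report(uint64_t raw, int inprog, uint64_t snap, uint64_t delta)
{
	if (inprog)
		raw -= raw - snap;  /* freeze at the snapshot taken at window start */
	return raw - delta;         /* hide errors from completed windows */
}

int main(void)
{
	/* window in progress: raw climbed from 10 to 17, user still sees 10 */
	printf("%llu\n", (unsigned long long)report(17, 1, 10, 0));
	/* window done: 7 errors were attributed to autoneg, folded into delta */
	printf("%llu\n", (unsigned long long)report(25, 0, 10, 7));
	return 0;
}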
4620 | |||
4621 | /* | ||
4622 | * Device counter names (not port-specific), one line per stat, | ||
4623 | * single string. Used by utilities like ipathstats to print the stats | ||
4624 | * in a way which works for different versions of drivers, without changing | ||
4625 | * the utility. Names need to be 12 chars or less (w/o newline), for proper | ||
4626 | * display by utility. | ||
4627 | * Non-error counters are first. | ||
4628 | * Start of "error" conters is indicated by a leading "E " on the first | ||
4629 | * "error" counter, and doesn't count in label length. | ||
4630 | * The EgrOvfl list needs to be last so we truncate them at the configured | ||
4631 | * context count for the device. | ||
4632 | * cntr7322indices contains the corresponding register indices. | ||
4633 | */ | ||
4634 | static const char cntr7322names[] = | ||
4635 | "Interrupts\n" | ||
4636 | "HostBusStall\n" | ||
4637 | "E RxTIDFull\n" | ||
4638 | "RxTIDInvalid\n" | ||
4639 | "RxTIDFloDrop\n" /* 7322 only */ | ||
4640 | "Ctxt0EgrOvfl\n" | ||
4641 | "Ctxt1EgrOvfl\n" | ||
4642 | "Ctxt2EgrOvfl\n" | ||
4643 | "Ctxt3EgrOvfl\n" | ||
4644 | "Ctxt4EgrOvfl\n" | ||
4645 | "Ctxt5EgrOvfl\n" | ||
4646 | "Ctxt6EgrOvfl\n" | ||
4647 | "Ctxt7EgrOvfl\n" | ||
4648 | "Ctxt8EgrOvfl\n" | ||
4649 | "Ctxt9EgrOvfl\n" | ||
4650 | "Ctx10EgrOvfl\n" | ||
4651 | "Ctx11EgrOvfl\n" | ||
4652 | "Ctx12EgrOvfl\n" | ||
4653 | "Ctx13EgrOvfl\n" | ||
4654 | "Ctx14EgrOvfl\n" | ||
4655 | "Ctx15EgrOvfl\n" | ||
4656 | "Ctx16EgrOvfl\n" | ||
4657 | "Ctx17EgrOvfl\n" | ||
4658 | ; | ||
4659 | |||
4660 | static const u32 cntr7322indices[] = { | ||
4661 | cr_lbint | _PORT_64BIT_FLAG, | ||
4662 | cr_lbstall | _PORT_64BIT_FLAG, | ||
4663 | cr_tidfull, | ||
4664 | cr_tidinvalid, | ||
4665 | cr_rxtidflowdrop, | ||
4666 | cr_base_egrovfl + 0, | ||
4667 | cr_base_egrovfl + 1, | ||
4668 | cr_base_egrovfl + 2, | ||
4669 | cr_base_egrovfl + 3, | ||
4670 | cr_base_egrovfl + 4, | ||
4671 | cr_base_egrovfl + 5, | ||
4672 | cr_base_egrovfl + 6, | ||
4673 | cr_base_egrovfl + 7, | ||
4674 | cr_base_egrovfl + 8, | ||
4675 | cr_base_egrovfl + 9, | ||
4676 | cr_base_egrovfl + 10, | ||
4677 | cr_base_egrovfl + 11, | ||
4678 | cr_base_egrovfl + 12, | ||
4679 | cr_base_egrovfl + 13, | ||
4680 | cr_base_egrovfl + 14, | ||
4681 | cr_base_egrovfl + 15, | ||
4682 | cr_base_egrovfl + 16, | ||
4683 | cr_base_egrovfl + 17, | ||
4684 | }; | ||
4685 | |||
4686 | /* | ||
4687 | * same as cntr7322names and cntr7322indices, but for port-specific counters. | ||
4688 | * portcntr7322indices is somewhat complicated by some registers needing | ||
4689 | * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG | ||
4690 | */ | ||
4691 | static const char portcntr7322names[] = | ||
4692 | "TxPkt\n" | ||
4693 | "TxFlowPkt\n" | ||
4694 | "TxWords\n" | ||
4695 | "RxPkt\n" | ||
4696 | "RxFlowPkt\n" | ||
4697 | "RxWords\n" | ||
4698 | "TxFlowStall\n" | ||
4699 | "TxDmaDesc\n" /* 7220 and 7322-only */ | ||
4700 | "E RxDlidFltr\n" /* 7220 and 7322-only */ | ||
4701 | "IBStatusChng\n" | ||
4702 | "IBLinkDown\n" | ||
4703 | "IBLnkRecov\n" | ||
4704 | "IBRxLinkErr\n" | ||
4705 | "IBSymbolErr\n" | ||
4706 | "RxLLIErr\n" | ||
4707 | "RxBadFormat\n" | ||
4708 | "RxBadLen\n" | ||
4709 | "RxBufOvrfl\n" | ||
4710 | "RxEBP\n" | ||
4711 | "RxFlowCtlErr\n" | ||
4712 | "RxICRCerr\n" | ||
4713 | "RxLPCRCerr\n" | ||
4714 | "RxVCRCerr\n" | ||
4715 | "RxInvalLen\n" | ||
4716 | "RxInvalPKey\n" | ||
4717 | "RxPktDropped\n" | ||
4718 | "TxBadLength\n" | ||
4719 | "TxDropped\n" | ||
4720 | "TxInvalLen\n" | ||
4721 | "TxUnderrun\n" | ||
4722 | "TxUnsupVL\n" | ||
4723 | "RxLclPhyErr\n" /* 7220 and 7322-only from here down */ | ||
4724 | "RxVL15Drop\n" | ||
4725 | "RxVlErr\n" | ||
4726 | "XcessBufOvfl\n" | ||
4727 | "RxQPBadCtxt\n" /* 7322-only from here down */ | ||
4728 | "TXBadHeader\n" | ||
4729 | ; | ||
4730 | |||
4731 | static const u32 portcntr7322indices[] = { | ||
4732 | QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG, | ||
4733 | crp_pktsendflow, | ||
4734 | QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG, | ||
4735 | QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG, | ||
4736 | crp_pktrcvflowctrl, | ||
4737 | QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG, | ||
4738 | QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG, | ||
4739 | crp_txsdmadesc | _PORT_64BIT_FLAG, | ||
4740 | crp_rxdlidfltr, | ||
4741 | crp_ibstatuschange, | ||
4742 | QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG, | ||
4743 | QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG, | ||
4744 | QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG, | ||
4745 | QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG, | ||
4746 | QIBPORTCNTR_LLI | _PORT_VIRT_FLAG, | ||
4747 | QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG, | ||
4748 | QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG, | ||
4749 | QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG, | ||
4750 | QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG, | ||
4751 | crp_rcvflowctrlviol, | ||
4752 | QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG, | ||
4753 | QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG, | ||
4754 | QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG, | ||
4755 | QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG, | ||
4756 | QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG, | ||
4757 | QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG, | ||
4758 | crp_txminmaxlenerr, | ||
4759 | crp_txdroppedpkt, | ||
4760 | crp_txlenerr, | ||
4761 | crp_txunderrun, | ||
4762 | crp_txunsupvl, | ||
4763 | QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG, | ||
4764 | QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG, | ||
4765 | QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG, | ||
4766 | QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG, | ||
4767 | crp_rxqpinvalidctxt, | ||
4768 | crp_txhdrerr, | ||
4769 | }; | ||
4770 | |||
4771 | /* do all the setup to make the counter reads efficient later */ | ||
4772 | static void init_7322_cntrnames(struct qib_devdata *dd) | ||
4773 | { | ||
4774 | int i, j = 0; | ||
4775 | char *s; | ||
4776 | |||
4777 | for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts; | ||
4778 | i++) { | ||
4779 | /* we always have at least one counter before the egrovfl */ | ||
4780 | if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12)) | ||
4781 | j = 1; | ||
4782 | s = strchr(s + 1, '\n'); | ||
4783 | if (s && j) | ||
4784 | j++; | ||
4785 | } | ||
4786 | dd->cspec->ncntrs = i; | ||
4787 | if (!s) | ||
4788 | /* full list; size is without terminating null */ | ||
4789 | dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1; | ||
4790 | else | ||
4791 | dd->cspec->cntrnamelen = 1 + s - cntr7322names; | ||
4792 | dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs | ||
4793 | * sizeof(u64), GFP_KERNEL); | ||
4794 | if (!dd->cspec->cntrs) | ||
4795 | qib_dev_err(dd, "Failed allocation for counters\n"); | ||
4796 | |||
4797 | for (i = 0, s = (char *)portcntr7322names; s; i++) | ||
4798 | s = strchr(s + 1, '\n'); | ||
4799 | dd->cspec->nportcntrs = i - 1; | ||
4800 | dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1; | ||
4801 | for (i = 0; i < dd->num_pports; ++i) { | ||
4802 | dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs | ||
4803 | * sizeof(u64), GFP_KERNEL); | ||
4804 | if (!dd->pport[i].cpspec->portcntrs) | ||
4805 | qib_dev_err(dd, "Failed allocation for" | ||
4806 | " portcounters\n"); | ||
4807 | } | ||
4808 | } | ||
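init_7322_cntrnames() above walks the single newline-separated name string and stops counting EgrOvfl entries once the configured context count is reached, so utilities never see names for contexts that do not exist. A standalone sketch of that truncation idea, using an invented, much shorter name list (the driver's loop differs in detail):

#include <stdio.h>
#include <string.h>

static const char names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n";

int main(void)
{
	unsigned cfgctxts = 2;          /* pretend only 2 contexts are configured */
	unsigned ncntrs = 0, nctxt = 0;
	size_t namelen = 0;
	const char *s = names;

	while (*s) {
		const char *nl = strchr(s, '\n');
		size_t len = nl ? (size_t)(nl - s) + 1 : strlen(s);

		if (!strncmp(s, "Ctxt", 4) && nctxt++ >= cfgctxts)
			break;          /* drop EgrOvfl names beyond cfgctxts */
		ncntrs++;
		namelen += len;
		s += len;
	}
	printf("ncntrs=%u namelen=%zu\n", ncntrs, namelen);
	printf("%.*s", (int)namelen, names);
	return 0;
}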
4809 | |||
4810 | static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep, | ||
4811 | u64 **cntrp) | ||
4812 | { | ||
4813 | u32 ret; | ||
4814 | |||
4815 | if (namep) { | ||
4816 | ret = dd->cspec->cntrnamelen; | ||
4817 | if (pos >= ret) | ||
4818 | ret = 0; /* final read after getting everything */ | ||
4819 | else | ||
4820 | *namep = (char *) cntr7322names; | ||
4821 | } else { | ||
4822 | u64 *cntr = dd->cspec->cntrs; | ||
4823 | int i; | ||
4824 | |||
4825 | ret = dd->cspec->ncntrs * sizeof(u64); | ||
4826 | if (!cntr || pos >= ret) { | ||
4827 | /* everything read, or couldn't get memory */ | ||
4828 | ret = 0; | ||
4829 | goto done; | ||
4830 | } | ||
4831 | *cntrp = cntr; | ||
4832 | for (i = 0; i < dd->cspec->ncntrs; i++) | ||
4833 | if (cntr7322indices[i] & _PORT_64BIT_FLAG) | ||
4834 | *cntr++ = read_7322_creg(dd, | ||
4835 | cntr7322indices[i] & | ||
4836 | _PORT_CNTR_IDXMASK); | ||
4837 | else | ||
4838 | *cntr++ = read_7322_creg32(dd, | ||
4839 | cntr7322indices[i]); | ||
4840 | } | ||
4841 | done: | ||
4842 | return ret; | ||
4843 | } | ||
4844 | |||
4845 | static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port, | ||
4846 | char **namep, u64 **cntrp) | ||
4847 | { | ||
4848 | u32 ret; | ||
4849 | |||
4850 | if (namep) { | ||
4851 | ret = dd->cspec->portcntrnamelen; | ||
4852 | if (pos >= ret) | ||
4853 | ret = 0; /* final read after getting everything */ | ||
4854 | else | ||
4855 | *namep = (char *)portcntr7322names; | ||
4856 | } else { | ||
4857 | struct qib_pportdata *ppd = &dd->pport[port]; | ||
4858 | u64 *cntr = ppd->cpspec->portcntrs; | ||
4859 | int i; | ||
4860 | |||
4861 | ret = dd->cspec->nportcntrs * sizeof(u64); | ||
4862 | if (!cntr || pos >= ret) { | ||
4863 | /* everything read, or couldn't get memory */ | ||
4864 | ret = 0; | ||
4865 | goto done; | ||
4866 | } | ||
4867 | *cntrp = cntr; | ||
4868 | for (i = 0; i < dd->cspec->nportcntrs; i++) { | ||
4869 | if (portcntr7322indices[i] & _PORT_VIRT_FLAG) | ||
4870 | *cntr++ = qib_portcntr_7322(ppd, | ||
4871 | portcntr7322indices[i] & | ||
4872 | _PORT_CNTR_IDXMASK); | ||
4873 | else if (portcntr7322indices[i] & _PORT_64BIT_FLAG) | ||
4874 | *cntr++ = read_7322_creg_port(ppd, | ||
4875 | portcntr7322indices[i] & | ||
4876 | _PORT_CNTR_IDXMASK); | ||
4877 | else | ||
4878 | *cntr++ = read_7322_creg32_port(ppd, | ||
4879 | portcntr7322indices[i]); | ||
4880 | } | ||
4881 | } | ||
4882 | done: | ||
4883 | return ret; | ||
4884 | } | ||
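Both read helpers follow the same two-phase contract: a call with namep non-NULL returns the name-string length (or 0 once pos has reached it), and a call with cntrp non-NULL fills the value array and returns its size in bytes. Below is a self-contained mock of that contract and of a consumer doing the two passes; read_cntrs, mock_names, and mock_vals are invented stand-ins, not driver symbols:

#include <stdio.h>
#include <stdint.h>

static const char mock_names[] = "Interrupts\nHostBusStall\n";
static uint64_t mock_vals[2] = { 123, 456 };

/* same shape as the driver helper: returns bytes available, 0 at "EOF" */
static unsigned read_cntrs(unsigned long pos, char **namep, uint64_t **cntrp)
{
	if (namep) {
		unsigned len = sizeof(mock_names) - 1;
		if (pos >= len)
			return 0;
		*namep = (char *)mock_names;
		return len;
	}
	if (pos >= sizeof(mock_vals))
		return 0;
	*cntrp = mock_vals;
	return sizeof(mock_vals);
}

int main(void)
{
	char *names;
	uint64_t *vals;
	unsigned n;

	n = read_cntrs(0, &names, NULL);        /* pass 1: names */
	printf("%u name bytes:\n%.*s", n, (int)n, names);
	printf("eof=%u\n", read_cntrs(n, &names, NULL));

	n = read_cntrs(0, NULL, &vals);         /* pass 2: values */
	for (unsigned i = 0; i < n / sizeof(*vals); i++)
		printf("cntr[%u]=%llu\n", i, (unsigned long long)vals[i]);
	return 0;
}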
4885 | |||
4886 | /** | ||
4887 | * qib_get_7322_faststats - get word counters from chip before they overflow | ||
4888 | * @opaque - contains a pointer to the qlogic_ib device qib_devdata | ||
4889 | * | ||
4890 | * VESTIGIAL: the IBA7322 has no "small fast counters", so the only | ||
4891 | * real purpose of this function is to maintain the notion of | ||
4892 | * "active time", which in turn is only logged into the eeprom, | ||
4893 | * which we don't have yet for 7322-based boards. | ||
4894 | * | ||
4895 | * called from add_timer | ||
4896 | */ | ||
4897 | static void qib_get_7322_faststats(unsigned long opaque) | ||
4898 | { | ||
4899 | struct qib_devdata *dd = (struct qib_devdata *) opaque; | ||
4900 | struct qib_pportdata *ppd; | ||
4901 | unsigned long flags; | ||
4902 | u64 traffic_wds; | ||
4903 | int pidx; | ||
4904 | |||
4905 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
4906 | ppd = dd->pport + pidx; | ||
4907 | |||
4908 | /* | ||
4909 | * If the port isn't enabled, or isn't operational, or | ||
4910 | * diags is running (which can cause memory diags to fail), | ||
4911 | * skip this port this time. | ||
4912 | */ | ||
4913 | if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED) | ||
4914 | || dd->diag_client) | ||
4915 | continue; | ||
4916 | |||
4917 | /* | ||
4918 | * Maintain an activity timer, based on traffic | ||
4919 | * exceeding a threshold, so we need to check the word-counts | ||
4920 | * even if they are 64-bit. | ||
4921 | */ | ||
4922 | traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) + | ||
4923 | qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND); | ||
4924 | spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); | ||
4925 | traffic_wds -= ppd->dd->traffic_wds; | ||
4926 | ppd->dd->traffic_wds += traffic_wds; | ||
4927 | if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) | ||
4928 | atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time); | ||
4929 | spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); | ||
4930 | if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & | ||
4931 | QIB_IB_QDR) && | ||
4932 | (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | | ||
4933 | QIBL_LINKACTIVE)) && | ||
4934 | ppd->cpspec->qdr_dfe_time && | ||
4935 | time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) { | ||
4936 | ppd->cpspec->qdr_dfe_on = 0; | ||
4937 | |||
4938 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | ||
4939 | ppd->dd->cspec->r1 ? | ||
4940 | QDR_STATIC_ADAPT_INIT_R1 : | ||
4941 | QDR_STATIC_ADAPT_INIT); | ||
4942 | force_h1(ppd); | ||
4943 | } | ||
4944 | } | ||
4945 | mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); | ||
4946 | } | ||
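The faststats timer only cares about the change in the send+receive word counts since the previous sample: the delta is folded into a running total and, when it exceeds the activity threshold, the port is credited with another timer period of "active time". A rough standalone illustration of that sampling pattern, with made-up numbers and names:

#include <stdio.h>
#include <stdint.h>

#define ACTIVE_THRESHOLD 2048   /* words per sample that count as "active" */
#define SAMPLE_SECS      5      /* pretend timer period */

int main(void)
{
	uint64_t samples[] = { 0, 100, 5000, 5100, 90000 }; /* cumulative words */
	uint64_t last = 0, active_secs = 0;

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t delta = samples[i] - last;
		last = samples[i];
		if (delta >= ACTIVE_THRESHOLD)
			active_secs += SAMPLE_SECS;
		printf("sample %u: delta=%llu active_secs=%llu\n", i,
		       (unsigned long long)delta,
		       (unsigned long long)active_secs);
	}
	return 0;
}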
4947 | |||
4948 | /* | ||
4949 | * If we were using MSI-X, try to fall back to INTx. | ||
4950 | */ | ||
4951 | static int qib_7322_intr_fallback(struct qib_devdata *dd) | ||
4952 | { | ||
4953 | if (!dd->cspec->num_msix_entries) | ||
4954 | return 0; /* already using INTx */ | ||
4955 | |||
4956 | qib_devinfo(dd->pcidev, "MSIx interrupt not detected," | ||
4957 | " trying INTx interrupts\n"); | ||
4958 | qib_7322_nomsix(dd); | ||
4959 | qib_enable_intx(dd->pcidev); | ||
4960 | qib_setup_7322_interrupt(dd, 0); | ||
4961 | return 1; | ||
4962 | } | ||
4963 | |||
4964 | /* | ||
4965 | * Reset the XGXS (between serdes and IBC). Slightly less intrusive | ||
4966 | * than resetting the IBC or external link state, and useful in some | ||
4967 | * cases to cause some retraining. To do this right, we reset IBC | ||
4968 | * as well, then return to previous state (which may be still in reset) | ||
4969 | * NOTE: some callers of this "know" this writes the current value | ||
4970 | * of cpspec->ibcctrl_a as part of its operation, so if that changes, | ||
4971 | * check all callers. | ||
4972 | */ | ||
4973 | static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd) | ||
4974 | { | ||
4975 | u64 val; | ||
4976 | struct qib_devdata *dd = ppd->dd; | ||
4977 | const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) | | ||
4978 | SYM_MASK(IBPCSConfig_0, xcv_treset) | | ||
4979 | SYM_MASK(IBPCSConfig_0, tx_rx_reset); | ||
4980 | |||
4981 | val = qib_read_kreg_port(ppd, krp_ib_pcsconfig); | ||
4982 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | ||
4983 | ppd->cpspec->ibcctrl_a & | ||
4984 | ~SYM_MASK(IBCCtrlA_0, IBLinkEn)); | ||
4985 | |||
4986 | qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits); | ||
4987 | qib_read_kreg32(dd, kr_scratch); | ||
4988 | qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits); | ||
4989 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | ||
4990 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
4991 | } | ||
4992 | |||
4993 | /* | ||
4994 | * This code for non-IBTA-compliant IB speed negotiation is only known to | ||
4995 | * work for the SDR to DDR transition, and only between an HCA and a switch | ||
4996 | * with recent firmware. It is based on observed heuristics, rather than | ||
4997 | * actual knowledge of the non-compliant speed negotiation. | ||
4998 | * It has a number of hard-coded fields, since the hope is to rewrite this | ||
4999 | * when a spec is available on how the negotiation is intended to work. | ||
5000 | */ | ||
5001 | static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr, | ||
5002 | u32 dcnt, u32 *data) | ||
5003 | { | ||
5004 | int i; | ||
5005 | u64 pbc; | ||
5006 | u32 __iomem *piobuf; | ||
5007 | u32 pnum, control, len; | ||
5008 | struct qib_devdata *dd = ppd->dd; | ||
5009 | |||
5010 | i = 0; | ||
5011 | len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */ | ||
5012 | control = qib_7322_setpbc_control(ppd, len, 0, 15); | ||
5013 | pbc = ((u64) control << 32) | len; | ||
5014 | while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) { | ||
5015 | if (i++ > 15) | ||
5016 | return; | ||
5017 | udelay(2); | ||
5018 | } | ||
5019 | /* disable header check on this packet, since it can't be valid */ | ||
5020 | dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL); | ||
5021 | writeq(pbc, piobuf); | ||
5022 | qib_flush_wc(); | ||
5023 | qib_pio_copy(piobuf + 2, hdr, 7); | ||
5024 | qib_pio_copy(piobuf + 9, data, dcnt); | ||
5025 | if (dd->flags & QIB_USE_SPCL_TRIG) { | ||
5026 | u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023; | ||
5027 | |||
5028 | qib_flush_wc(); | ||
5029 | __raw_writel(0xaebecede, piobuf + spcl_off); | ||
5030 | } | ||
5031 | qib_flush_wc(); | ||
5032 | qib_sendbuf_done(dd, pnum); | ||
5033 | /* and re-enable hdr check */ | ||
5034 | dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL); | ||
5035 | } | ||
5036 | |||
5037 | /* | ||
5038 | * _start packet gets sent twice at start, _done gets sent twice at end | ||
5039 | */ | ||
5040 | static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which) | ||
5041 | { | ||
5042 | struct qib_devdata *dd = ppd->dd; | ||
5043 | static u32 swapped; | ||
5044 | u32 dw, i, hcnt, dcnt, *data; | ||
5045 | static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba }; | ||
5046 | static u32 madpayload_start[0x40] = { | ||
5047 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | ||
5048 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | ||
5049 | 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */ | ||
5050 | }; | ||
5051 | static u32 madpayload_done[0x40] = { | ||
5052 | 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0, | ||
5053 | 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, | ||
5054 | 0x40000001, 0x1388, 0x15e, /* rest 0's */ | ||
5055 | }; | ||
5056 | |||
5057 | dcnt = ARRAY_SIZE(madpayload_start); | ||
5058 | hcnt = ARRAY_SIZE(hdr); | ||
5059 | if (!swapped) { | ||
5060 | /* for maintainability, do it at runtime */ | ||
5061 | for (i = 0; i < hcnt; i++) { | ||
5062 | dw = (__force u32) cpu_to_be32(hdr[i]); | ||
5063 | hdr[i] = dw; | ||
5064 | } | ||
5065 | for (i = 0; i < dcnt; i++) { | ||
5066 | dw = (__force u32) cpu_to_be32(madpayload_start[i]); | ||
5067 | madpayload_start[i] = dw; | ||
5068 | dw = (__force u32) cpu_to_be32(madpayload_done[i]); | ||
5069 | madpayload_done[i] = dw; | ||
5070 | } | ||
5071 | swapped = 1; | ||
5072 | } | ||
5073 | |||
5074 | data = which ? madpayload_done : madpayload_start; | ||
5075 | |||
5076 | autoneg_7322_sendpkt(ppd, hdr, dcnt, data); | ||
5077 | qib_read_kreg64(dd, kr_scratch); | ||
5078 | udelay(2); | ||
5079 | autoneg_7322_sendpkt(ppd, hdr, dcnt, data); | ||
5080 | qib_read_kreg64(dd, kr_scratch); | ||
5081 | udelay(2); | ||
5082 | } | ||
5083 | |||
5084 | /* | ||
5085 | * Do the absolute minimum to cause an IB speed change, and make it | ||
5086 | * ready, but don't actually trigger the change. The caller will | ||
5087 | * do that when ready (if link is in Polling training state, it will | ||
5088 | * happen immediately, otherwise when link next goes down) | ||
5089 | * | ||
5090 | * This routine should only be used as part of the DDR autonegotiation | ||
5091 | * code for devices that are not compliant with IB 1.2 (or code that | ||
5092 | * fixes things up for same). | ||
5093 | * | ||
5094 | * When the link has gone down and autoneg is enabled, or autoneg has | ||
5095 | * failed and we give up until next time, we set both speeds, and | ||
5096 | * then we want IBTA negotiation enabled as well as "use max enabled speed". | ||
5097 | */ | ||
5098 | static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) | ||
5099 | { | ||
5100 | u64 newctrlb; | ||
5101 | newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | | ||
5102 | IBA7322_IBC_IBTA_1_2_MASK | | ||
5103 | IBA7322_IBC_MAX_SPEED_MASK); | ||
5104 | |||
5105 | if (speed & (speed - 1)) /* multiple speeds */ | ||
5106 | newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) | | ||
5107 | IBA7322_IBC_IBTA_1_2_MASK | | ||
5108 | IBA7322_IBC_MAX_SPEED_MASK; | ||
5109 | else | ||
5110 | newctrlb |= speed == QIB_IB_QDR ? | ||
5111 | IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK : | ||
5112 | ((speed == QIB_IB_DDR ? | ||
5113 | IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR)); | ||
5114 | |||
5115 | if (newctrlb == ppd->cpspec->ibcctrl_b) | ||
5116 | return; | ||
5117 | |||
5118 | ppd->cpspec->ibcctrl_b = newctrlb; | ||
5119 | qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b); | ||
5120 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
5121 | } | ||
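The `speed & (speed - 1)` test above works because speed is a bitmask of the SDR/DDR/QDR bits: clearing the lowest set bit leaves something non-zero exactly when more than one speed bit was set. A tiny standalone demonstration (values chosen to mirror the usual 1/2/4 encoding, but purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned sdr = 1, ddr = 2, qdr = 4;
	unsigned speeds[] = { sdr, ddr, qdr, sdr | ddr, sdr | ddr | qdr };

	for (unsigned i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
		printf("speed=0x%x multiple=%s\n", speeds[i],
		       (speeds[i] & (speeds[i] - 1)) ? "yes" : "no");
	return 0;
}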
5122 | |||
5123 | /* | ||
5124 | * This routine is only used when we are not talking to another | ||
5125 | * IB 1.2-compliant device that we think can do DDR. | ||
5126 | * (This includes all existing switch chips as of Oct 2007.) | ||
5127 | * 1.2-compliant devices go directly to DDR prior to reaching INIT | ||
5128 | */ | ||
5129 | static void try_7322_autoneg(struct qib_pportdata *ppd) | ||
5130 | { | ||
5131 | unsigned long flags; | ||
5132 | |||
5133 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
5134 | ppd->lflags |= QIBL_IB_AUTONEG_INPROG; | ||
5135 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
5136 | qib_autoneg_7322_send(ppd, 0); | ||
5137 | set_7322_ibspeed_fast(ppd, QIB_IB_DDR); | ||
5138 | qib_7322_mini_pcs_reset(ppd); | ||
5139 | /* 2 msec is minimum length of a poll cycle */ | ||
5140 | schedule_delayed_work(&ppd->cpspec->autoneg_work, | ||
5141 | msecs_to_jiffies(2)); | ||
5142 | } | ||
5143 | |||
5144 | /* | ||
5145 | * Handle the empirically determined mechanism for auto-negotiation | ||
5146 | * of DDR speed with switches. | ||
5147 | */ | ||
5148 | static void autoneg_7322_work(struct work_struct *work) | ||
5149 | { | ||
5150 | struct qib_pportdata *ppd; | ||
5151 | struct qib_devdata *dd; | ||
5152 | u64 startms; | ||
5153 | u32 i; | ||
5154 | unsigned long flags; | ||
5155 | |||
5156 | ppd = container_of(work, struct qib_chippport_specific, | ||
5157 | autoneg_work.work)->ppd; | ||
5158 | dd = ppd->dd; | ||
5159 | |||
5160 | startms = jiffies_to_msecs(jiffies); | ||
5161 | |||
5162 | /* | ||
5163 | * Busy wait for this first part; it should be at most a | ||
5164 | * few hundred usec, since we scheduled ourselves for 2msec. | ||
5165 | */ | ||
5166 | for (i = 0; i < 25; i++) { | ||
5167 | if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState) | ||
5168 | == IB_7322_LT_STATE_POLLQUIET) { | ||
5169 | qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE); | ||
5170 | break; | ||
5171 | } | ||
5172 | udelay(100); | ||
5173 | } | ||
5174 | |||
5175 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
5176 | goto done; /* we got there early or told to stop */ | ||
5177 | |||
5178 | /* we expect this to timeout */ | ||
5179 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
5180 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
5181 | msecs_to_jiffies(90))) | ||
5182 | goto done; | ||
5183 | qib_7322_mini_pcs_reset(ppd); | ||
5184 | |||
5185 | /* we expect this to timeout */ | ||
5186 | if (wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
5187 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
5188 | msecs_to_jiffies(1700))) | ||
5189 | goto done; | ||
5190 | qib_7322_mini_pcs_reset(ppd); | ||
5191 | |||
5192 | set_7322_ibspeed_fast(ppd, QIB_IB_SDR); | ||
5193 | |||
5194 | /* | ||
5195 | * Wait up to 250 msec for link to train and get to INIT at DDR; | ||
5196 | * this should terminate early. | ||
5197 | */ | ||
5198 | wait_event_timeout(ppd->cpspec->autoneg_wait, | ||
5199 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG), | ||
5200 | msecs_to_jiffies(250)); | ||
5201 | done: | ||
5202 | if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) { | ||
5203 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
5204 | ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG; | ||
5205 | if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) { | ||
5206 | ppd->lflags |= QIBL_IB_AUTONEG_FAILED; | ||
5207 | ppd->cpspec->autoneg_tries = 0; | ||
5208 | } | ||
5209 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
5210 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | ||
5211 | } | ||
5212 | } | ||
5213 | |||
5214 | /* | ||
5215 | * This routine is used to request that the IPG be set in the QLogic switch. | ||
5216 | * Only called if r1. | ||
5217 | */ | ||
5218 | static void try_7322_ipg(struct qib_pportdata *ppd) | ||
5219 | { | ||
5220 | struct qib_ibport *ibp = &ppd->ibport_data; | ||
5221 | struct ib_mad_send_buf *send_buf; | ||
5222 | struct ib_mad_agent *agent; | ||
5223 | struct ib_smp *smp; | ||
5224 | unsigned delay; | ||
5225 | int ret; | ||
5226 | |||
5227 | agent = ibp->send_agent; | ||
5228 | if (!agent) | ||
5229 | goto retry; | ||
5230 | |||
5231 | send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, | ||
5232 | IB_MGMT_MAD_DATA, GFP_ATOMIC); | ||
5233 | if (IS_ERR(send_buf)) | ||
5234 | goto retry; | ||
5235 | |||
5236 | if (!ibp->smi_ah) { | ||
5237 | struct ib_ah_attr attr; | ||
5238 | struct ib_ah *ah; | ||
5239 | |||
5240 | memset(&attr, 0, sizeof attr); | ||
5241 | attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE); | ||
5242 | attr.port_num = ppd->port; | ||
5243 | ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); | ||
5244 | if (IS_ERR(ah)) | ||
5245 | ret = -EINVAL; | ||
5246 | else { | ||
5247 | send_buf->ah = ah; | ||
5248 | ibp->smi_ah = to_iah(ah); | ||
5249 | ret = 0; | ||
5250 | } | ||
5251 | } else { | ||
5252 | send_buf->ah = &ibp->smi_ah->ibah; | ||
5253 | ret = 0; | ||
5254 | } | ||
5255 | |||
5256 | smp = send_buf->mad; | ||
5257 | smp->base_version = IB_MGMT_BASE_VERSION; | ||
5258 | smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE; | ||
5259 | smp->class_version = 1; | ||
5260 | smp->method = IB_MGMT_METHOD_SEND; | ||
5261 | smp->hop_cnt = 1; | ||
5262 | smp->attr_id = QIB_VENDOR_IPG; | ||
5263 | smp->attr_mod = 0; | ||
5264 | |||
5265 | if (!ret) | ||
5266 | ret = ib_post_send_mad(send_buf, NULL); | ||
5267 | if (ret) | ||
5268 | ib_free_send_mad(send_buf); | ||
5269 | retry: | ||
5270 | delay = 2 << ppd->cpspec->ipg_tries; | ||
5271 | schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay)); | ||
5272 | } | ||
5273 | |||
5274 | /* | ||
5275 | * Timeout handler for setting IPG. | ||
5276 | * Only called if r1. | ||
5277 | */ | ||
5278 | static void ipg_7322_work(struct work_struct *work) | ||
5279 | { | ||
5280 | struct qib_pportdata *ppd; | ||
5281 | |||
5282 | ppd = container_of(work, struct qib_chippport_specific, | ||
5283 | ipg_work.work)->ppd; | ||
5284 | if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE)) | ||
5285 | && ++ppd->cpspec->ipg_tries <= 10) | ||
5286 | try_7322_ipg(ppd); | ||
5287 | } | ||
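Between try_7322_ipg() and ipg_7322_work() the IPG request is retried with an exponentially growing delay of `2 << ipg_tries` milliseconds, and the work handler stops rescheduling after 10 tries while the link remains up. A trivial standalone print of that schedule (illustration only):

#include <stdio.h>

int main(void)
{
	for (unsigned tries = 0; tries <= 10; tries++)
		printf("try %2u: reschedule in %u ms\n", tries, 2u << tries);
	return 0;
}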
5288 | |||
5289 | static u32 qib_7322_iblink_state(u64 ibcs) | ||
5290 | { | ||
5291 | u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState); | ||
5292 | |||
5293 | switch (state) { | ||
5294 | case IB_7322_L_STATE_INIT: | ||
5295 | state = IB_PORT_INIT; | ||
5296 | break; | ||
5297 | case IB_7322_L_STATE_ARM: | ||
5298 | state = IB_PORT_ARMED; | ||
5299 | break; | ||
5300 | case IB_7322_L_STATE_ACTIVE: | ||
5301 | /* fall through */ | ||
5302 | case IB_7322_L_STATE_ACT_DEFER: | ||
5303 | state = IB_PORT_ACTIVE; | ||
5304 | break; | ||
5305 | default: /* fall through */ | ||
5306 | case IB_7322_L_STATE_DOWN: | ||
5307 | state = IB_PORT_DOWN; | ||
5308 | break; | ||
5309 | } | ||
5310 | return state; | ||
5311 | } | ||
5312 | |||
5313 | /* returns the IBTA port state, rather than the IBC link training state */ | ||
5314 | static u8 qib_7322_phys_portstate(u64 ibcs) | ||
5315 | { | ||
5316 | u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState); | ||
5317 | return qib_7322_physportstate[state]; | ||
5318 | } | ||
5319 | |||
5320 | static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) | ||
5321 | { | ||
5322 | int ret = 0, symadj = 0; | ||
5323 | unsigned long flags; | ||
5324 | int mult; | ||
5325 | |||
5326 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
5327 | ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY; | ||
5328 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
5329 | |||
5330 | /* Update our picture of width and speed from chip */ | ||
5331 | if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) { | ||
5332 | ppd->link_speed_active = QIB_IB_QDR; | ||
5333 | mult = 4; | ||
5334 | } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) { | ||
5335 | ppd->link_speed_active = QIB_IB_DDR; | ||
5336 | mult = 2; | ||
5337 | } else { | ||
5338 | ppd->link_speed_active = QIB_IB_SDR; | ||
5339 | mult = 1; | ||
5340 | } | ||
5341 | if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) { | ||
5342 | ppd->link_width_active = IB_WIDTH_4X; | ||
5343 | mult *= 4; | ||
5344 | } else | ||
5345 | ppd->link_width_active = IB_WIDTH_1X; | ||
5346 | ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)]; | ||
5347 | |||
5348 | if (!ibup) { | ||
5349 | u64 clr; | ||
5350 | |||
5351 | /* Link went down. */ | ||
5352 | /* do IPG MAD again after linkdown, even if last time failed */ | ||
5353 | ppd->cpspec->ipg_tries = 0; | ||
5354 | clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) & | ||
5355 | (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) | | ||
5356 | SYM_MASK(IBCStatusB_0, heartbeat_crosstalk)); | ||
5357 | if (clr) | ||
5358 | qib_write_kreg_port(ppd, krp_ibcstatus_b, clr); | ||
5359 | if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | ||
5360 | QIBL_IB_AUTONEG_INPROG))) | ||
5361 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | ||
5362 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | ||
5363 | qib_cancel_sends(ppd); | ||
5364 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
5365 | if (__qib_sdma_running(ppd)) | ||
5366 | __qib_sdma_process_event(ppd, | ||
5367 | qib_sdma_event_e70_go_idle); | ||
5368 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
5369 | } | ||
5370 | clr = read_7322_creg32_port(ppd, crp_iblinkdown); | ||
5371 | if (clr == ppd->cpspec->iblnkdownsnap) | ||
5372 | ppd->cpspec->iblnkdowndelta++; | ||
5373 | } else { | ||
5374 | if (qib_compat_ddr_negotiate && | ||
5375 | !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED | | ||
5376 | QIBL_IB_AUTONEG_INPROG)) && | ||
5377 | ppd->link_speed_active == QIB_IB_SDR && | ||
5378 | (ppd->link_speed_enabled & QIB_IB_DDR) | ||
5379 | && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) { | ||
5380 | /* we are SDR, and auto-negotiation enabled */ | ||
5381 | ++ppd->cpspec->autoneg_tries; | ||
5382 | if (!ppd->cpspec->ibdeltainprog) { | ||
5383 | ppd->cpspec->ibdeltainprog = 1; | ||
5384 | ppd->cpspec->ibsymdelta += | ||
5385 | read_7322_creg32_port(ppd, | ||
5386 | crp_ibsymbolerr) - | ||
5387 | ppd->cpspec->ibsymsnap; | ||
5388 | ppd->cpspec->iblnkerrdelta += | ||
5389 | read_7322_creg32_port(ppd, | ||
5390 | crp_iblinkerrrecov) - | ||
5391 | ppd->cpspec->iblnkerrsnap; | ||
5392 | } | ||
5393 | try_7322_autoneg(ppd); | ||
5394 | ret = 1; /* no other IB status change processing */ | ||
5395 | } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | ||
5396 | ppd->link_speed_active == QIB_IB_SDR) { | ||
5397 | qib_autoneg_7322_send(ppd, 1); | ||
5398 | set_7322_ibspeed_fast(ppd, QIB_IB_DDR); | ||
5399 | qib_7322_mini_pcs_reset(ppd); | ||
5400 | udelay(2); | ||
5401 | ret = 1; /* no other IB status change processing */ | ||
5402 | } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) && | ||
5403 | (ppd->link_speed_active & QIB_IB_DDR)) { | ||
5404 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
5405 | ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG | | ||
5406 | QIBL_IB_AUTONEG_FAILED); | ||
5407 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
5408 | ppd->cpspec->autoneg_tries = 0; | ||
5409 | /* re-enable SDR, for next link down */ | ||
5410 | set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); | ||
5411 | wake_up(&ppd->cpspec->autoneg_wait); | ||
5412 | symadj = 1; | ||
5413 | } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) { | ||
5414 | /* | ||
5415 | * Clear autoneg failure flag, and do setup | ||
5416 | * so we'll try next time link goes down and | ||
5417 | * back to INIT (possibly connected to a | ||
5418 | * different device). | ||
5419 | */ | ||
5420 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
5421 | ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED; | ||
5422 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
5423 | ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK; | ||
5424 | symadj = 1; | ||
5425 | } | ||
5426 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | ||
5427 | symadj = 1; | ||
5428 | if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10) | ||
5429 | try_7322_ipg(ppd); | ||
5430 | if (!ppd->cpspec->recovery_init) | ||
5431 | setup_7322_link_recovery(ppd, 0); | ||
5432 | ppd->cpspec->qdr_dfe_time = jiffies + | ||
5433 | msecs_to_jiffies(QDR_DFE_DISABLE_DELAY); | ||
5434 | } | ||
5435 | ppd->cpspec->ibmalfusesnap = 0; | ||
5436 | ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd, | ||
5437 | crp_errlink); | ||
5438 | } | ||
5439 | if (symadj) { | ||
5440 | ppd->cpspec->iblnkdownsnap = | ||
5441 | read_7322_creg32_port(ppd, crp_iblinkdown); | ||
5442 | if (ppd->cpspec->ibdeltainprog) { | ||
5443 | ppd->cpspec->ibdeltainprog = 0; | ||
5444 | ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd, | ||
5445 | crp_ibsymbolerr) - ppd->cpspec->ibsymsnap; | ||
5446 | ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd, | ||
5447 | crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap; | ||
5448 | } | ||
5449 | } else if (!ibup && qib_compat_ddr_negotiate && | ||
5450 | !ppd->cpspec->ibdeltainprog && | ||
5451 | !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { | ||
5452 | ppd->cpspec->ibdeltainprog = 1; | ||
5453 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, | ||
5454 | crp_ibsymbolerr); | ||
5455 | ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd, | ||
5456 | crp_iblinkerrrecov); | ||
5457 | } | ||
5458 | |||
5459 | if (!ret) | ||
5460 | qib_setup_7322_setextled(ppd, ibup); | ||
5461 | return ret; | ||
5462 | } | ||
5463 | |||
5464 | /* | ||
5465 | * Does read/modify/write to appropriate registers to | ||
5466 | * set output and direction bits selected by mask. | ||
5467 | * These are in their canonical positions (e.g. the lsb of | ||
5468 | * dir will end up in D48 of extctrl on existing chips). | ||
5469 | * Returns the contents of GP Inputs. | ||
5470 | */ | ||
5471 | static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask) | ||
5472 | { | ||
5473 | u64 read_val, new_out; | ||
5474 | unsigned long flags; | ||
5475 | |||
5476 | if (mask) { | ||
5477 | /* some bits being written, lock access to GPIO */ | ||
5478 | dir &= mask; | ||
5479 | out &= mask; | ||
5480 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
5481 | dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe)); | ||
5482 | dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe)); | ||
5483 | new_out = (dd->cspec->gpio_out & ~mask) | out; | ||
5484 | |||
5485 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | ||
5486 | qib_write_kreg(dd, kr_gpio_out, new_out); | ||
5487 | dd->cspec->gpio_out = new_out; | ||
5488 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
5489 | } | ||
5490 | /* | ||
5491 | * It is unlikely that a read at this time would get valid | ||
5492 | * data on a pin whose direction line was set in the same | ||
5493 | * call to this function. We include the read here because | ||
5494 | * that allows us to potentially combine a change on one pin with | ||
5495 | * a read on another, and because the old code did something like | ||
5496 | * this. | ||
5497 | */ | ||
5498 | read_val = qib_read_kreg64(dd, kr_extstatus); | ||
5499 | return SYM_FIELD(read_val, EXTStatus, GPIOIn); | ||
5500 | } | ||
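gpio_7322_mod() changes only the bits selected by mask in the shadowed direction and output values, leaving every other bit untouched, and then writes the shadows back to the chip. A userspace sketch of the same masked read-modify-write, using plain variables in place of the extctrl/gpio_out shadows (names and values invented):

#include <stdio.h>
#include <stdint.h>

static uint64_t shadow_dir, shadow_out;   /* stand-ins for extctrl/gpio_out */

static void gpio_mod(uint32_t out, uint32_t dir, uint32_t mask)
{
	dir &= mask;
	out &= mask;
	shadow_dir = (shadow_dir & ~(uint64_t)mask) | dir;
	shadow_out = (shadow_out & ~(uint64_t)mask) | out;
}

int main(void)
{
	shadow_dir = 0xf0;
	shadow_out = 0x0f;
	gpio_mod(0x2, 0x3, 0x3);    /* touch bits 0-1 only */
	printf("dir=0x%llx out=0x%llx\n",
	       (unsigned long long)shadow_dir, (unsigned long long)shadow_out);
	return 0;
}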
5501 | |||
5502 | /* Enable writes to config EEPROM, if possible. Returns previous state */ | ||
5503 | static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen) | ||
5504 | { | ||
5505 | int prev_wen; | ||
5506 | u32 mask; | ||
5507 | |||
5508 | mask = 1 << QIB_EEPROM_WEN_NUM; | ||
5509 | prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM; | ||
5510 | gpio_7322_mod(dd, wen ? 0 : mask, mask, mask); | ||
5511 | |||
5512 | return prev_wen & 1; | ||
5513 | } | ||
5514 | |||
5515 | /* | ||
5516 | * Read fundamental info we need to use the chip. These are | ||
5517 | * the registers that describe chip capabilities, and are | ||
5518 | * saved in shadow registers. | ||
5519 | */ | ||
5520 | static void get_7322_chip_params(struct qib_devdata *dd) | ||
5521 | { | ||
5522 | u64 val; | ||
5523 | u32 piobufs; | ||
5524 | int mtu; | ||
5525 | |||
5526 | dd->palign = qib_read_kreg32(dd, kr_pagealign); | ||
5527 | |||
5528 | dd->uregbase = qib_read_kreg32(dd, kr_userregbase); | ||
5529 | |||
5530 | dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt); | ||
5531 | dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase); | ||
5532 | dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase); | ||
5533 | dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase); | ||
5534 | dd->pio2k_bufbase = dd->piobufbase & 0xffffffff; | ||
5535 | |||
5536 | val = qib_read_kreg64(dd, kr_sendpiobufcnt); | ||
5537 | dd->piobcnt2k = val & ~0U; | ||
5538 | dd->piobcnt4k = val >> 32; | ||
5539 | val = qib_read_kreg64(dd, kr_sendpiosize); | ||
5540 | dd->piosize2k = val & ~0U; | ||
5541 | dd->piosize4k = val >> 32; | ||
5542 | |||
5543 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | ||
5544 | if (mtu == -1) | ||
5545 | mtu = QIB_DEFAULT_MTU; | ||
5546 | dd->pport[0].ibmtu = (u32)mtu; | ||
5547 | dd->pport[1].ibmtu = (u32)mtu; | ||
5548 | |||
5549 | /* these may be adjusted in init_chip_wc_pat() */ | ||
5550 | dd->pio2kbase = (u32 __iomem *) | ||
5551 | ((char __iomem *) dd->kregbase + dd->pio2k_bufbase); | ||
5552 | dd->pio4kbase = (u32 __iomem *) | ||
5553 | ((char __iomem *) dd->kregbase + | ||
5554 | (dd->piobufbase >> 32)); | ||
5555 | /* | ||
5556 | * 4K buffers take 2 pages; we use roundup just to be | ||
5557 | * paranoid; we calculate it once here, rather than on | ||
5558 | * every buffer allocation. | ||
5559 | */ | ||
5560 | dd->align4k = ALIGN(dd->piosize4k, dd->palign); | ||
5561 | |||
5562 | piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS; | ||
5563 | |||
5564 | dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / | ||
5565 | (sizeof(u64) * BITS_PER_BYTE / 2); | ||
5566 | } | ||
5567 | |||
5568 | /* | ||
5569 | * The chip base addresses in cspec and cpspec have to be set | ||
5570 | * after possible init_chip_wc_pat(), rather than in | ||
5571 | * get_7322_chip_params(), so split out as separate function | ||
5572 | */ | ||
5573 | static void qib_7322_set_baseaddrs(struct qib_devdata *dd) | ||
5574 | { | ||
5575 | u32 cregbase; | ||
5576 | cregbase = qib_read_kreg32(dd, kr_counterregbase); | ||
5577 | |||
5578 | dd->cspec->cregbase = (u64 __iomem *)(cregbase + | ||
5579 | (char __iomem *)dd->kregbase); | ||
5580 | |||
5581 | dd->egrtidbase = (u64 __iomem *) | ||
5582 | ((char __iomem *) dd->kregbase + dd->rcvegrbase); | ||
5583 | |||
5584 | /* port registers are defined as relative to base of chip */ | ||
5585 | dd->pport[0].cpspec->kpregbase = | ||
5586 | (u64 __iomem *)((char __iomem *)dd->kregbase); | ||
5587 | dd->pport[1].cpspec->kpregbase = | ||
5588 | (u64 __iomem *)(dd->palign + | ||
5589 | (char __iomem *)dd->kregbase); | ||
5590 | dd->pport[0].cpspec->cpregbase = | ||
5591 | (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0], | ||
5592 | kr_counterregbase) + (char __iomem *)dd->kregbase); | ||
5593 | dd->pport[1].cpspec->cpregbase = | ||
5594 | (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1], | ||
5595 | kr_counterregbase) + (char __iomem *)dd->kregbase); | ||
5596 | } | ||
5597 | |||
5598 | /* | ||
5599 | * This is a fairly special-purpose observer, so we only support | ||
5600 | * the port-specific parts of SendCtrl | ||
5601 | */ | ||
5602 | |||
5603 | #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \ | ||
5604 | SYM_MASK(SendCtrl_0, SDmaEnable) | \ | ||
5605 | SYM_MASK(SendCtrl_0, SDmaIntEnable) | \ | ||
5606 | SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \ | ||
5607 | SYM_MASK(SendCtrl_0, SDmaHalt) | \ | ||
5608 | SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \ | ||
5609 | SYM_MASK(SendCtrl_0, ForceCreditUpToDate)) | ||
5610 | |||
5611 | static int sendctrl_hook(struct qib_devdata *dd, | ||
5612 | const struct diag_observer *op, u32 offs, | ||
5613 | u64 *data, u64 mask, int only_32) | ||
5614 | { | ||
5615 | unsigned long flags; | ||
5616 | unsigned idx; | ||
5617 | unsigned pidx; | ||
5618 | struct qib_pportdata *ppd = NULL; | ||
5619 | u64 local_data, all_bits; | ||
5620 | |||
5621 | /* | ||
5622 | * The fixed correspondence between Physical ports and pports is | ||
5623 | * severed. We need to hunt for the ppd that corresponds | ||
5624 | * to the offset we got. And we have to do that without admitting | ||
5625 | * we know the stride, apparently. | ||
5626 | */ | ||
5627 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
5628 | u64 __iomem *psptr; | ||
5629 | u32 psoffs; | ||
5630 | |||
5631 | ppd = dd->pport + pidx; | ||
5632 | if (!ppd->cpspec->kpregbase) | ||
5633 | continue; | ||
5634 | |||
5635 | psptr = ppd->cpspec->kpregbase + krp_sendctrl; | ||
5636 | psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr); | ||
5637 | if (psoffs == offs) | ||
5638 | break; | ||
5639 | } | ||
5640 | |||
5641 | /* If pport is not being managed by driver, just avoid shadows. */ | ||
5642 | if (pidx >= dd->num_pports) | ||
5643 | ppd = NULL; | ||
5644 | |||
5645 | /* In any case, "idx" is flat index in kreg space */ | ||
5646 | idx = offs / sizeof(u64); | ||
5647 | |||
5648 | all_bits = ~0ULL; | ||
5649 | if (only_32) | ||
5650 | all_bits >>= 32; | ||
5651 | |||
5652 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
5653 | if (!ppd || (mask & all_bits) != all_bits) { | ||
5654 | /* | ||
5655 | * At least some mask bits are zero, so we need | ||
5656 | * to read. The judgement call is whether from | ||
5657 | * reg or shadow. First-cut: read reg, and complain | ||
5658 | * if any bits which should be shadowed are different | ||
5659 | * from their shadowed value. | ||
5660 | */ | ||
5661 | if (only_32) | ||
5662 | local_data = (u64)qib_read_kreg32(dd, idx); | ||
5663 | else | ||
5664 | local_data = qib_read_kreg64(dd, idx); | ||
5665 | *data = (local_data & ~mask) | (*data & mask); | ||
5666 | } | ||
5667 | if (mask) { | ||
5668 | /* | ||
5669 | * At least some mask bits are one, so we need | ||
5670 | * to write, but only shadow some bits. | ||
5671 | */ | ||
5672 | u64 sval, tval; /* Shadowed, transient */ | ||
5673 | |||
5674 | /* | ||
5675 | * New shadow val is bits we don't want to touch, | ||
5676 | * ORed with bits we do, that are intended for shadow. | ||
5677 | */ | ||
5678 | if (ppd) { | ||
5679 | sval = ppd->p_sendctrl & ~mask; | ||
5680 | sval |= *data & SENDCTRL_SHADOWED & mask; | ||
5681 | ppd->p_sendctrl = sval; | ||
5682 | } else | ||
5683 | sval = *data & SENDCTRL_SHADOWED & mask; | ||
5684 | tval = sval | (*data & ~SENDCTRL_SHADOWED & mask); | ||
5685 | qib_write_kreg(dd, idx, tval); | ||
5686 | qib_write_kreg(dd, kr_scratch, 0ULL); | ||
5687 | } | ||
5688 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
5689 | return only_32 ? 4 : 8; | ||
5690 | } | ||
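The interesting part of sendctrl_hook() is how a diag write is split: bits covered by SENDCTRL_SHADOWED are folded into the software shadow (sval) so later driver writes preserve them, while the remaining masked bits are applied to the chip only transiently (tval). A standalone sketch of that composition with arbitrary bit values, not driver code:

#include <stdio.h>
#include <stdint.h>

#define SHADOWED 0x00ffULL      /* pretend only the low byte is shadowed */

int main(void)
{
	uint64_t shadow = 0x0012;   /* current shadow of the register */
	uint64_t data   = 0x3456;   /* value the diag tool wants to write */
	uint64_t mask   = 0xffff;   /* bits the diag tool is writing */

	uint64_t sval = (shadow & ~mask) | (data & SHADOWED & mask);
	uint64_t tval = sval | (data & ~SHADOWED & mask);

	printf("new shadow = 0x%llx (kept)\n", (unsigned long long)sval);
	printf("chip write = 0x%llx (shadowed | transient)\n",
	       (unsigned long long)tval);
	return 0;
}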
5691 | |||
5692 | static const struct diag_observer sendctrl_0_observer = { | ||
5693 | sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64), | ||
5694 | KREG_IDX(SendCtrl_0) * sizeof(u64) | ||
5695 | }; | ||
5696 | |||
5697 | static const struct diag_observer sendctrl_1_observer = { | ||
5698 | sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64), | ||
5699 | KREG_IDX(SendCtrl_1) * sizeof(u64) | ||
5700 | }; | ||
5701 | |||
5702 | static ushort sdma_fetch_prio = 8; | ||
5703 | module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO); | ||
5704 | MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority"); | ||
5705 | |||
5706 | /* Besides logging QSFP events, we set appropriate TxDDS values */ | ||
5707 | static void init_txdds_table(struct qib_pportdata *ppd, int override); | ||
5708 | |||
5709 | static void qsfp_7322_event(struct work_struct *work) | ||
5710 | { | ||
5711 | struct qib_qsfp_data *qd; | ||
5712 | struct qib_pportdata *ppd; | ||
5713 | u64 pwrup; | ||
5714 | int ret; | ||
5715 | u32 le2; | ||
5716 | |||
5717 | qd = container_of(work, struct qib_qsfp_data, work); | ||
5718 | ppd = qd->ppd; | ||
5719 | pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC); | ||
5720 | |||
5721 | /* | ||
5722 | * Some QSFPs not only do not respond until the full power-up | ||
5723 | * time, but may behave badly if we try. So hold off responding | ||
5724 | * to insertion. | ||
5725 | */ | ||
5726 | while (1) { | ||
5727 | u64 now = get_jiffies_64(); | ||
5728 | if (time_after64(now, pwrup)) | ||
5729 | break; | ||
5730 | msleep(1); | ||
5731 | } | ||
5732 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); | ||
5733 | /* | ||
5734 | * Need to change LE2 back to defaults if we couldn't | ||
5735 | * read the cable type (to handle cable swaps), so do this | ||
5736 | * even on failure to read cable information. We don't | ||
5737 | * get here for QME, so IS_QME check not needed here. | ||
5738 | */ | ||
5739 | le2 = (!ret && qd->cache.atten[1] >= qib_long_atten && | ||
5740 | !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ? | ||
5741 | LE2_5m : LE2_DEFAULT; | ||
5742 | ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7)); | ||
5743 | init_txdds_table(ppd, 0); | ||
5744 | } | ||
5745 | |||
5746 | /* | ||
5747 | * There is little we can do but complain to the user if QSFP | ||
5748 | * initialization fails. | ||
5749 | */ | ||
5750 | static void qib_init_7322_qsfp(struct qib_pportdata *ppd) | ||
5751 | { | ||
5752 | unsigned long flags; | ||
5753 | struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data; | ||
5754 | struct qib_devdata *dd = ppd->dd; | ||
5755 | u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N; | ||
5756 | |||
5757 | mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); | ||
5758 | qd->ppd = ppd; | ||
5759 | qib_qsfp_init(qd, qsfp_7322_event); | ||
5760 | spin_lock_irqsave(&dd->cspec->gpio_lock, flags); | ||
5761 | dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert)); | ||
5762 | dd->cspec->gpio_mask |= mod_prs_bit; | ||
5763 | qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl); | ||
5764 | qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask); | ||
5765 | spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); | ||
5766 | } | ||
5767 | |||
5768 | /* | ||
5769 | * called at device initialization time, and also if the cable_atten | ||
5770 | * module parameter is changed. This is used for cables that don't | ||
5771 | * have valid QSFP EEPROMs (not present, or attenuation is zero). | ||
5772 | * We initialize to the default, then if there is a specific | ||
5773 | * unit,port match, we use that. | ||
5774 | * String format is "default# unit#,port#=# ... u,p=#"; the separator must | ||
5775 | * be a SPACE character. A newline terminates. | ||
5776 | * The last specific match is used (actually, all are used, but last | ||
5777 | * one is the one that winds up set); if none at all, fall back on default. | ||
5778 | */ | ||
5779 | static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | ||
5780 | { | ||
5781 | char *nxt, *str; | ||
5782 | int pidx, unit, port, deflt; | ||
5783 | unsigned long val; | ||
5784 | int any = 0; | ||
5785 | |||
5786 | str = cable_atten_list; | ||
5787 | |||
5788 | /* default number is validated in setup_cable_atten() */ | ||
5789 | deflt = simple_strtoul(str, &nxt, 0); | ||
5790 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | ||
5791 | dd->pport[pidx].cpspec->no_eep = deflt; | ||
5792 | |||
5793 | while (*nxt && nxt[1]) { | ||
5794 | str = ++nxt; | ||
5795 | unit = simple_strtoul(str, &nxt, 0); | ||
5796 | if (nxt == str || !*nxt || *nxt != ',') { | ||
5797 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | ||
5798 | ; | ||
5799 | continue; | ||
5800 | } | ||
5801 | str = ++nxt; | ||
5802 | port = simple_strtoul(str, &nxt, 0); | ||
5803 | if (nxt == str || *nxt != '=') { | ||
5804 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | ||
5805 | ; | ||
5806 | continue; | ||
5807 | } | ||
5808 | str = ++nxt; | ||
5809 | val = simple_strtoul(str, &nxt, 0); | ||
5810 | if (nxt == str) { | ||
5811 | while (*nxt && *nxt++ != ' ') /* skip to next, if any */ | ||
5812 | ; | ||
5813 | continue; | ||
5814 | } | ||
5815 | if (val >= TXDDS_TABLE_SZ) | ||
5816 | continue; | ||
5817 | for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; | ||
5818 | ++pidx) { | ||
5819 | if (dd->pport[pidx].port != port || | ||
5820 | !dd->pport[pidx].link_speed_supported) | ||
5821 | continue; | ||
5822 | dd->pport[pidx].cpspec->no_eep = val; | ||
5823 | /* now change the IBC and serdes, overriding generic */ | ||
5824 | init_txdds_table(&dd->pport[pidx], 1); | ||
5825 | any++; | ||
5826 | } | ||
5827 | if (*nxt == '\n') | ||
5828 | break; /* done */ | ||
5829 | } | ||
5830 | if (change && !any) { | ||
5831 | /* no specific setting, use the default. | ||
5832 | * Change the IBC and serdes, but since it's | ||
5833 | * general, don't override specific settings. | ||
5834 | */ | ||
5835 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
5836 | if (!dd->pport[pidx].link_speed_supported) | ||
5837 | continue; | ||
5838 | init_txdds_table(&dd->pport[pidx], 0); | ||
5839 | } | ||
5840 | } | ||
5841 | } | ||
5842 | |||
5843 | /* handle the cable_atten parameter changing */ | ||
5844 | static int setup_cable_atten(const char *str, struct kernel_param *kp) | ||
5845 | { | ||
5846 | struct qib_devdata *dd; | ||
5847 | unsigned long val; | ||
5848 | char *n; | ||
5849 | if (strlen(str) >= MAX_ATTEN_LEN) { | ||
5850 | printk(KERN_INFO QIB_DRV_NAME " cable_atten_values string " | ||
5851 | "too long\n"); | ||
5852 | return -ENOSPC; | ||
5853 | } | ||
5854 | val = simple_strtoul(str, &n, 0); | ||
5855 | if (n == str || val >= TXDDS_TABLE_SZ) { | ||
5856 | printk(KERN_INFO QIB_DRV_NAME | ||
5857 | "cable_atten_values must start with a number\n"); | ||
5858 | return -EINVAL; | ||
5859 | } | ||
5860 | strcpy(cable_atten_list, str); | ||
5861 | |||
5862 | list_for_each_entry(dd, &qib_dev_list, list) | ||
5863 | set_no_qsfp_atten(dd, 1); | ||
5864 | return 0; | ||
5865 | } | ||
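The cable_atten string has the form "default# unit,port=val ..." with space separators, and set_no_qsfp_atten() walks it with simple_strtoul(). The sketch below parses the same shape in userspace with sscanf instead, purely as an illustration; the example string and values are invented:

#include <stdio.h>

int main(void)
{
	const char *list = "5 0,1=11 0,2=9";
	int deflt, unit, port, val, n;

	if (sscanf(list, "%d%n", &deflt, &n) != 1)
		return 1;
	printf("default attenuation index: %d\n", deflt);

	const char *p = list + n;
	while (sscanf(p, " %d,%d=%d%n", &unit, &port, &val, &n) == 3) {
		printf("unit %d port %d -> index %d\n", unit, port, val);
		p += n;
	}
	return 0;
}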
5866 | |||
5867 | /* | ||
5868 | * Write the final few registers that depend on some of the | ||
5869 | * init setup. Done late in init, just before bringing up | ||
5870 | * the serdes. | ||
5871 | */ | ||
5872 | static int qib_late_7322_initreg(struct qib_devdata *dd) | ||
5873 | { | ||
5874 | int ret = 0, n; | ||
5875 | u64 val; | ||
5876 | |||
5877 | qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize); | ||
5878 | qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize); | ||
5879 | qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt); | ||
5880 | qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys); | ||
5881 | val = qib_read_kreg64(dd, kr_sendpioavailaddr); | ||
5882 | if (val != dd->pioavailregs_phys) { | ||
5883 | qib_dev_err(dd, "Catastrophic software error, " | ||
5884 | "SendPIOAvailAddr written as %lx, " | ||
5885 | "read back as %llx\n", | ||
5886 | (unsigned long) dd->pioavailregs_phys, | ||
5887 | (unsigned long long) val); | ||
5888 | ret = -EINVAL; | ||
5889 | } | ||
5890 | |||
5891 | n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; | ||
5892 | qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL); | ||
5893 | /* driver sends get pkey, lid, etc. checking also, to catch bugs */ | ||
5894 | qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL); | ||
5895 | |||
5896 | qib_register_observer(dd, &sendctrl_0_observer); | ||
5897 | qib_register_observer(dd, &sendctrl_1_observer); | ||
5898 | |||
5899 | dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN; | ||
5900 | qib_write_kreg(dd, kr_control, dd->control); | ||
5901 | /* | ||
5902 | * Set SendDmaFetchPriority and init Tx params, including | ||
5903 | * QSFP handler on boards that have QSFP. | ||
5904 | * First set our default attenuation entry for cables that | ||
5905 | * don't have valid attenuation. | ||
5906 | */ | ||
5907 | set_no_qsfp_atten(dd, 0); | ||
5908 | for (n = 0; n < dd->num_pports; ++n) { | ||
5909 | struct qib_pportdata *ppd = dd->pport + n; | ||
5910 | |||
5911 | qib_write_kreg_port(ppd, krp_senddmaprioritythld, | ||
5912 | sdma_fetch_prio & 0xf); | ||
5913 | /* Initialize qsfp if present on board. */ | ||
5914 | if (dd->flags & QIB_HAS_QSFP) | ||
5915 | qib_init_7322_qsfp(ppd); | ||
5916 | } | ||
5917 | dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN; | ||
5918 | qib_write_kreg(dd, kr_control, dd->control); | ||
5919 | |||
5920 | return ret; | ||
5921 | } | ||
5922 | |||
5923 | /* per IB port errors. */ | ||
5924 | #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \ | ||
5925 | MASK_ACROSS(8, 15)) | ||
5926 | #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41)) | ||
5927 | #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \ | ||
5928 | MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \ | ||
5929 | MASK_ACROSS(0, 11)) | ||
5930 | |||
5931 | /* | ||
5932 | * Write the initialization per-port registers that need to be done at | ||
5933 | * driver load and after reset completes (i.e., that aren't done as part | ||
5934 | * of other init procedures called from qib_init.c). | ||
5935 | * Some of these should be redundant on reset, but play safe. | ||
5936 | */ | ||
5937 | static void write_7322_init_portregs(struct qib_pportdata *ppd) | ||
5938 | { | ||
5939 | u64 val; | ||
5940 | int i; | ||
5941 | |||
5942 | if (!ppd->link_speed_supported) { | ||
5943 | /* no buffer credits for this port */ | ||
5944 | for (i = 1; i < 8; i++) | ||
5945 | qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0); | ||
5946 | qib_write_kreg_port(ppd, krp_ibcctrl_b, 0); | ||
5947 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
5948 | return; | ||
5949 | } | ||
5950 | |||
5951 | /* | ||
5952 | * Set the number of supported virtual lanes in IBC, | ||
5953 | * for flow control packet handling on unsupported VLs | ||
5954 | */ | ||
5955 | val = qib_read_kreg_port(ppd, krp_ibsdtestiftx); | ||
5956 | val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP); | ||
5957 | val |= (u64)(ppd->vls_supported - 1) << | ||
5958 | SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP); | ||
5959 | qib_write_kreg_port(ppd, krp_ibsdtestiftx, val); | ||
5960 | |||
5961 | qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP); | ||
5962 | |||
5963 | /* enable tx header checking */ | ||
5964 | qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY | | ||
5965 | IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID | | ||
5966 | IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ); | ||
5967 | |||
5968 | qib_write_kreg_port(ppd, krp_ncmodectrl, | ||
5969 | SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal)); | ||
5970 | |||
5971 | /* | ||
5972 | * Unconditionally clear the bufmask bits. If SDMA is | ||
5973 | * enabled, we'll set them appropriately later. | ||
5974 | */ | ||
5975 | qib_write_kreg_port(ppd, krp_senddmabufmask0, 0); | ||
5976 | qib_write_kreg_port(ppd, krp_senddmabufmask1, 0); | ||
5977 | qib_write_kreg_port(ppd, krp_senddmabufmask2, 0); | ||
5978 | if (ppd->dd->cspec->r1) | ||
5979 | ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate); | ||
5980 | } | ||
5981 | |||
5982 | /* | ||
5983 | * Write the initialization per-device registers that need to be done at | ||
5984 | * driver load and after reset completes (i.e., that aren't done as part | ||
5985 | * of other init procedures called from qib_init.c). Also write per-port | ||
5986 | * registers that are affected by overall device config, such as QP mapping | ||
5987 | * Some of these should be redundant on reset, but play safe. | ||
5988 | */ | ||
5989 | static void write_7322_initregs(struct qib_devdata *dd) | ||
5990 | { | ||
5991 | struct qib_pportdata *ppd; | ||
5992 | int i, pidx; | ||
5993 | u64 val; | ||
5994 | |||
5995 | /* Set Multicast QPs received by port 2 to map to context one. */ | ||
5996 | qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1); | ||
5997 | |||
5998 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
5999 | unsigned n, regno; | ||
6000 | unsigned long flags; | ||
6001 | |||
6002 | if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported) | ||
6003 | continue; | ||
6004 | |||
6005 | ppd = &dd->pport[pidx]; | ||
6006 | |||
6007 | /* be paranoid against later code motion, etc. */ | ||
6008 | spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); | ||
6009 | ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable); | ||
6010 | spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags); | ||
6011 | |||
6012 | /* Initialize QP to context mapping */ | ||
6013 | regno = krp_rcvqpmaptable; | ||
6014 | val = 0; | ||
6015 | if (dd->num_pports > 1) | ||
6016 | n = dd->first_user_ctxt / dd->num_pports; | ||
6017 | else | ||
6018 | n = dd->first_user_ctxt - 1; | ||
6019 | for (i = 0; i < 32; ) { | ||
6020 | unsigned ctxt; | ||
6021 | |||
6022 | if (dd->num_pports > 1) | ||
6023 | ctxt = (i % n) * dd->num_pports + pidx; | ||
6024 | else if (i % n) | ||
6025 | ctxt = (i % n) + 1; | ||
6026 | else | ||
6027 | ctxt = ppd->hw_pidx; | ||
6028 | val |= ctxt << (5 * (i % 6)); | ||
6029 | i++; | ||
6030 | if (i % 6 == 0) { | ||
6031 | qib_write_kreg_port(ppd, regno, val); | ||
6032 | val = 0; | ||
6033 | regno++; | ||
6034 | } | ||
6035 | } | ||
6036 | qib_write_kreg_port(ppd, regno, val); | ||
6037 | } | ||
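The loop above packs the 32 QP-to-context map entries five bits apiece, six entries per kernel register, writing each register as it fills. A minimal standalone sketch of the same packing (hypothetical helper, not part of the driver):

#include <stdint.h>

/*
 * Illustrative only: pack 32 five-bit context numbers into six
 * 64-bit words, six entries per word, mirroring the loop above.
 */
static void pack_qpmap(const unsigned ctxt[32], uint64_t regs[6])
{
	uint64_t val = 0;
	unsigned i, regno = 0;

	for (i = 0; i < 32; ) {
		val |= (uint64_t)(ctxt[i] & 0x1f) << (5 * (i % 6));
		i++;
		if (i % 6 == 0) {
			regs[regno++] = val;
			val = 0;
		}
	}
	regs[regno] = val;	/* final, partially filled word */
}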
6038 | |||
6039 | /* | ||
6040 | * Set up interrupt mitigation for kernel contexts, but | ||
6041 | * not user contexts (user contexts use interrupts when | ||
6042 | * stalled waiting for any packet, so want those interrupts | ||
6043 | * right away). | ||
6044 | */ | ||
6045 | for (i = 0; i < dd->first_user_ctxt; i++) { | ||
6046 | dd->cspec->rcvavail_timeout[i] = rcv_int_timeout; | ||
6047 | qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout); | ||
6048 | } | ||
6049 | |||
6050 | /* | ||
6051 | * Initialize the (disabled) rcvflow tables. Application code | ||
6052 | * will set up each flow as it uses the flow. | ||
6053 | * Doesn't clear any of the error bits that might be set. | ||
6054 | */ | ||
6055 | val = TIDFLOW_ERRBITS; /* these are W1C */ | ||
6056 | for (i = 0; i < dd->ctxtcnt; i++) { | ||
6057 | int flow; | ||
6058 | for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) | ||
6059 | qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); | ||
6060 | } | ||
6061 | |||
6062 | /* | ||
6063 | * Dual-port cards init to dual-port recovery, single-port cards | ||
6064 | * to the one port. Dual-port cards may later adjust to one port, | ||
6065 | * and then back to dual port if both ports are connected. | ||
6066 | */ | ||
6067 | if (dd->num_pports) | ||
6068 | setup_7322_link_recovery(dd->pport, dd->num_pports > 1); | ||
6069 | } | ||
6070 | |||
6071 | static int qib_init_7322_variables(struct qib_devdata *dd) | ||
6072 | { | ||
6073 | struct qib_pportdata *ppd; | ||
6074 | unsigned features, pidx, sbufcnt; | ||
6075 | int ret, mtu; | ||
6076 | u32 sbufs, updthresh; | ||
6077 | |||
6078 | /* pport structs are contiguous, allocated after devdata */ | ||
6079 | ppd = (struct qib_pportdata *)(dd + 1); | ||
6080 | dd->pport = ppd; | ||
6081 | ppd[0].dd = dd; | ||
6082 | ppd[1].dd = dd; | ||
6083 | |||
6084 | dd->cspec = (struct qib_chip_specific *)(ppd + 2); | ||
6085 | |||
6086 | ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1); | ||
6087 | ppd[1].cpspec = &ppd[0].cpspec[1]; | ||
6088 | ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */ | ||
6089 | ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */ | ||
6090 | |||
6091 | spin_lock_init(&dd->cspec->rcvmod_lock); | ||
6092 | spin_lock_init(&dd->cspec->gpio_lock); | ||
6093 | |||
6094 | /* we haven't yet set QIB_PRESENT, so use read directly */ | ||
6095 | dd->revision = readq(&dd->kregbase[kr_revision]); | ||
6096 | |||
6097 | if ((dd->revision & 0xffffffffU) == 0xffffffffU) { | ||
6098 | qib_dev_err(dd, "Revision register read failure, " | ||
6099 | "giving up initialization\n"); | ||
6100 | ret = -ENODEV; | ||
6101 | goto bail; | ||
6102 | } | ||
6103 | dd->flags |= QIB_PRESENT; /* now register routines work */ | ||
6104 | |||
6105 | dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor); | ||
6106 | dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor); | ||
6107 | dd->cspec->r1 = dd->minrev == 1; | ||
6108 | |||
6109 | get_7322_chip_params(dd); | ||
6110 | features = qib_7322_boardname(dd); | ||
6111 | |||
6112 | /* now that piobcnt2k and 4k set, we can allocate these */ | ||
6113 | sbufcnt = dd->piobcnt2k + dd->piobcnt4k + | ||
6114 | NUM_VL15_BUFS + BITS_PER_LONG - 1; | ||
6115 | sbufcnt /= BITS_PER_LONG; | ||
6116 | dd->cspec->sendchkenable = kmalloc(sbufcnt * | ||
6117 | sizeof(*dd->cspec->sendchkenable), GFP_KERNEL); | ||
6118 | dd->cspec->sendgrhchk = kmalloc(sbufcnt * | ||
6119 | sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL); | ||
6120 | dd->cspec->sendibchk = kmalloc(sbufcnt * | ||
6121 | sizeof(*dd->cspec->sendibchk), GFP_KERNEL); | ||
6122 | if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk || | ||
6123 | !dd->cspec->sendibchk) { | ||
6124 | qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n"); | ||
6125 | ret = -ENOMEM; | ||
6126 | goto bail; | ||
6127 | } | ||
6128 | |||
6129 | ppd = dd->pport; | ||
6130 | |||
6131 | /* | ||
6132 | * GPIO bits for TWSI data and clock, | ||
6133 | * used for serial EEPROM. | ||
6134 | */ | ||
6135 | dd->gpio_sda_num = _QIB_GPIO_SDA_NUM; | ||
6136 | dd->gpio_scl_num = _QIB_GPIO_SCL_NUM; | ||
6137 | dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV; | ||
6138 | |||
6139 | dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY | | ||
6140 | QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP | | ||
6141 | QIB_HAS_THRESH_UPDATE | | ||
6142 | (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0); | ||
6143 | dd->flags |= qib_special_trigger ? | ||
6144 | QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA; | ||
6145 | |||
6146 | /* | ||
6147 | * Setup initial values. These may change when PAT is enabled, but | ||
6148 | * we need these to do initial chip register accesses. | ||
6149 | */ | ||
6150 | qib_7322_set_baseaddrs(dd); | ||
6151 | |||
6152 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | ||
6153 | if (mtu == -1) | ||
6154 | mtu = QIB_DEFAULT_MTU; | ||
6155 | |||
6156 | dd->cspec->int_enable_mask = QIB_I_BITSEXTANT; | ||
6157 | /* all hwerrors become interrupts, unless special purposed */ | ||
6158 | dd->cspec->hwerrmask = ~0ULL; | ||
6159 | /* link_recovery setup causes these errors, so ignore them, | ||
6160 | * other than clearing them when they occur */ | ||
6161 | dd->cspec->hwerrmask &= | ||
6162 | ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) | | ||
6163 | SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) | | ||
6164 | HWE_MASK(LATriggered)); | ||
6165 | |||
6166 | for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { | ||
6167 | struct qib_chippport_specific *cp = ppd->cpspec; | ||
6168 | ppd->link_speed_supported = features & PORT_SPD_CAP; | ||
6169 | features >>= PORT_SPD_CAP_SHIFT; | ||
6170 | if (!ppd->link_speed_supported) { | ||
6171 | /* single port mode (7340, or configured) */ | ||
6172 | dd->skip_kctxt_mask |= 1 << pidx; | ||
6173 | if (pidx == 0) { | ||
6174 | /* Make sure port is disabled. */ | ||
6175 | qib_write_kreg_port(ppd, krp_rcvctrl, 0); | ||
6176 | qib_write_kreg_port(ppd, krp_ibcctrl_a, 0); | ||
6177 | ppd[0] = ppd[1]; | ||
6178 | dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, | ||
6179 | IBSerdesPClkNotDetectMask_0) | ||
6180 | | SYM_MASK(HwErrMask, | ||
6181 | SDmaMemReadErrMask_0)); | ||
6182 | dd->cspec->int_enable_mask &= ~( | ||
6183 | SYM_MASK(IntMask, SDmaCleanupDoneMask_0) | | ||
6184 | SYM_MASK(IntMask, SDmaIdleIntMask_0) | | ||
6185 | SYM_MASK(IntMask, SDmaProgressIntMask_0) | | ||
6186 | SYM_MASK(IntMask, SDmaIntMask_0) | | ||
6187 | SYM_MASK(IntMask, ErrIntMask_0) | | ||
6188 | SYM_MASK(IntMask, SendDoneIntMask_0)); | ||
6189 | } else { | ||
6190 | /* Make sure port is disabled. */ | ||
6191 | qib_write_kreg_port(ppd, krp_rcvctrl, 0); | ||
6192 | qib_write_kreg_port(ppd, krp_ibcctrl_a, 0); | ||
6193 | dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask, | ||
6194 | IBSerdesPClkNotDetectMask_1) | ||
6195 | | SYM_MASK(HwErrMask, | ||
6196 | SDmaMemReadErrMask_1)); | ||
6197 | dd->cspec->int_enable_mask &= ~( | ||
6198 | SYM_MASK(IntMask, SDmaCleanupDoneMask_1) | | ||
6199 | SYM_MASK(IntMask, SDmaIdleIntMask_1) | | ||
6200 | SYM_MASK(IntMask, SDmaProgressIntMask_1) | | ||
6201 | SYM_MASK(IntMask, SDmaIntMask_1) | | ||
6202 | SYM_MASK(IntMask, ErrIntMask_1) | | ||
6203 | SYM_MASK(IntMask, SendDoneIntMask_1)); | ||
6204 | } | ||
6205 | continue; | ||
6206 | } | ||
6207 | |||
6208 | dd->num_pports++; | ||
6209 | qib_init_pportdata(ppd, dd, pidx, dd->num_pports); | ||
6210 | |||
6211 | ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; | ||
6212 | ppd->link_width_enabled = IB_WIDTH_4X; | ||
6213 | ppd->link_speed_enabled = ppd->link_speed_supported; | ||
6214 | /* | ||
6215 | * Set the initial values to reasonable default, will be set | ||
6216 | * for real when link is up. | ||
6217 | */ | ||
6218 | ppd->link_width_active = IB_WIDTH_4X; | ||
6219 | ppd->link_speed_active = QIB_IB_SDR; | ||
6220 | ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS]; | ||
6221 | switch (qib_num_cfg_vls) { | ||
6222 | case 1: | ||
6223 | ppd->vls_supported = IB_VL_VL0; | ||
6224 | break; | ||
6225 | case 2: | ||
6226 | ppd->vls_supported = IB_VL_VL0_1; | ||
6227 | break; | ||
6228 | default: | ||
6229 | qib_devinfo(dd->pcidev, | ||
6230 | "Invalid num_vls %u, using 4 VLs\n", | ||
6231 | qib_num_cfg_vls); | ||
6232 | qib_num_cfg_vls = 4; | ||
6233 | /* fall through */ | ||
6234 | case 4: | ||
6235 | ppd->vls_supported = IB_VL_VL0_3; | ||
6236 | break; | ||
6237 | case 8: | ||
6238 | if (mtu <= 2048) | ||
6239 | ppd->vls_supported = IB_VL_VL0_7; | ||
6240 | else { | ||
6241 | qib_devinfo(dd->pcidev, | ||
6242 | "Invalid num_vls %u for MTU %d " | ||
6243 | ", using 4 VLs\n", | ||
6244 | qib_num_cfg_vls, mtu); | ||
6245 | ppd->vls_supported = IB_VL_VL0_3; | ||
6246 | qib_num_cfg_vls = 4; | ||
6247 | } | ||
6248 | break; | ||
6249 | } | ||
6250 | ppd->vls_operational = ppd->vls_supported; | ||
6251 | |||
6252 | init_waitqueue_head(&cp->autoneg_wait); | ||
6253 | INIT_DELAYED_WORK(&cp->autoneg_work, | ||
6254 | autoneg_7322_work); | ||
6255 | if (ppd->dd->cspec->r1) | ||
6256 | INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work); | ||
6257 | |||
6258 | /* | ||
6259 | * For Mez and similar cards, no qsfp info, so do | ||
6260 | * the "cable info" setup here. Can be overridden | ||
6261 | * in adapter-specific routines. | ||
6262 | */ | ||
6263 | if (!(ppd->dd->flags & QIB_HAS_QSFP)) { | ||
6264 | int i; | ||
6265 | const struct txdds_ent *txdds; | ||
6266 | |||
6267 | if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd)) | ||
6268 | qib_devinfo(ppd->dd->pcidev, "IB%u:%u: " | ||
6269 | "Unknown mezzanine card type\n", | ||
6270 | ppd->dd->unit, ppd->port); | ||
6271 | txdds = IS_QMH(ppd->dd) ? &qmh_qdr_txdds : | ||
6272 | &qme_qdr_txdds; | ||
6273 | |||
6274 | /* | ||
6275 | * set values in case link comes up | ||
6276 | * before table is written to driver. | ||
6277 | */ | ||
6278 | cp->h1_val = IS_QMH(ppd->dd) ? H1_FORCE_QMH : | ||
6279 | H1_FORCE_QME; | ||
6280 | for (i = 0; i < SERDES_CHANS; i++) { | ||
6281 | cp->amp[i] = txdds->amp; | ||
6282 | cp->pre[i] = txdds->pre; | ||
6283 | cp->mainv[i] = txdds->main; | ||
6284 | cp->post[i] = txdds->post; | ||
6285 | } | ||
6286 | } else | ||
6287 | cp->h1_val = H1_FORCE_VAL; | ||
6288 | |||
6289 | /* Avoid writes to chip for mini_init */ | ||
6290 | if (!qib_mini_init) | ||
6291 | write_7322_init_portregs(ppd); | ||
6292 | |||
6293 | init_timer(&cp->chase_timer); | ||
6294 | cp->chase_timer.function = reenable_chase; | ||
6295 | cp->chase_timer.data = (unsigned long)ppd; | ||
6296 | |||
6297 | ppd++; | ||
6298 | } | ||
6299 | |||
6300 | dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; | ||
6301 | dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; | ||
6302 | dd->rhf_offset = | ||
6303 | dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); | ||
6304 | |||
6305 | /* we always allocate at least 2048 bytes for eager buffers */ | ||
6306 | dd->rcvegrbufsize = max(mtu, 2048); | ||
6307 | |||
6308 | qib_7322_tidtemplate(dd); | ||
6309 | |||
6310 | /* | ||
6311 | * We can request a receive interrupt for 1 or | ||
6312 | * more packets from current offset. | ||
6313 | */ | ||
6314 | dd->rhdrhead_intr_off = | ||
6315 | (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT; | ||
6316 | |||
6317 | /* setup the stats timer; the add_timer is done at end of init */ | ||
6318 | init_timer(&dd->stats_timer); | ||
6319 | dd->stats_timer.function = qib_get_7322_faststats; | ||
6320 | dd->stats_timer.data = (unsigned long) dd; | ||
6321 | |||
6322 | dd->ureg_align = 0x10000; /* 64KB alignment */ | ||
6323 | |||
6324 | dd->piosize2kmax_dwords = dd->piosize2k >> 2; | ||
6325 | |||
6326 | qib_7322_config_ctxts(dd); | ||
6327 | qib_set_ctxtcnt(dd); | ||
6328 | |||
6329 | if (qib_wc_pat) { | ||
6330 | ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k); | ||
6331 | if (ret) | ||
6332 | goto bail; | ||
6333 | } | ||
6334 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ | ||
6335 | |||
6336 | ret = 0; | ||
6337 | if (qib_mini_init) | ||
6338 | goto bail; | ||
6339 | if (!dd->num_pports) { | ||
6340 | qib_dev_err(dd, "No ports enabled, giving up initialization\n"); | ||
6341 | goto bail; /* no error, so can still figure out why err */ | ||
6342 | } | ||
6343 | |||
6344 | write_7322_initregs(dd); | ||
6345 | ret = qib_create_ctxts(dd); | ||
6346 | init_7322_cntrnames(dd); | ||
6347 | |||
6348 | updthresh = 8U; /* update threshold */ | ||
6349 | |||
6350 | /* Use all of the 4KB buffers for kernel SDMA, zero if !SDMA. | ||
6351 | * Reserve the update threshold amount (or 3, whichever is | ||
6352 | * greater) for other kernel use, such as sending SMI, MAD, and | ||
6353 | * ACKs, unless we aren't enabling SDMA, in which case we want | ||
6354 | * to use all the 4k bufs for the kernel. | ||
6355 | * If this were less than the update threshold, we could wait | ||
6356 | * a long time for an update. Coded this way because we | ||
6357 | * sometimes change the update threshold for various reasons, | ||
6358 | * and we want this to remain robust. | ||
6359 | */ | ||
6360 | if (dd->flags & QIB_HAS_SEND_DMA) { | ||
6361 | dd->cspec->sdmabufcnt = dd->piobcnt4k; | ||
6362 | sbufs = updthresh > 3 ? updthresh : 3; | ||
6363 | } else { | ||
6364 | dd->cspec->sdmabufcnt = 0; | ||
6365 | sbufs = dd->piobcnt4k; | ||
6366 | } | ||
6367 | dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k - | ||
6368 | dd->cspec->sdmabufcnt; | ||
6369 | dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs; | ||
6370 | dd->cspec->lastbuf_for_pio--; /* range is <= , not < */ | ||
6371 | dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ? | ||
6372 | dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0; | ||
6373 | |||
6374 | /* | ||
6375 | * If we have 16 user contexts, we will have 7 sbufs | ||
6376 | * per context, so reduce the update threshold to match. We | ||
6377 | * want to update before we actually run out, so at low | ||
6378 | * pbufs/ctxt give ourselves some margin. | ||
6379 | */ | ||
6380 | if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh) | ||
6381 | updthresh = dd->pbufsctxt - 2; | ||
6382 | dd->cspec->updthresh_dflt = updthresh; | ||
6383 | dd->cspec->updthresh = updthresh; | ||
6384 | |||
6385 | /* before full enable, no interrupts, no locking needed */ | ||
6386 | dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld)) | ||
6387 | << SYM_LSB(SendCtrl, AvailUpdThld)) | | ||
6388 | SYM_MASK(SendCtrl, SendBufAvailPad64Byte); | ||
6389 | |||
6390 | dd->psxmitwait_supported = 1; | ||
6391 | dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE; | ||
6392 | bail: | ||
6393 | if (!dd->ctxtcnt) | ||
6394 | dd->ctxtcnt = 1; /* for other initialization code */ | ||
6395 | |||
6396 | return ret; | ||
6397 | } | ||
6398 | |||
6399 | static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc, | ||
6400 | u32 *pbufnum) | ||
6401 | { | ||
6402 | u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK; | ||
6403 | struct qib_devdata *dd = ppd->dd; | ||
6404 | |||
6405 | /* last is same for 2k and 4k, because we use 4k if all 2k busy */ | ||
6406 | if (pbc & PBC_7322_VL15_SEND) { | ||
6407 | first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx; | ||
6408 | last = first; | ||
6409 | } else { | ||
6410 | if ((plen + 1) > dd->piosize2kmax_dwords) | ||
6411 | first = dd->piobcnt2k; | ||
6412 | else | ||
6413 | first = 0; | ||
6414 | last = dd->cspec->lastbuf_for_pio; | ||
6415 | } | ||
6416 | return qib_getsendbuf_range(dd, pbufnum, first, last); | ||
6417 | } | ||
6418 | |||
6419 | static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv, | ||
6420 | u32 start) | ||
6421 | { | ||
6422 | qib_write_kreg_port(ppd, krp_psinterval, intv); | ||
6423 | qib_write_kreg_port(ppd, krp_psstart, start); | ||
6424 | } | ||
6425 | |||
6426 | /* | ||
6427 | * Must be called with sdma_lock held, or before init finished. | ||
6428 | */ | ||
6429 | static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt) | ||
6430 | { | ||
6431 | qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt); | ||
6432 | } | ||
6433 | |||
6434 | static struct sdma_set_state_action sdma_7322_action_table[] = { | ||
6435 | [qib_sdma_state_s00_hw_down] = { | ||
6436 | .go_s99_running_tofalse = 1, | ||
6437 | .op_enable = 0, | ||
6438 | .op_intenable = 0, | ||
6439 | .op_halt = 0, | ||
6440 | .op_drain = 0, | ||
6441 | }, | ||
6442 | [qib_sdma_state_s10_hw_start_up_wait] = { | ||
6443 | .op_enable = 0, | ||
6444 | .op_intenable = 1, | ||
6445 | .op_halt = 1, | ||
6446 | .op_drain = 0, | ||
6447 | }, | ||
6448 | [qib_sdma_state_s20_idle] = { | ||
6449 | .op_enable = 1, | ||
6450 | .op_intenable = 1, | ||
6451 | .op_halt = 1, | ||
6452 | .op_drain = 0, | ||
6453 | }, | ||
6454 | [qib_sdma_state_s30_sw_clean_up_wait] = { | ||
6455 | .op_enable = 0, | ||
6456 | .op_intenable = 1, | ||
6457 | .op_halt = 1, | ||
6458 | .op_drain = 0, | ||
6459 | }, | ||
6460 | [qib_sdma_state_s40_hw_clean_up_wait] = { | ||
6461 | .op_enable = 1, | ||
6462 | .op_intenable = 1, | ||
6463 | .op_halt = 1, | ||
6464 | .op_drain = 0, | ||
6465 | }, | ||
6466 | [qib_sdma_state_s50_hw_halt_wait] = { | ||
6467 | .op_enable = 1, | ||
6468 | .op_intenable = 1, | ||
6469 | .op_halt = 1, | ||
6470 | .op_drain = 1, | ||
6471 | }, | ||
6472 | [qib_sdma_state_s99_running] = { | ||
6473 | .op_enable = 1, | ||
6474 | .op_intenable = 1, | ||
6475 | .op_halt = 0, | ||
6476 | .op_drain = 0, | ||
6477 | .go_s99_running_totrue = 1, | ||
6478 | }, | ||
6479 | }; | ||
6480 | |||
6481 | static void qib_7322_sdma_init_early(struct qib_pportdata *ppd) | ||
6482 | { | ||
6483 | ppd->sdma_state.set_state_action = sdma_7322_action_table; | ||
6484 | } | ||
6485 | |||
6486 | static int init_sdma_7322_regs(struct qib_pportdata *ppd) | ||
6487 | { | ||
6488 | struct qib_devdata *dd = ppd->dd; | ||
6489 | unsigned lastbuf, erstbuf; | ||
6490 | u64 senddmabufmask[3] = { 0 }; | ||
6491 | int n, ret = 0; | ||
6492 | |||
6493 | qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys); | ||
6494 | qib_sdma_7322_setlengen(ppd); | ||
6495 | qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */ | ||
6496 | qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt); | ||
6497 | qib_write_kreg_port(ppd, krp_senddmadesccnt, 0); | ||
6498 | qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys); | ||
6499 | |||
6500 | if (dd->num_pports) | ||
6501 | n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */ | ||
6502 | else | ||
6503 | n = dd->cspec->sdmabufcnt; /* failsafe for init */ | ||
6504 | erstbuf = (dd->piobcnt2k + dd->piobcnt4k) - | ||
6505 | ((dd->num_pports == 1 || ppd->port == 2) ? n : | ||
6506 | dd->cspec->sdmabufcnt); | ||
6507 | lastbuf = erstbuf + n; | ||
6508 | |||
6509 | ppd->sdma_state.first_sendbuf = erstbuf; | ||
6510 | ppd->sdma_state.last_sendbuf = lastbuf; | ||
6511 | for (; erstbuf < lastbuf; ++erstbuf) { | ||
6512 | unsigned word = erstbuf / BITS_PER_LONG; | ||
6513 | unsigned bit = erstbuf & (BITS_PER_LONG - 1); | ||
6514 | |||
6515 | BUG_ON(word >= 3); | ||
6516 | senddmabufmask[word] |= 1ULL << bit; | ||
6517 | } | ||
6518 | qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]); | ||
6519 | qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]); | ||
6520 | qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]); | ||
6521 | return ret; | ||
6522 | } | ||
6523 | |||
6524 | /* sdma_lock must be held */ | ||
6525 | static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd) | ||
6526 | { | ||
6527 | struct qib_devdata *dd = ppd->dd; | ||
6528 | int sane; | ||
6529 | int use_dmahead; | ||
6530 | u16 swhead; | ||
6531 | u16 swtail; | ||
6532 | u16 cnt; | ||
6533 | u16 hwhead; | ||
6534 | |||
6535 | use_dmahead = __qib_sdma_running(ppd) && | ||
6536 | (dd->flags & QIB_HAS_SDMA_TIMEOUT); | ||
6537 | retry: | ||
6538 | hwhead = use_dmahead ? | ||
6539 | (u16) le64_to_cpu(*ppd->sdma_head_dma) : | ||
6540 | (u16) qib_read_kreg_port(ppd, krp_senddmahead); | ||
6541 | |||
6542 | swhead = ppd->sdma_descq_head; | ||
6543 | swtail = ppd->sdma_descq_tail; | ||
6544 | cnt = ppd->sdma_descq_cnt; | ||
6545 | |||
6546 | if (swhead < swtail) | ||
6547 | /* not wrapped */ | ||
6548 | sane = (hwhead >= swhead) & (hwhead <= swtail); | ||
6549 | else if (swhead > swtail) | ||
6550 | /* wrapped around */ | ||
6551 | sane = ((hwhead >= swhead) && (hwhead < cnt)) || | ||
6552 | (hwhead <= swtail); | ||
6553 | else | ||
6554 | /* empty */ | ||
6555 | sane = (hwhead == swhead); | ||
6556 | |||
6557 | if (unlikely(!sane)) { | ||
6558 | if (use_dmahead) { | ||
6559 | /* try one more time, directly from the register */ | ||
6560 | use_dmahead = 0; | ||
6561 | goto retry; | ||
6562 | } | ||
6563 | /* proceed as if no progress */ | ||
6564 | hwhead = swhead; | ||
6565 | } | ||
6566 | |||
6567 | return hwhead; | ||
6568 | } | ||
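The three-way check above accepts the hardware head only if it falls within the span the software believes is outstanding, allowing for ring wrap-around. A self-contained restatement (hypothetical helper, not driver code):

#include <stdint.h>

/*
 * Illustrative only: nonzero if hwhead is a plausible head value for a
 * ring of "cnt" descriptors whose software view is swhead..swtail,
 * mirroring the cases in qib_sdma_7322_gethead() above.
 */
static int sdma_head_sane(uint16_t hwhead, uint16_t swhead,
			  uint16_t swtail, uint16_t cnt)
{
	if (swhead < swtail)		/* not wrapped */
		return hwhead >= swhead && hwhead <= swtail;
	if (swhead > swtail)		/* wrapped around */
		return (hwhead >= swhead && hwhead < cnt) ||
		       hwhead <= swtail;
	return hwhead == swhead;	/* empty */
}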
6569 | |||
6570 | static int qib_sdma_7322_busy(struct qib_pportdata *ppd) | ||
6571 | { | ||
6572 | u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus); | ||
6573 | |||
6574 | return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) || | ||
6575 | (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) || | ||
6576 | !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) || | ||
6577 | !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty)); | ||
6578 | } | ||
6579 | |||
6580 | /* | ||
6581 | * Compute the amount of delay before sending the next packet if the | ||
6582 | * port's send rate differs from the static rate set for the QP. | ||
6583 | * The delay affects the next packet and the amount of the delay is | ||
6584 | * based on the length of this packet. | ||
6585 | */ | ||
6586 | static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen, | ||
6587 | u8 srate, u8 vl) | ||
6588 | { | ||
6589 | u8 snd_mult = ppd->delay_mult; | ||
6590 | u8 rcv_mult = ib_rate_to_delay[srate]; | ||
6591 | u32 ret; | ||
6592 | |||
6593 | ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0; | ||
6594 | |||
6595 | /* Indicate VL15, else set the VL in the control word */ | ||
6596 | if (vl == 15) | ||
6597 | ret |= PBC_7322_VL15_SEND_CTRL; | ||
6598 | else | ||
6599 | ret |= vl << PBC_VL_NUM_LSB; | ||
6600 | ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB; | ||
6601 | |||
6602 | return ret; | ||
6603 | } | ||
6604 | |||
6605 | /* | ||
6606 | * Enable the per-port VL15 send buffers for use. | ||
6607 | * They follow the rest of the buffers, without a config parameter. | ||
6608 | * This was in initregs, but that is done before the shadow | ||
6609 | * is set up, and this has to be done after the shadow is | ||
6610 | * set up. | ||
6611 | */ | ||
6612 | static void qib_7322_initvl15_bufs(struct qib_devdata *dd) | ||
6613 | { | ||
6614 | unsigned vl15bufs; | ||
6615 | |||
6616 | vl15bufs = dd->piobcnt2k + dd->piobcnt4k; | ||
6617 | qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS, | ||
6618 | TXCHK_CHG_TYPE_KERN, NULL); | ||
6619 | } | ||
6620 | |||
6621 | static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd) | ||
6622 | { | ||
6623 | if (rcd->ctxt < NUM_IB_PORTS) { | ||
6624 | if (rcd->dd->num_pports > 1) { | ||
6625 | rcd->rcvegrcnt = KCTXT0_EGRCNT / 2; | ||
6626 | rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0; | ||
6627 | } else { | ||
6628 | rcd->rcvegrcnt = KCTXT0_EGRCNT; | ||
6629 | rcd->rcvegr_tid_base = 0; | ||
6630 | } | ||
6631 | } else { | ||
6632 | rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt; | ||
6633 | rcd->rcvegr_tid_base = KCTXT0_EGRCNT + | ||
6634 | (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt; | ||
6635 | } | ||
6636 | } | ||
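To make the eager-buffer layout concrete (assuming NUM_IB_PORTS is 2, as for this chip): on a dual-port card the two kernel contexts split KCTXT0_EGRCNT evenly, with context 1's TIDs starting immediately after context 0's; every later context N starts at KCTXT0_EGRCNT + (N - 2) * rcvegrcnt, where rcvegrcnt is the per-context count from cspec.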
6637 | |||
6638 | #define QTXSLEEPS 5000 | ||
6639 | static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, | ||
6640 | u32 len, u32 which, struct qib_ctxtdata *rcd) | ||
6641 | { | ||
6642 | int i; | ||
6643 | const int last = start + len - 1; | ||
6644 | const int lastr = last / BITS_PER_LONG; | ||
6645 | u32 sleeps = 0; | ||
6646 | int wait = rcd != NULL; | ||
6647 | unsigned long flags; | ||
6648 | |||
6649 | while (wait) { | ||
6650 | unsigned long shadow; | ||
6651 | int cstart, previ = -1; | ||
6652 | |||
6653 | /* | ||
6654 | * When flipping from kernel to user, we can't change | ||
6655 | * the checking type if the buffer is allocated to the | ||
6656 | * driver. It's OK the other direction, because it's | ||
6657 | * from close, and we have just disarmed all the | ||
6658 | * buffers. All the kernel-to-kernel changes are also | ||
6659 | * OK. | ||
6660 | */ | ||
6661 | for (cstart = start; cstart <= last; cstart++) { | ||
6662 | i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) | ||
6663 | / BITS_PER_LONG; | ||
6664 | if (i != previ) { | ||
6665 | shadow = (unsigned long) | ||
6666 | le64_to_cpu(dd->pioavailregs_dma[i]); | ||
6667 | previ = i; | ||
6668 | } | ||
6669 | if (test_bit(((2 * cstart) + | ||
6670 | QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT) | ||
6671 | % BITS_PER_LONG, &shadow)) | ||
6672 | break; | ||
6673 | } | ||
6674 | |||
6675 | if (cstart > last) | ||
6676 | break; | ||
6677 | |||
6678 | if (sleeps == QTXSLEEPS) | ||
6679 | break; | ||
6680 | /* make sure we see an updated copy next time around */ | ||
6681 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
6682 | sleeps++; | ||
6683 | msleep(1); | ||
6684 | } | ||
6685 | |||
6686 | switch (which) { | ||
6687 | case TXCHK_CHG_TYPE_DIS1: | ||
6688 | /* | ||
6689 | * disable checking on a range; used by diags; just | ||
6690 | * one buffer, but still written generically | ||
6691 | */ | ||
6692 | for (i = start; i <= last; i++) | ||
6693 | clear_bit(i, dd->cspec->sendchkenable); | ||
6694 | break; | ||
6695 | |||
6696 | case TXCHK_CHG_TYPE_ENAB1: | ||
6697 | /* | ||
6698 | * (re)enable checking on a range; used by diags; just | ||
6699 | * one buffer, but still written generically; read | ||
6700 | * scratch to be sure buffer actually triggered, not | ||
6701 | * just flushed from processor. | ||
6702 | */ | ||
6703 | qib_read_kreg32(dd, kr_scratch); | ||
6704 | for (i = start; i <= last; i++) | ||
6705 | set_bit(i, dd->cspec->sendchkenable); | ||
6706 | break; | ||
6707 | |||
6708 | case TXCHK_CHG_TYPE_KERN: | ||
6709 | /* usable by kernel */ | ||
6710 | for (i = start; i <= last; i++) { | ||
6711 | set_bit(i, dd->cspec->sendibchk); | ||
6712 | clear_bit(i, dd->cspec->sendgrhchk); | ||
6713 | } | ||
6714 | spin_lock_irqsave(&dd->uctxt_lock, flags); | ||
6715 | /* see if we need to raise avail update threshold */ | ||
6716 | for (i = dd->first_user_ctxt; | ||
6717 | dd->cspec->updthresh != dd->cspec->updthresh_dflt | ||
6718 | && i < dd->cfgctxts; i++) | ||
6719 | if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt && | ||
6720 | ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1) | ||
6721 | < dd->cspec->updthresh_dflt) | ||
6722 | break; | ||
6723 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
6724 | if (i == dd->cfgctxts) { | ||
6725 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
6726 | dd->cspec->updthresh = dd->cspec->updthresh_dflt; | ||
6727 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | ||
6728 | dd->sendctrl |= (dd->cspec->updthresh & | ||
6729 | SYM_RMASK(SendCtrl, AvailUpdThld)) << | ||
6730 | SYM_LSB(SendCtrl, AvailUpdThld); | ||
6731 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
6732 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
6733 | } | ||
6734 | break; | ||
6735 | |||
6736 | case TXCHK_CHG_TYPE_USER: | ||
6737 | /* for user process */ | ||
6738 | for (i = start; i <= last; i++) { | ||
6739 | clear_bit(i, dd->cspec->sendibchk); | ||
6740 | set_bit(i, dd->cspec->sendgrhchk); | ||
6741 | } | ||
6742 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | ||
6743 | if (rcd && rcd->subctxt_cnt && ((rcd->piocnt | ||
6744 | / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) { | ||
6745 | dd->cspec->updthresh = (rcd->piocnt / | ||
6746 | rcd->subctxt_cnt) - 1; | ||
6747 | dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld); | ||
6748 | dd->sendctrl |= (dd->cspec->updthresh & | ||
6749 | SYM_RMASK(SendCtrl, AvailUpdThld)) | ||
6750 | << SYM_LSB(SendCtrl, AvailUpdThld); | ||
6751 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
6752 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
6753 | } else | ||
6754 | spin_unlock_irqrestore(&dd->sendctrl_lock, flags); | ||
6755 | break; | ||
6756 | |||
6757 | default: | ||
6758 | break; | ||
6759 | } | ||
6760 | |||
6761 | for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i) | ||
6762 | qib_write_kreg(dd, kr_sendcheckmask + i, | ||
6763 | dd->cspec->sendchkenable[i]); | ||
6764 | |||
6765 | for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) { | ||
6766 | qib_write_kreg(dd, kr_sendgrhcheckmask + i, | ||
6767 | dd->cspec->sendgrhchk[i]); | ||
6768 | qib_write_kreg(dd, kr_sendibpktmask + i, | ||
6769 | dd->cspec->sendibchk[i]); | ||
6770 | } | ||
6771 | |||
6772 | /* | ||
6773 | * Be sure whatever we did was seen by the chip and acted upon, | ||
6774 | * before we return. Mostly important for which >= 2. | ||
6775 | */ | ||
6776 | qib_read_kreg32(dd, kr_scratch); | ||
6777 | } | ||
6778 | |||
6779 | |||
6780 | /* useful for trigger analyzers, etc. */ | ||
6781 | static void writescratch(struct qib_devdata *dd, u32 val) | ||
6782 | { | ||
6783 | qib_write_kreg(dd, kr_scratch, val); | ||
6784 | } | ||
6785 | |||
6786 | /* Dummy for now, use chip regs soon */ | ||
6787 | static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum) | ||
6788 | { | ||
6789 | return -ENXIO; | ||
6790 | } | ||
6791 | |||
6792 | /** | ||
6793 | * qib_init_iba7322_funcs - set up the chip-specific function pointers | ||
6794 | * @pdev: the pci_dev for the qlogic_ib device | ||
6795 | * @ent: pci_device_id struct for this dev | ||
6796 | * | ||
6797 | * Also allocates, inits, and returns the devdata struct for this | ||
6798 | * device instance | ||
6799 | * | ||
6800 | * This is global, and is called directly at init to set up the | ||
6801 | * chip-specific function pointers for later use. | ||
6802 | */ | ||
6803 | struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, | ||
6804 | const struct pci_device_id *ent) | ||
6805 | { | ||
6806 | struct qib_devdata *dd; | ||
6807 | int ret, i; | ||
6808 | u32 tabsize, actual_cnt = 0; | ||
6809 | |||
6810 | dd = qib_alloc_devdata(pdev, | ||
6811 | NUM_IB_PORTS * sizeof(struct qib_pportdata) + | ||
6812 | sizeof(struct qib_chip_specific) + | ||
6813 | NUM_IB_PORTS * sizeof(struct qib_chippport_specific)); | ||
6814 | if (IS_ERR(dd)) | ||
6815 | goto bail; | ||
6816 | |||
6817 | dd->f_bringup_serdes = qib_7322_bringup_serdes; | ||
6818 | dd->f_cleanup = qib_setup_7322_cleanup; | ||
6819 | dd->f_clear_tids = qib_7322_clear_tids; | ||
6820 | dd->f_free_irq = qib_7322_free_irq; | ||
6821 | dd->f_get_base_info = qib_7322_get_base_info; | ||
6822 | dd->f_get_msgheader = qib_7322_get_msgheader; | ||
6823 | dd->f_getsendbuf = qib_7322_getsendbuf; | ||
6824 | dd->f_gpio_mod = gpio_7322_mod; | ||
6825 | dd->f_eeprom_wen = qib_7322_eeprom_wen; | ||
6826 | dd->f_hdrqempty = qib_7322_hdrqempty; | ||
6827 | dd->f_ib_updown = qib_7322_ib_updown; | ||
6828 | dd->f_init_ctxt = qib_7322_init_ctxt; | ||
6829 | dd->f_initvl15_bufs = qib_7322_initvl15_bufs; | ||
6830 | dd->f_intr_fallback = qib_7322_intr_fallback; | ||
6831 | dd->f_late_initreg = qib_late_7322_initreg; | ||
6832 | dd->f_setpbc_control = qib_7322_setpbc_control; | ||
6833 | dd->f_portcntr = qib_portcntr_7322; | ||
6834 | dd->f_put_tid = qib_7322_put_tid; | ||
6835 | dd->f_quiet_serdes = qib_7322_mini_quiet_serdes; | ||
6836 | dd->f_rcvctrl = rcvctrl_7322_mod; | ||
6837 | dd->f_read_cntrs = qib_read_7322cntrs; | ||
6838 | dd->f_read_portcntrs = qib_read_7322portcntrs; | ||
6839 | dd->f_reset = qib_do_7322_reset; | ||
6840 | dd->f_init_sdma_regs = init_sdma_7322_regs; | ||
6841 | dd->f_sdma_busy = qib_sdma_7322_busy; | ||
6842 | dd->f_sdma_gethead = qib_sdma_7322_gethead; | ||
6843 | dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl; | ||
6844 | dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt; | ||
6845 | dd->f_sdma_update_tail = qib_sdma_update_7322_tail; | ||
6846 | dd->f_sendctrl = sendctrl_7322_mod; | ||
6847 | dd->f_set_armlaunch = qib_set_7322_armlaunch; | ||
6848 | dd->f_set_cntr_sample = qib_set_cntr_7322_sample; | ||
6849 | dd->f_iblink_state = qib_7322_iblink_state; | ||
6850 | dd->f_ibphys_portstate = qib_7322_phys_portstate; | ||
6851 | dd->f_get_ib_cfg = qib_7322_get_ib_cfg; | ||
6852 | dd->f_set_ib_cfg = qib_7322_set_ib_cfg; | ||
6853 | dd->f_set_ib_loopback = qib_7322_set_loopback; | ||
6854 | dd->f_get_ib_table = qib_7322_get_ib_table; | ||
6855 | dd->f_set_ib_table = qib_7322_set_ib_table; | ||
6856 | dd->f_set_intr_state = qib_7322_set_intr_state; | ||
6857 | dd->f_setextled = qib_setup_7322_setextled; | ||
6858 | dd->f_txchk_change = qib_7322_txchk_change; | ||
6859 | dd->f_update_usrhead = qib_update_7322_usrhead; | ||
6860 | dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr; | ||
6861 | dd->f_xgxs_reset = qib_7322_mini_pcs_reset; | ||
6862 | dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up; | ||
6863 | dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up; | ||
6864 | dd->f_sdma_init_early = qib_7322_sdma_init_early; | ||
6865 | dd->f_writescratch = writescratch; | ||
6866 | dd->f_tempsense_rd = qib_7322_tempsense_rd; | ||
6867 | /* | ||
6868 | * Do remaining PCIe setup and save PCIe values in dd. | ||
6869 | * Any error printing is already done by the init code. | ||
6870 | * On return, we have the chip mapped, but chip registers | ||
6871 | * are not set up until start of qib_init_7322_variables. | ||
6872 | */ | ||
6873 | ret = qib_pcie_ddinit(dd, pdev, ent); | ||
6874 | if (ret < 0) | ||
6875 | goto bail_free; | ||
6876 | |||
6877 | /* initialize chip-specific variables */ | ||
6878 | ret = qib_init_7322_variables(dd); | ||
6879 | if (ret) | ||
6880 | goto bail_cleanup; | ||
6881 | |||
6882 | if (qib_mini_init || !dd->num_pports) | ||
6883 | goto bail; | ||
6884 | |||
6885 | /* | ||
6886 | * Determine number of vectors we want; depends on port count | ||
6887 | * and number of configured kernel receive queues actually used. | ||
6888 | * Should also depend on whether sdma is enabled or not, but | ||
6889 | * that's such a rare testing case it's not worth worrying about. | ||
6890 | */ | ||
6891 | tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table); | ||
6892 | for (i = 0; i < tabsize; i++) | ||
6893 | if ((i < ARRAY_SIZE(irq_table) && | ||
6894 | irq_table[i].port <= dd->num_pports) || | ||
6895 | (i >= ARRAY_SIZE(irq_table) && | ||
6896 | dd->rcd[i - ARRAY_SIZE(irq_table)])) | ||
6897 | actual_cnt++; | ||
6898 | tabsize = actual_cnt; | ||
6899 | dd->cspec->msix_entries = kmalloc(tabsize * | ||
6900 | sizeof(struct msix_entry), GFP_KERNEL); | ||
6901 | dd->cspec->msix_arg = kmalloc(tabsize * | ||
6902 | sizeof(void *), GFP_KERNEL); | ||
6903 | if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) { | ||
6904 | qib_dev_err(dd, "No memory for MSIx table\n"); | ||
6905 | tabsize = 0; | ||
6906 | } | ||
6907 | for (i = 0; i < tabsize; i++) | ||
6908 | dd->cspec->msix_entries[i].entry = i; | ||
6909 | |||
6910 | if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries)) | ||
6911 | qib_dev_err(dd, "Failed to setup PCIe or interrupts; " | ||
6912 | "continuing anyway\n"); | ||
6913 | /* may be less than we wanted, if not enough available */ | ||
6914 | dd->cspec->num_msix_entries = tabsize; | ||
6915 | |||
6916 | /* setup interrupt handler */ | ||
6917 | qib_setup_7322_interrupt(dd, 1); | ||
6918 | |||
6919 | /* clear diagctrl register, in case diags were running and crashed */ | ||
6920 | qib_write_kreg(dd, kr_hwdiagctrl, 0); | ||
6921 | |||
6922 | #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) | ||
6923 | ret = dca_add_requester(&pdev->dev); | ||
6924 | if (!ret) { | ||
6925 | dd->flags |= QIB_DCA_ENABLED; | ||
6926 | qib_setup_dca(dd); | ||
6927 | } | ||
6928 | #endif | ||
6929 | goto bail; | ||
6930 | |||
6931 | bail_cleanup: | ||
6932 | qib_pcie_ddcleanup(dd); | ||
6933 | bail_free: | ||
6934 | qib_free_devdata(dd); | ||
6935 | dd = ERR_PTR(ret); | ||
6936 | bail: | ||
6937 | return dd; | ||
6938 | } | ||
6939 | |||
6940 | /* | ||
6941 | * Set the table entry at the specified index from the table specified. | ||
6942 | * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first | ||
6943 | * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR. | ||
6944 | * 'idx' below addresses the correct entry, while its 4 LSBs select the | ||
6945 | * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table. | ||
6946 | */ | ||
6947 | #define DDS_ENT_AMP_LSB 14 | ||
6948 | #define DDS_ENT_MAIN_LSB 9 | ||
6949 | #define DDS_ENT_POST_LSB 5 | ||
6950 | #define DDS_ENT_PRE_XTRA_LSB 3 | ||
6951 | #define DDS_ENT_PRE_LSB 0 | ||
6952 | |||
6953 | /* | ||
6954 | * Set one entry in the TxDDS table for the specified port. | ||
6955 | * ridx picks one of the entries, while tp points | ||
6956 | * to the appropriate table entry. | ||
6957 | */ | ||
6958 | static void set_txdds(struct qib_pportdata *ppd, int ridx, | ||
6959 | const struct txdds_ent *tp) | ||
6960 | { | ||
6961 | struct qib_devdata *dd = ppd->dd; | ||
6962 | u32 pack_ent; | ||
6963 | int regidx; | ||
6964 | |||
6965 | /* Get correct offset in chip-space, and in source table */ | ||
6966 | regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx; | ||
6967 | /* | ||
6968 | * We do not use qib_write_kreg_port() because it was intended | ||
6969 | * only for registers in the lower "port specific" pages. | ||
6970 | * So do index calculation by hand. | ||
6971 | */ | ||
6972 | if (ppd->hw_pidx) | ||
6973 | regidx += (dd->palign / sizeof(u64)); | ||
6974 | |||
6975 | pack_ent = tp->amp << DDS_ENT_AMP_LSB; | ||
6976 | pack_ent |= tp->main << DDS_ENT_MAIN_LSB; | ||
6977 | pack_ent |= tp->pre << DDS_ENT_PRE_LSB; | ||
6978 | pack_ent |= tp->post << DDS_ENT_POST_LSB; | ||
6979 | qib_write_kreg(dd, regidx, pack_ent); | ||
6980 | /* Prevent back-to-back writes by hitting scratch */ | ||
6981 | qib_write_kreg(ppd->dd, kr_scratch, 0); | ||
6982 | } | ||
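The register word written above carries the four DDS fields at fixed offsets. A minimal sketch of the same packing (hypothetical helper, mirroring the DDS_ENT_*_LSB values):

#include <stdint.h>

/* Illustrative only: pack one TxDDS entry the way set_txdds() does. */
static uint32_t pack_dds_ent(uint32_t amp, uint32_t pre,
			     uint32_t mainv, uint32_t post)
{
	return (amp   << 14) |	/* DDS_ENT_AMP_LSB  */
	       (mainv <<  9) |	/* DDS_ENT_MAIN_LSB */
	       (post  <<  5) |	/* DDS_ENT_POST_LSB */
	       (pre   <<  0);	/* DDS_ENT_PRE_LSB  */
}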
6983 | |||
6984 | static const struct vendor_txdds_ent vendor_txdds[] = { | ||
6985 | { /* Amphenol 1m 30awg NoEq */ | ||
6986 | { 0x41, 0x50, 0x48 }, "584470002 ", | ||
6987 | { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 }, | ||
6988 | }, | ||
6989 | { /* Amphenol 3m 28awg NoEq */ | ||
6990 | { 0x41, 0x50, 0x48 }, "584470004 ", | ||
6991 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 }, | ||
6992 | }, | ||
6993 | { /* Finisar 3m OM2 Optical */ | ||
6994 | { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL", | ||
6995 | { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 }, | ||
6996 | }, | ||
6997 | { /* Finisar 30m OM2 Optical */ | ||
6998 | { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL", | ||
6999 | { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 }, | ||
7000 | }, | ||
7001 | { /* Finisar Default OM2 Optical */ | ||
7002 | { 0x00, 0x90, 0x65 }, NULL, | ||
7003 | { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 }, | ||
7004 | }, | ||
7005 | { /* Gore 1m 30awg NoEq */ | ||
7006 | { 0x00, 0x21, 0x77 }, "QSN3300-1 ", | ||
7007 | { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 }, | ||
7008 | }, | ||
7009 | { /* Gore 2m 30awg NoEq */ | ||
7010 | { 0x00, 0x21, 0x77 }, "QSN3300-2 ", | ||
7011 | { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 }, | ||
7012 | }, | ||
7013 | { /* Gore 1m 28awg NoEq */ | ||
7014 | { 0x00, 0x21, 0x77 }, "QSN3800-1 ", | ||
7015 | { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 }, | ||
7016 | }, | ||
7017 | { /* Gore 3m 28awg NoEq */ | ||
7018 | { 0x00, 0x21, 0x77 }, "QSN3800-3 ", | ||
7019 | { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 }, | ||
7020 | }, | ||
7021 | { /* Gore 5m 24awg Eq */ | ||
7022 | { 0x00, 0x21, 0x77 }, "QSN7000-5 ", | ||
7023 | { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 }, | ||
7024 | }, | ||
7025 | { /* Gore 7m 24awg Eq */ | ||
7026 | { 0x00, 0x21, 0x77 }, "QSN7000-7 ", | ||
7027 | { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 }, | ||
7028 | }, | ||
7029 | { /* Gore 5m 26awg Eq */ | ||
7030 | { 0x00, 0x21, 0x77 }, "QSN7600-5 ", | ||
7031 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 }, | ||
7032 | }, | ||
7033 | { /* Gore 7m 26awg Eq */ | ||
7034 | { 0x00, 0x21, 0x77 }, "QSN7600-7 ", | ||
7035 | { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 }, | ||
7036 | }, | ||
7037 | { /* Intersil 12m 24awg Active */ | ||
7038 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224", | ||
7039 | { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 }, | ||
7040 | }, | ||
7041 | { /* Intersil 10m 28awg Active */ | ||
7042 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028", | ||
7043 | { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 }, | ||
7044 | }, | ||
7045 | { /* Intersil 7m 30awg Active */ | ||
7046 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730", | ||
7047 | { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 }, | ||
7048 | }, | ||
7049 | { /* Intersil 5m 32awg Active */ | ||
7050 | { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532", | ||
7051 | { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 }, | ||
7052 | }, | ||
7053 | { /* Intersil Default Active */ | ||
7054 | { 0x00, 0x30, 0xB4 }, NULL, | ||
7055 | { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 }, | ||
7056 | }, | ||
7057 | { /* Luxtera 20m Active Optical */ | ||
7058 | { 0x00, 0x25, 0x63 }, NULL, | ||
7059 | { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 }, | ||
7060 | }, | ||
7061 | { /* Molex 1M Cu loopback */ | ||
7062 | { 0x00, 0x09, 0x3A }, "74763-0025 ", | ||
7063 | { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, | ||
7064 | }, | ||
7065 | { /* Molex 2m 28awg NoEq */ | ||
7066 | { 0x00, 0x09, 0x3A }, "74757-2201 ", | ||
7067 | { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 }, | ||
7068 | }, | ||
7069 | }; | ||
7070 | |||
7071 | static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = { | ||
7072 | /* amp, pre, main, post */ | ||
7073 | { 2, 2, 15, 6 }, /* Loopback */ | ||
7074 | { 0, 0, 0, 1 }, /* 2 dB */ | ||
7075 | { 0, 0, 0, 2 }, /* 3 dB */ | ||
7076 | { 0, 0, 0, 3 }, /* 4 dB */ | ||
7077 | { 0, 0, 0, 4 }, /* 5 dB */ | ||
7078 | { 0, 0, 0, 5 }, /* 6 dB */ | ||
7079 | { 0, 0, 0, 6 }, /* 7 dB */ | ||
7080 | { 0, 0, 0, 7 }, /* 8 dB */ | ||
7081 | { 0, 0, 0, 8 }, /* 9 dB */ | ||
7082 | { 0, 0, 0, 9 }, /* 10 dB */ | ||
7083 | { 0, 0, 0, 10 }, /* 11 dB */ | ||
7084 | { 0, 0, 0, 11 }, /* 12 dB */ | ||
7085 | { 0, 0, 0, 12 }, /* 13 dB */ | ||
7086 | { 0, 0, 0, 13 }, /* 14 dB */ | ||
7087 | { 0, 0, 0, 14 }, /* 15 dB */ | ||
7088 | { 0, 0, 0, 15 }, /* 16 dB */ | ||
7089 | }; | ||
7090 | |||
7091 | static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = { | ||
7092 | /* amp, pre, main, post */ | ||
7093 | { 2, 2, 15, 6 }, /* Loopback */ | ||
7094 | { 0, 0, 0, 8 }, /* 2 dB */ | ||
7095 | { 0, 0, 0, 8 }, /* 3 dB */ | ||
7096 | { 0, 0, 0, 9 }, /* 4 dB */ | ||
7097 | { 0, 0, 0, 9 }, /* 5 dB */ | ||
7098 | { 0, 0, 0, 10 }, /* 6 dB */ | ||
7099 | { 0, 0, 0, 10 }, /* 7 dB */ | ||
7100 | { 0, 0, 0, 11 }, /* 8 dB */ | ||
7101 | { 0, 0, 0, 11 }, /* 9 dB */ | ||
7102 | { 0, 0, 0, 12 }, /* 10 dB */ | ||
7103 | { 0, 0, 0, 12 }, /* 11 dB */ | ||
7104 | { 0, 0, 0, 13 }, /* 12 dB */ | ||
7105 | { 0, 0, 0, 13 }, /* 13 dB */ | ||
7106 | { 0, 0, 0, 14 }, /* 14 dB */ | ||
7107 | { 0, 0, 0, 14 }, /* 15 dB */ | ||
7108 | { 0, 0, 0, 15 }, /* 16 dB */ | ||
7109 | }; | ||
7110 | |||
7111 | static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { | ||
7112 | /* amp, pre, main, post */ | ||
7113 | { 2, 2, 15, 6 }, /* Loopback */ | ||
7114 | { 0, 1, 0, 7 }, /* 2 dB */ | ||
7115 | { 0, 1, 0, 9 }, /* 3 dB */ | ||
7116 | { 0, 1, 0, 11 }, /* 4 dB */ | ||
7117 | { 0, 1, 0, 13 }, /* 5 dB */ | ||
7118 | { 0, 1, 0, 15 }, /* 6 dB */ | ||
7119 | { 0, 1, 3, 15 }, /* 7 dB */ | ||
7120 | { 0, 1, 7, 15 }, /* 8 dB */ | ||
7121 | { 0, 1, 7, 15 }, /* 9 dB */ | ||
7122 | { 0, 1, 8, 15 }, /* 10 dB */ | ||
7123 | { 0, 1, 9, 15 }, /* 11 dB */ | ||
7124 | { 0, 1, 10, 15 }, /* 12 dB */ | ||
7125 | { 0, 2, 6, 15 }, /* 13 dB */ | ||
7126 | { 0, 2, 7, 15 }, /* 14 dB */ | ||
7127 | { 0, 2, 8, 15 }, /* 15 dB */ | ||
7128 | { 0, 2, 9, 15 }, /* 16 dB */ | ||
7129 | }; | ||
7130 | |||
7131 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, | ||
7132 | unsigned atten) | ||
7133 | { | ||
7134 | /* | ||
7135 | * The attenuation table starts at 2dB for entry 1, | ||
7136 | * with entry 0 being the loopback entry. | ||
7137 | */ | ||
7138 | if (atten <= 2) | ||
7139 | atten = 1; | ||
7140 | else if (atten > TXDDS_TABLE_SZ) | ||
7141 | atten = TXDDS_TABLE_SZ - 1; | ||
7142 | else | ||
7143 | atten--; | ||
7144 | return txdds + atten; | ||
7145 | } | ||
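As a worked example of the index math above: a cable reporting 10 dB of attenuation selects entry 9, the row commented "10 dB" in the tables above; anything at or below 2 dB falls back to entry 1 ("2 dB"); and anything beyond TXDDS_TABLE_SZ clamps to the last row ("16 dB").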
7146 | |||
7147 | /* | ||
7148 | * If override is set, the module parameter cable_atten has a value | ||
7149 | * for this specific port, so use it, rather than our normal mechanism. | ||
7150 | */ | ||
7151 | static void find_best_ent(struct qib_pportdata *ppd, | ||
7152 | const struct txdds_ent **sdr_dds, | ||
7153 | const struct txdds_ent **ddr_dds, | ||
7154 | const struct txdds_ent **qdr_dds, int override) | ||
7155 | { | ||
7156 | struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache; | ||
7157 | int idx; | ||
7158 | |||
7159 | /* Search table of known cables */ | ||
7160 | for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) { | ||
7161 | const struct vendor_txdds_ent *v = vendor_txdds + idx; | ||
7162 | |||
7163 | if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) && | ||
7164 | (!v->partnum || | ||
7165 | !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) { | ||
7166 | *sdr_dds = &v->sdr; | ||
7167 | *ddr_dds = &v->ddr; | ||
7168 | *qdr_dds = &v->qdr; | ||
7169 | return; | ||
7170 | } | ||
7171 | } | ||
7172 | |||
7173 | /* Lookup serdes setting by cable type and attenuation */ | ||
7174 | if (!override && QSFP_IS_ACTIVE(qd->tech)) { | ||
7175 | *sdr_dds = txdds_sdr + ppd->dd->board_atten; | ||
7176 | *ddr_dds = txdds_ddr + ppd->dd->board_atten; | ||
7177 | *qdr_dds = txdds_qdr + ppd->dd->board_atten; | ||
7178 | return; | ||
7179 | } | ||
7180 | |||
7181 | if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] || | ||
7182 | qd->atten[1])) { | ||
7183 | *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]); | ||
7184 | *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]); | ||
7185 | *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]); | ||
7186 | return; | ||
7187 | } else { | ||
7188 | /* | ||
7189 | * If we have no (or incomplete) data from the cable | ||
7190 | * EEPROM, or no QSFP, use the module parameter value | ||
7191 | * to index into the attenuation table. | ||
7192 | */ | ||
7193 | *sdr_dds = &txdds_sdr[ppd->cpspec->no_eep]; | ||
7194 | *ddr_dds = &txdds_ddr[ppd->cpspec->no_eep]; | ||
7195 | *qdr_dds = &txdds_qdr[ppd->cpspec->no_eep]; | ||
7196 | } | ||
7197 | } | ||
7198 | |||
7199 | static void init_txdds_table(struct qib_pportdata *ppd, int override) | ||
7200 | { | ||
7201 | const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; | ||
7202 | struct txdds_ent *dds; | ||
7203 | int idx; | ||
7204 | int single_ent = 0; | ||
7205 | |||
7206 | if (IS_QMH(ppd->dd)) { | ||
7207 | /* normally will be overridden, via setup_qmh() */ | ||
7208 | sdr_dds = &qmh_sdr_txdds; | ||
7209 | ddr_dds = &qmh_ddr_txdds; | ||
7210 | qdr_dds = &qmh_qdr_txdds; | ||
7211 | single_ent = 1; | ||
7212 | } else if (IS_QME(ppd->dd)) { | ||
7213 | sdr_dds = &qme_sdr_txdds; | ||
7214 | ddr_dds = &qme_ddr_txdds; | ||
7215 | qdr_dds = &qme_qdr_txdds; | ||
7216 | single_ent = 1; | ||
7217 | } else | ||
7218 | find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override); | ||
7219 | |||
7220 | /* Fill in the first entry with the best entry found. */ | ||
7221 | set_txdds(ppd, 0, sdr_dds); | ||
7222 | set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds); | ||
7223 | set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds); | ||
7224 | |||
7225 | /* | ||
7226 | * for our current speed, also write that value into the | ||
7227 | * tx serdes registers. | ||
7228 | */ | ||
7229 | dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? | ||
7230 | qdr_dds : (ppd->link_speed_active == | ||
7231 | QIB_IB_DDR ? ddr_dds : sdr_dds)); | ||
7232 | write_tx_serdes_param(ppd, dds); | ||
7233 | |||
7234 | /* Fill in the remaining entries with the default table values. */ | ||
7235 | for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) { | ||
7236 | set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx); | ||
7237 | set_txdds(ppd, idx + TXDDS_TABLE_SZ, | ||
7238 | single_ent ? ddr_dds : txdds_ddr + idx); | ||
7239 | set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ, | ||
7240 | single_ent ? qdr_dds : txdds_qdr + idx); | ||
7241 | } | ||
7242 | } | ||
7243 | |||
7244 | #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl) | ||
7245 | #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg) | ||
7246 | #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy) | ||
7247 | #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address) | ||
7248 | #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data) | ||
7249 | #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read) | ||
7250 | #define AHB_TRANS_TRIES 10 | ||
7251 | |||
7252 | /* | ||
7253 | * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3, | ||
7254 | * 5=subsystem, which is why most calls have "chan + (chan >> 1)" | ||
7255 | * for the channel argument. | ||
7256 | */ | ||
7257 | static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr, | ||
7258 | u32 data, u32 mask) | ||
7259 | { | ||
7260 | u32 rd_data, wr_data, sz_mask; | ||
7261 | u64 trans, acc, prev_acc; | ||
7262 | u32 ret = 0xBAD0BAD; | ||
7263 | int tries; | ||
7264 | |||
7265 | prev_acc = qib_read_kreg64(dd, KR_AHB_ACC); | ||
7266 | /* From this point on, make sure we return access */ | ||
7267 | acc = (quad << 1) | 1; | ||
7268 | qib_write_kreg(dd, KR_AHB_ACC, acc); | ||
7269 | |||
7270 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | ||
7271 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | ||
7272 | if (trans & AHB_TRANS_RDY) | ||
7273 | break; | ||
7274 | } | ||
7275 | if (tries >= AHB_TRANS_TRIES) { | ||
7276 | qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES); | ||
7277 | goto bail; | ||
7278 | } | ||
7279 | |||
7280 | /* If mask is not all 1s, we need to read, but different SerDes | ||
7281 | * entities have different sizes. | ||
7282 | */ | ||
7283 | sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1; | ||
7284 | wr_data = data & mask & sz_mask; | ||
7285 | if ((~mask & sz_mask) != 0) { | ||
7286 | trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); | ||
7287 | qib_write_kreg(dd, KR_AHB_TRANS, trans); | ||
7288 | |||
7289 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | ||
7290 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | ||
7291 | if (trans & AHB_TRANS_RDY) | ||
7292 | break; | ||
7293 | } | ||
7294 | if (tries >= AHB_TRANS_TRIES) { | ||
7295 | qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n", | ||
7296 | AHB_TRANS_TRIES); | ||
7297 | goto bail; | ||
7298 | } | ||
7299 | /* Re-read in case host split reads and read data first */ | ||
7300 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | ||
7301 | rd_data = (uint32_t)(trans >> AHB_DATA_LSB); | ||
7302 | wr_data |= (rd_data & ~mask & sz_mask); | ||
7303 | } | ||
7304 | |||
7305 | /* If mask is not zero, we need to write. */ | ||
7306 | if (mask & sz_mask) { | ||
7307 | trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1); | ||
7308 | trans |= ((uint64_t)wr_data << AHB_DATA_LSB); | ||
7309 | trans |= AHB_WR; | ||
7310 | qib_write_kreg(dd, KR_AHB_TRANS, trans); | ||
7311 | |||
7312 | for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) { | ||
7313 | trans = qib_read_kreg64(dd, KR_AHB_TRANS); | ||
7314 | if (trans & AHB_TRANS_RDY) | ||
7315 | break; | ||
7316 | } | ||
7317 | if (tries >= AHB_TRANS_TRIES) { | ||
7318 | qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n", | ||
7319 | AHB_TRANS_TRIES); | ||
7320 | goto bail; | ||
7321 | } | ||
7322 | } | ||
7323 | ret = wr_data; | ||
7324 | bail: | ||
7325 | qib_write_kreg(dd, KR_AHB_ACC, prev_acc); | ||
7326 | return ret; | ||
7327 | } | ||
7328 | |||
7329 | static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data, | ||
7330 | unsigned mask) | ||
7331 | { | ||
7332 | struct qib_devdata *dd = ppd->dd; | ||
7333 | int chan; | ||
7334 | u32 rbc; | ||
7335 | |||
7336 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | ||
7337 | ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr, | ||
7338 | data, mask); | ||
7339 | rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | ||
7340 | addr, 0, 0); | ||
7341 | } | ||
7342 | } | ||
7343 | |||
7344 | static int serdes_7322_init(struct qib_pportdata *ppd) | ||
7345 | { | ||
7346 | u64 data; | ||
7347 | u32 le_val; | ||
7348 | |||
7349 | /* | ||
7350 | * Initialize the Tx DDS tables. Also done every QSFP event, | ||
7351 | * for adapters with QSFP | ||
7352 | */ | ||
7353 | init_txdds_table(ppd, 0); | ||
7354 | |||
7355 | /* Patch some SerDes defaults to "Better for IB" */ | ||
7356 | /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ | ||
7357 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); | ||
7358 | |||
7359 | /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ | ||
7360 | ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); | ||
7361 | /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */ | ||
7362 | ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6)); | ||
7363 | |||
7364 | /* May be overridden in qsfp_7322_event */ | ||
7365 | le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; | ||
7366 | ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); | ||
7367 | |||
7368 | /* enable LE1 adaptation for all but QME, which is disabled */ | ||
7369 | le_val = IS_QME(ppd->dd) ? 0 : 1; | ||
7370 | ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5)); | ||
7371 | |||
7372 | /* Clear cmode-override, may be set from older driver */ | ||
7373 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | ||
7374 | |||
7375 | /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */ | ||
7376 | ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8)); | ||
7377 | |||
7378 | /* setup LoS params; these are subsystem, so chan == 5 */ | ||
7379 | /* LoS filter threshold_count on, ch 0-3, set to 8 */ | ||
7380 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); | ||
7381 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); | ||
7382 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); | ||
7383 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); | ||
7384 | |||
7385 | /* LoS filter threshold_count off, ch 0-3, set to 4 */ | ||
7386 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); | ||
7387 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); | ||
7388 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); | ||
7389 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); | ||
7390 | |||
7391 | /* LoS filter select enabled */ | ||
7392 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); | ||
7393 | |||
7394 | /* LoS target data: SDR=4, DDR=2, QDR=1 */ | ||
7395 | ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ | ||
7396 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | ||
7397 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | ||
7398 | |||
7399 | data = qib_read_kreg_port(ppd, krp_serdesctrl); | ||
7400 | qib_write_kreg_port(ppd, krp_serdesctrl, data | | ||
7401 | SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); | ||
7402 | |||
7404 | /* rxbistena; set to 0 to avoid effects of it switching later */ | ||
7404 | ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); | ||
7405 | |||
7406 | /* Configure 4 DFE taps, and only they adapt */ | ||
7407 | ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0)); | ||
7408 | |||
7409 | /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ | ||
7410 | le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; | ||
7411 | ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); | ||
7412 | |||
7413 | /* | ||
7414 | * Set receive adaptation mode. SDR and DDR adaptation are | ||
7415 | * always on, and QDR is initially enabled; later disabled. | ||
7416 | */ | ||
7417 | qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); | ||
7418 | qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); | ||
7419 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | ||
7420 | ppd->dd->cspec->r1 ? | ||
7421 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); | ||
7422 | ppd->cpspec->qdr_dfe_on = 1; | ||
7423 | |||
7424 | /* FLoop LOS gate: PPM filter enabled */ | ||
7425 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); | ||
7426 | |||
7427 | /* rx offset center enabled */ | ||
7428 | ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4); | ||
7429 | |||
7430 | if (!ppd->dd->cspec->r1) { | ||
7431 | ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12); | ||
7432 | ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8); | ||
7433 | } | ||
7434 | |||
7435 | /* Set the frequency loop bandwidth to 15 */ | ||
7436 | ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5)); | ||
7437 | |||
7438 | return 0; | ||
7439 | } | ||
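serdes_7322_init() relies on the data/mask convention of ahb_mod() and ibsd_wr_allchans(): only the bits set in the mask are changed, everything else keeps its previous value. A standalone sketch of that read-modify-write step, with a BMASK() definition that mirrors the one used earlier in this file (assumed here, not quoted from it):

#include <stdio.h>

/* assumed equivalent of the driver's BMASK(): contiguous mask from lsb to msb */
#define BMASK(msb, lsb)	(((1U << ((msb) + 1 - (lsb))) - 1) << (lsb))

/* keep bits outside the mask, take bits inside the mask from data */
static unsigned int rmw(unsigned int reg, unsigned int data, unsigned int mask)
{
	return (reg & ~mask) | (data & mask);
}

int main(void)
{
	unsigned int reg = 0xffff;

	/* e.g. "cdr_timing[11:9] = 0" from the Timing Loop Bandwidth patch above */
	reg = rmw(reg, 0, BMASK(11, 9));
	printf("reg = 0x%04x\n", reg);	/* 0xf1ff: only bits 11:9 cleared */
	return 0;
}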
7440 | |||
7441 | /* start adjust QMH serdes parameters */ | ||
7442 | |||
7443 | static void set_man_code(struct qib_pportdata *ppd, int chan, int code) | ||
7444 | { | ||
7445 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | ||
7446 | 9, code << 9, 0x3f << 9); | ||
7447 | } | ||
7448 | |||
7449 | static void set_man_mode_h1(struct qib_pportdata *ppd, int chan, | ||
7450 | int enable, u32 tapenable) | ||
7451 | { | ||
7452 | if (enable) | ||
7453 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | ||
7454 | 1, 3 << 10, 0x1f << 10); | ||
7455 | else | ||
7456 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | ||
7457 | 1, 0, 0x1f << 10); | ||
7458 | } | ||
7459 | |||
7460 | /* Set clock to 1, 0, 1, 0 */ | ||
7461 | static void clock_man(struct qib_pportdata *ppd, int chan) | ||
7462 | { | ||
7463 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | ||
7464 | 4, 0x4000, 0x4000); | ||
7465 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | ||
7466 | 4, 0, 0x4000); | ||
7467 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | ||
7468 | 4, 0x4000, 0x4000); | ||
7469 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), | ||
7470 | 4, 0, 0x4000); | ||
7471 | } | ||
7472 | |||
7473 | /* | ||
7474 | * write the current Tx serdes pre,post,main,amp settings into the serdes. | ||
7475 | * The caller must pass the settings appropriate for the current speed, | ||
7476 | * or not care if they are correct for the current speed. | ||
7477 | */ | ||
7478 | static void write_tx_serdes_param(struct qib_pportdata *ppd, | ||
7479 | struct txdds_ent *txdds) | ||
7480 | { | ||
7481 | u64 deemph; | ||
7482 | |||
7483 | deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override); | ||
7484 | /* field names for amp, main, post, pre, respectively */ | ||
7485 | deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) | | ||
7486 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | | ||
7487 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | | ||
7488 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); | ||
7489 | deemph |= 1ULL << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7490 | tx_override_deemphasis_select); | ||
7491 | deemph |= txdds->amp << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7492 | txampcntl_d2a); | ||
7493 | deemph |= txdds->main << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7494 | txc0_ena); | ||
7495 | deemph |= txdds->post << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7496 | txcp1_ena); | ||
7497 | deemph |= txdds->pre << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7498 | txcn1_ena); | ||
7499 | qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); | ||
7500 | } | ||
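write_tx_serdes_param() follows the usual pattern for the generated SYM_MASK()/SYM_LSB() register macros: clear each field with its mask, then OR in the new value shifted to the field's least significant bit. A small sketch of that pattern with made-up field positions (the real positions come from the chip register header, which is not part of this hunk):

#include <stdio.h>
#include <stdint.h>

#define AMP_LSB		4			/* illustrative field position */
#define AMP_MASK	(0xfULL << AMP_LSB)	/* illustrative 4-bit field */

int main(void)
{
	uint64_t deemph = ~0ULL;	/* pretend current register value */
	uint64_t amp = 0x3;		/* new value for the field */

	deemph &= ~AMP_MASK;		/* clear the field */
	deemph |= amp << AMP_LSB;	/* insert the new value */
	printf("deemph = 0x%016llx\n", (unsigned long long)deemph);
	return 0;
}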
7501 | |||
7502 | /* | ||
7503 | * set per-bay, per channel parameters. For now, we ignore | ||
7504 | * do_tx, and always set tx parameters, and set them with the same value | ||
7505 | * for all channels, using the channel 0 value. We may switch to | ||
7506 | * per-channel settings in the future, and that method only needs | ||
7507 | * to be done once. | ||
7508 | * Because this also writes the IBC txdds table with a single set | ||
7509 | * of values, it should be called only for cases where we want to completely | ||
7510 | * force a specific setting, typically only for mez cards. | ||
7511 | */ | ||
7512 | static void adj_tx_serdes(struct qib_pportdata *ppd) | ||
7513 | { | ||
7514 | struct txdds_ent txdds; | ||
7515 | int i; | ||
7516 | u8 *amp, *pre, *mainv, *post; | ||
7517 | |||
7518 | /* | ||
7519 | * Because we use TX_DEEMPHASIS_OVERRIDE, we need to | ||
7520 | * always do tx side, just like H1, since it is cleared | ||
7521 | * by link down | ||
7522 | */ | ||
7523 | amp = ppd->cpspec->amp; | ||
7524 | pre = ppd->cpspec->pre; | ||
7525 | mainv = ppd->cpspec->mainv; | ||
7526 | post = ppd->cpspec->post; | ||
7527 | |||
7528 | amp[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7529 | txampcntl_d2a); | ||
7530 | mainv[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7531 | txc0_ena); | ||
7532 | post[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7533 | txcp1_ena); | ||
7534 | pre[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
7535 | txcn1_ena); | ||
7536 | |||
7537 | /* | ||
7538 | * Use the channel zero values, only, for now, for | ||
7539 | * all channels | ||
7540 | */ | ||
7541 | txdds.amp = amp[0]; | ||
7542 | txdds.pre = pre[0]; | ||
7543 | txdds.main = mainv[0]; | ||
7544 | txdds.post = post[0]; | ||
7545 | |||
7546 | /* write the QDR table for IBC use, as backup for link down */ | ||
7547 | for (i = 0; i < ARRAY_SIZE(txdds_qdr); ++i) | ||
7548 | set_txdds(ppd, i + 32, &txdds); | ||
7549 | |||
7550 | write_tx_serdes_param(ppd, &txdds); | ||
7551 | } | ||
7552 | |||
7553 | /* set QDR forced value for H1, if needed */ | ||
7554 | static void force_h1(struct qib_pportdata *ppd) | ||
7555 | { | ||
7556 | int chan; | ||
7557 | |||
7558 | ppd->cpspec->qdr_reforce = 0; | ||
7559 | if (!ppd->dd->cspec->r1) | ||
7560 | return; | ||
7561 | |||
7562 | for (chan = 0; chan < SERDES_CHANS; chan++) { | ||
7563 | set_man_mode_h1(ppd, chan, 1, 0); | ||
7564 | set_man_code(ppd, chan, ppd->cpspec->h1_val); | ||
7565 | clock_man(ppd, chan); | ||
7566 | set_man_mode_h1(ppd, chan, 0, 0); | ||
7567 | } | ||
7568 | } | ||
7569 | |||
7570 | /* | ||
7571 | * Parse the parameters for the QMH7342, to get rx and tx serdes | ||
7572 | * settings for that Bay, for both possible mez connectors (PCIe bus) | ||
7573 | * and IB link (one link on mez1, two possible on mez2). | ||
7574 | * | ||
7575 | * Data is comma or white space separated. | ||
7576 | * | ||
7577 | * A set of data has 7 groups, rx and tx groups have SERDES_CHANS values, | ||
7578 | * one per IB lane (serdes channel). | ||
7579 | * The groups are Bay, bus# H1 rcv, and amp, pre, post, main Tx values (QDR). | ||
7580 | * The Bay # is used only for debugging currently. | ||
7581 | * H1 values are set whenever the link goes down, or is at cfg_test or | ||
7582 | * cfg_wait_enh. Tx values are programmed once, when this routine is called | ||
7583 | * (and with default values at chip initialization). Values are any base, in | ||
7584 | * strtoul style, and values are separated by comma, or any white space | ||
7585 | * (space, tab, newline). | ||
7586 | * | ||
7587 | * An example set might look like this (white space vs | ||
7588 | * comma used for human ease of reading) | ||
7589 | * The ordering is a set of Bay# Bus# H1, amp, pre, post, and main for mez1 IB1, | ||
7590 | * repeat for mez2 IB1, then mez2 IB2. | ||
7591 | * | ||
7592 | * B B H1:0 amp:0 pre:0 post: 0 main:0 | ||
7593 | * a u H1: 1 amp: 1 pre: 1 post: 1 main: 1 | ||
7594 | * y s H1: 2 amp: 2 pre: 2 post: 2 main: 2 | ||
7595 | * H1: 4 amp: 3 pre: 3 post: 3 main: 3 | ||
7596 | * 1 3 8,6,5,6 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 | ||
7597 | * 1 6 7,6,6,7 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 | ||
7598 | * 1 6 9,7,7,8 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 | ||
7599 | */ | ||
7600 | #define N_QMH_FIELDS 22 | ||
7601 | static int setup_qmh_params(const char *str, struct kernel_param *kp) | ||
7602 | { | ||
7603 | char *abuf, *v, *nv, *nvp; | ||
7604 | struct qib_devdata *dd; | ||
7605 | struct qib_pportdata *ppd; | ||
7606 | u32 mez, vlen, nf, port, bay; | ||
7607 | int ret = 0, found = 0; | ||
7608 | |||
7609 | vlen = strlen(str) + 1; | ||
7610 | abuf = kmalloc(vlen, GFP_KERNEL); | ||
7611 | if (!abuf) { | ||
7612 | printk(KERN_INFO QIB_DRV_NAME | ||
7613 | " Unable to allocate QMH param buffer; ignoring\n"); | ||
7614 | return 0; | ||
7615 | } | ||
7616 | memcpy(abuf, str, vlen); | ||
7617 | v = abuf; | ||
7618 | |||
7619 | /* these 3 are because gcc can't know they are set before used */ | ||
7620 | port = 1; | ||
7621 | mez = 1; /* used only for debugging */ | ||
7622 | bay = 0; /* used only for debugging */ | ||
7623 | ppd = NULL; | ||
7624 | for (nf = 0; (nv = strsep(&v, ", \t\n\r")) && | ||
7625 | nf < (N_QMH_FIELDS * 3);) { | ||
7626 | u32 val; | ||
7627 | |||
7628 | if (!*nv) | ||
7629 | /* allow for multiple separators */ | ||
7630 | continue; | ||
7631 | |||
7632 | val = simple_strtoul(nv, &nvp, 0); | ||
7633 | if (nv == nvp) { | ||
7634 | printk(KERN_INFO QIB_DRV_NAME | ||
7635 | " Bay%u, mez%u IB%u non-numeric value (%s) " | ||
7636 | "field #%u, ignoring rest\n", bay, mez, | ||
7637 | port, nv, nf % (N_QMH_FIELDS * 3)); | ||
7638 | ret = -EINVAL; | ||
7639 | goto bail; | ||
7640 | } | ||
7641 | if (!(nf % N_QMH_FIELDS)) { | ||
7642 | ppd = NULL; | ||
7643 | bay = val; | ||
7644 | if (!bay || bay > 16) { | ||
7645 | printk(KERN_INFO QIB_DRV_NAME | ||
7646 | " Invalid bay # %u, field %u, " | ||
7647 | "ignoring rest\n", bay, nf); | ||
7648 | ret = -EINVAL; | ||
7649 | goto bail; | ||
7650 | } | ||
7651 | } else if ((nf % N_QMH_FIELDS) == 1) { | ||
7652 | u32 bus = val; | ||
7653 | if (nf == 1) { | ||
7654 | mez = 1; | ||
7655 | port = 1; | ||
7656 | } else if (nf == (N_QMH_FIELDS + 1)) { | ||
7657 | mez = 2; | ||
7658 | port = 1; | ||
7659 | } else { | ||
7660 | mez = 2; | ||
7661 | port = 2; | ||
7662 | } | ||
7663 | list_for_each_entry(dd, &qib_dev_list, list) { | ||
7664 | if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322 | ||
7665 | || !IS_QMH(dd)) | ||
7666 | continue; /* only for QMH cards */ | ||
7667 | if (dd->pcidev->bus->number == bus) { | ||
7668 | found++; | ||
7669 | ppd = &dd->pport[port - 1]; | ||
7670 | } | ||
7671 | } | ||
7672 | } else if (ppd) { | ||
7673 | u32 parm = (nf % N_QMH_FIELDS) - 2; | ||
7674 | if (parm < SERDES_CHANS && !(parm % SERDES_CHANS)) | ||
7675 | ppd->cpspec->h1_val = val; | ||
7676 | else if (parm < (2 * SERDES_CHANS)) | ||
7677 | ppd->cpspec->amp[parm % SERDES_CHANS] = val; | ||
7678 | else if (parm < (3 * SERDES_CHANS)) | ||
7679 | ppd->cpspec->pre[parm % SERDES_CHANS] = val; | ||
7680 | else if (parm < (4 * SERDES_CHANS)) | ||
7681 | ppd->cpspec->post[parm % SERDES_CHANS] = val; | ||
7682 | else { | ||
7683 | ppd->cpspec->mainv[parm % SERDES_CHANS] = val; | ||
7684 | /* At the end of a port, set params */ | ||
7685 | if (parm == ((5 * SERDES_CHANS) - 1)) | ||
7686 | adj_tx_serdes(ppd); | ||
7687 | } | ||
7688 | } | ||
7689 | nf++; | ||
7690 | } | ||
7691 | if (!found) { | ||
7692 | printk(KERN_ERR QIB_DRV_NAME | ||
7693 | ": No match found for qmh_serdes_setup parameter\n"); | ||
7694 | ret = -EINVAL; | ||
7695 | } | ||
7696 | bail: | ||
7697 | kfree(abuf); | ||
7698 | return ret; | ||
7699 | } | ||
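Taken together with the format comment above, one complete set of values (bay, PCIe bus, the four per-lane H1 values, then amp, pre, post and main for each lane) can be supplied entirely with comma separators, so no shell quoting is needed. The parameter name below is inferred from the error message in setup_qmh_params(), and ib_qib is the expected module name; treat both as assumptions:

	modprobe ib_qib qmh_serdes_setup=1,3,8,6,5,6,0,0,0,0,1,1,1,1,10,10,10,10,3,3,3,3

This corresponds to the first sample row in the comment above: bay 1, bus 3, H1 values 8,6,5,6, amp 0,0,0,0, pre 1,1,1,1, post 10,10,10,10, main 3,3,3,3.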
7700 | |||
7701 | /* | ||
7702 | * Similarly for QME7342, but the format is simpler, values are the | ||
7703 | * same for all mez card positions in a blade (2 or 4 per blade), but | ||
7704 | * are different for some blades vs others, and we don't need to | ||
7705 | * specify different parameters for different serdes channels or different | ||
7706 | * IB ports. | ||
7707 | * Format is: h1 amp,pre,post,main | ||
7708 | * Alternate format (so ports can be different): Pport# h1 amp,pre,post,main | ||
7709 | */ | ||
7710 | #define N_QME_FIELDS 5 | ||
7711 | static int setup_qme_params(const char *str, struct kernel_param *kp) | ||
7712 | { | ||
7713 | char *abuf, *v, *nv, *nvp; | ||
7714 | struct qib_devdata *dd; | ||
7715 | u32 vlen, nf, port = 0; | ||
7716 | u8 h1, tx[4]; /* amp, pre, post, main */ | ||
7717 | int ret = -EINVAL; | ||
7718 | char *seplist; | ||
7719 | |||
7720 | vlen = strlen(str) + 1; | ||
7721 | abuf = kmalloc(vlen, GFP_KERNEL); | ||
7722 | if (!abuf) { | ||
7723 | printk(KERN_INFO QIB_DRV_NAME | ||
7724 | " Unable to allocate QME param buffer; ignoring\n"); | ||
7725 | return 0; | ||
7726 | } | ||
7727 | strncpy(abuf, str, vlen); | ||
7728 | |||
7729 | v = abuf; | ||
7730 | seplist = " \t"; | ||
7731 | h1 = H1_FORCE_QME; /* gcc can't figure out it's always set before use */ | ||
7732 | |||
7733 | for (nf = 0; (nv = strsep(&v, seplist)); ) { | ||
7734 | u32 val; | ||
7735 | |||
7736 | if (!*nv) | ||
7737 | /* allow for multiple separators */ | ||
7738 | continue; | ||
7739 | |||
7740 | if (!nf && *nv == 'P') { | ||
7741 | /* alternate format with port */ | ||
7742 | val = simple_strtoul(++nv, &nvp, 0); | ||
7743 | if (nv == nvp || port >= NUM_IB_PORTS) { | ||
7744 | printk(KERN_INFO QIB_DRV_NAME | ||
7745 | " %s: non-numeric port value (%s) " | ||
7746 | "ignoring rest\n", __func__, nv); | ||
7747 | goto done; | ||
7748 | } | ||
7749 | port = val; | ||
7750 | continue; /* without incrementing nf */ | ||
7751 | } | ||
7752 | val = simple_strtoul(nv, &nvp, 0); | ||
7753 | if (nv == nvp) { | ||
7754 | printk(KERN_INFO QIB_DRV_NAME | ||
7755 | " %s: non-numeric value (%s) " | ||
7756 | "field #%u, ignoring rest\n", __func__, | ||
7757 | nv, nf); | ||
7758 | goto done; | ||
7759 | } | ||
7760 | if (!nf) { | ||
7761 | h1 = val; | ||
7762 | seplist = ","; | ||
7763 | } else | ||
7764 | tx[nf - 1] = val; | ||
7765 | if (++nf == N_QME_FIELDS) { | ||
7766 | list_for_each_entry(dd, &qib_dev_list, list) { | ||
7767 | int pidx, i; | ||
7768 | if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322 | ||
7769 | || !IS_QME(dd)) | ||
7770 | continue; /* only for QME cards */ | ||
7771 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
7772 | struct qib_pportdata *ppd; | ||
7773 | ppd = &dd->pport[pidx]; | ||
7774 | if ((port && ppd->port != port) || | ||
7775 | !ppd->link_speed_supported) | ||
7776 | continue; | ||
7777 | ppd->cpspec->h1_val = h1; | ||
7778 | for (i = 0; i < SERDES_CHANS; i++) { | ||
7779 | ppd->cpspec->amp[i] = tx[0]; | ||
7780 | ppd->cpspec->pre[i] = tx[1]; | ||
7781 | ppd->cpspec->post[i] = tx[2]; | ||
7782 | ppd->cpspec->mainv[i] = tx[3]; | ||
7783 | } | ||
7784 | adj_tx_serdes(ppd); | ||
7785 | } | ||
7786 | } | ||
7787 | ret = 0; | ||
7788 | goto done; | ||
7789 | } | ||
7790 | } | ||
7791 | printk(KERN_INFO QIB_DRV_NAME | ||
7792 | " %s: Only %u of %u fields provided, skipping\n", | ||
7793 | __func__, nf, N_QME_FIELDS); | ||
7794 | done: | ||
7795 | kfree(abuf); | ||
7796 | return ret; | ||
7797 | } | ||
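The simpler QME format handled above is one whitespace-separated H1 value followed by four comma-separated Tx values in amp,pre,post,main order, optionally preceded by a "P<port>" token to restrict the settings to one IB port. Illustrative parameter values only (the module-parameter name this handler is registered under is outside this hunk):

	8 0,1,10,3	applies h1=8, amp=0, pre=1, post=10, main=3 to all ports
	P2 8 0,1,10,3	same values, but only for IB port 2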
7798 | |||
7799 | #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN) | ||
7800 | #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en) | ||
7801 | |||
7802 | #define R_OPCODE_LSB 3 | ||
7803 | #define R_OP_NOP 0 | ||
7804 | #define R_OP_SHIFT 2 | ||
7805 | #define R_OP_UPDATE 3 | ||
7806 | #define R_TDI_LSB 2 | ||
7807 | #define R_TDO_LSB 1 | ||
7808 | #define R_RDY 1 | ||
7809 | |||
7810 | static int qib_r_grab(struct qib_devdata *dd) | ||
7811 | { | ||
7812 | u64 val; | ||
7813 | val = SJA_EN; | ||
7814 | qib_write_kreg(dd, kr_r_access, val); | ||
7815 | qib_read_kreg32(dd, kr_scratch); | ||
7816 | return 0; | ||
7817 | } | ||
7818 | |||
7819 | /* qib_r_wait_for_rdy() not only waits for the ready bit, it | ||
7820 | * returns the current state of R_TDO | ||
7821 | */ | ||
7822 | static int qib_r_wait_for_rdy(struct qib_devdata *dd) | ||
7823 | { | ||
7824 | u64 val; | ||
7825 | int timeout; | ||
7826 | for (timeout = 0; timeout < 100 ; ++timeout) { | ||
7827 | val = qib_read_kreg32(dd, kr_r_access); | ||
7828 | if (val & R_RDY) | ||
7829 | return (val >> R_TDO_LSB) & 1; | ||
7830 | } | ||
7831 | return -1; | ||
7832 | } | ||
7833 | |||
7834 | static int qib_r_shift(struct qib_devdata *dd, int bisten, | ||
7835 | int len, u8 *inp, u8 *outp) | ||
7836 | { | ||
7837 | u64 valbase, val; | ||
7838 | int ret, pos; | ||
7839 | |||
7840 | valbase = SJA_EN | (bisten << BISTEN_LSB) | | ||
7841 | (R_OP_SHIFT << R_OPCODE_LSB); | ||
7842 | ret = qib_r_wait_for_rdy(dd); | ||
7843 | if (ret < 0) | ||
7844 | goto bail; | ||
7845 | for (pos = 0; pos < len; ++pos) { | ||
7846 | val = valbase; | ||
7847 | if (outp) { | ||
7848 | outp[pos >> 3] &= ~(1 << (pos & 7)); | ||
7849 | outp[pos >> 3] |= (ret << (pos & 7)); | ||
7850 | } | ||
7851 | if (inp) { | ||
7852 | int tdi = inp[pos >> 3] >> (pos & 7); | ||
7853 | val |= ((tdi & 1) << R_TDI_LSB); | ||
7854 | } | ||
7855 | qib_write_kreg(dd, kr_r_access, val); | ||
7856 | qib_read_kreg32(dd, kr_scratch); | ||
7857 | ret = qib_r_wait_for_rdy(dd); | ||
7858 | if (ret < 0) | ||
7859 | break; | ||
7860 | } | ||
7861 | /* Restore to NOP between operations. */ | ||
7862 | val = SJA_EN | (bisten << BISTEN_LSB); | ||
7863 | qib_write_kreg(dd, kr_r_access, val); | ||
7864 | qib_read_kreg32(dd, kr_scratch); | ||
7865 | ret = qib_r_wait_for_rdy(dd); | ||
7866 | |||
7867 | if (ret >= 0) | ||
7868 | ret = pos; | ||
7869 | bail: | ||
7870 | return ret; | ||
7871 | } | ||
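qib_r_shift() packs the scan-chain bits LSB-first within each byte of its inp/outp buffers: bit number pos lives in byte pos >> 3 at bit position pos & 7. A standalone sketch of that packing (the helpers are illustrative, not part of the driver):

#include <stdio.h>

static int get_bit(const unsigned char *buf, int pos)
{
	return (buf[pos >> 3] >> (pos & 7)) & 1;
}

static void set_bit_val(unsigned char *buf, int pos, int val)
{
	buf[pos >> 3] &= ~(1 << (pos & 7));
	buf[pos >> 3] |= (val & 1) << (pos & 7);
}

int main(void)
{
	unsigned char chain[2] = { 0, 0 };

	set_bit_val(chain, 0, 1);	/* first bit shifted in: byte 0, bit 0 */
	set_bit_val(chain, 9, 1);	/* tenth bit: byte 1, bit 1 */
	printf("chain = %02x %02x, bit 9 = %d\n",
	       chain[0], chain[1], get_bit(chain, 9));	/* 01 02, bit 9 = 1 */
	return 0;
}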
7872 | |||
7873 | static int qib_r_update(struct qib_devdata *dd, int bisten) | ||
7874 | { | ||
7875 | u64 val; | ||
7876 | int ret; | ||
7877 | |||
7878 | val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB); | ||
7879 | ret = qib_r_wait_for_rdy(dd); | ||
7880 | if (ret >= 0) { | ||
7881 | qib_write_kreg(dd, kr_r_access, val); | ||
7882 | qib_read_kreg32(dd, kr_scratch); | ||
7883 | } | ||
7884 | return ret; | ||
7885 | } | ||
7886 | |||
7887 | #define BISTEN_PORT_SEL 15 | ||
7888 | #define LEN_PORT_SEL 625 | ||
7889 | #define BISTEN_AT 17 | ||
7890 | #define LEN_AT 156 | ||
7891 | #define BISTEN_ETM 16 | ||
7892 | #define LEN_ETM 632 | ||
7893 | |||
7894 | #define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE) | ||
7895 | |||
7896 | /* these are common for all IB port use cases. */ | ||
7897 | static u8 reset_at[BIT2BYTE(LEN_AT)] = { | ||
7898 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7899 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, | ||
7900 | }; | ||
7901 | static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = { | ||
7902 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7903 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7904 | 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e, | ||
7905 | 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7, | ||
7906 | 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70, | ||
7907 | 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00, | ||
7908 | 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7909 | 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, | ||
7910 | }; | ||
7911 | static u8 at[BIT2BYTE(LEN_AT)] = { | ||
7912 | 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, | ||
7913 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, | ||
7914 | }; | ||
7915 | |||
7916 | /* used for IB1 or IB2, only one in use */ | ||
7917 | static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = { | ||
7918 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7919 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7920 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7921 | 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00, | ||
7922 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7923 | 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03, | ||
7924 | 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00, | ||
7925 | 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00, | ||
7926 | }; | ||
7927 | |||
7928 | /* used when both IB1 and IB2 are in use */ | ||
7929 | static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = { | ||
7930 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7931 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, | ||
7932 | 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7933 | 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05, | ||
7934 | 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, | ||
7935 | 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07, | ||
7936 | 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00, | ||
7937 | 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, | ||
7938 | }; | ||
7939 | |||
7940 | /* used when only IB1 is in use */ | ||
7941 | static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = { | ||
7942 | 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13, | ||
7943 | 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c, | ||
7944 | 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, | ||
7945 | 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, | ||
7946 | 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32, | ||
7947 | 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, | ||
7948 | 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, | ||
7949 | 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7950 | }; | ||
7951 | |||
7952 | /* used when only IB2 is in use */ | ||
7953 | static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = { | ||
7954 | 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39, | ||
7955 | 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32, | ||
7956 | 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, | ||
7957 | 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, | ||
7958 | 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32, | ||
7959 | 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, | ||
7960 | 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, | ||
7961 | 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, | ||
7962 | }; | ||
7963 | |||
7964 | /* used when both IB1 and IB2 are in use */ | ||
7965 | static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = { | ||
7966 | 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13, | ||
7967 | 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c, | ||
7968 | 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, | ||
7969 | 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, | ||
7970 | 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32, | ||
7971 | 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a, | ||
7972 | 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, | ||
7973 | 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
7974 | }; | ||
7975 | |||
7976 | /* | ||
7977 | * Do setup to properly handle IB link recovery; if both is set, we | ||
7978 | * are initializing to cover both ports; otherwise we are initializing | ||
7979 | * to cover a single port card, or the port has reached INIT and we may | ||
7980 | * need to switch coverage types. | ||
7981 | */ | ||
7982 | static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both) | ||
7983 | { | ||
7984 | u8 *portsel, *etm; | ||
7985 | struct qib_devdata *dd = ppd->dd; | ||
7986 | |||
7987 | if (!ppd->dd->cspec->r1) | ||
7988 | return; | ||
7989 | if (!both) { | ||
7990 | dd->cspec->recovery_ports_initted++; | ||
7991 | ppd->cpspec->recovery_init = 1; | ||
7992 | } | ||
7993 | if (!both && dd->cspec->recovery_ports_initted == 1) { | ||
7994 | portsel = ppd->port == 1 ? portsel_port1 : portsel_port2; | ||
7995 | etm = atetm_1port; | ||
7996 | } else { | ||
7997 | portsel = portsel_2port; | ||
7998 | etm = atetm_2port; | ||
7999 | } | ||
8000 | |||
8001 | if (qib_r_grab(dd) < 0 || | ||
8002 | qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 || | ||
8003 | qib_r_update(dd, BISTEN_ETM) < 0 || | ||
8004 | qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 || | ||
8005 | qib_r_update(dd, BISTEN_AT) < 0 || | ||
8006 | qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL, | ||
8007 | portsel, NULL) < 0 || | ||
8008 | qib_r_update(dd, BISTEN_PORT_SEL) < 0 || | ||
8009 | qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 || | ||
8010 | qib_r_update(dd, BISTEN_AT) < 0 || | ||
8011 | qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 || | ||
8012 | qib_r_update(dd, BISTEN_ETM) < 0) | ||
8013 | qib_dev_err(dd, "Failed IB link recovery setup\n"); | ||
8014 | } | ||
8015 | |||
8016 | static void check_7322_rxe_status(struct qib_pportdata *ppd) | ||
8017 | { | ||
8018 | struct qib_devdata *dd = ppd->dd; | ||
8019 | u64 fmask; | ||
8020 | |||
8021 | if (dd->cspec->recovery_ports_initted != 1) | ||
8022 | return; /* rest doesn't apply to dualport */ | ||
8023 | qib_write_kreg(dd, kr_control, dd->control | | ||
8024 | SYM_MASK(Control, FreezeMode)); | ||
8025 | (void)qib_read_kreg64(dd, kr_scratch); | ||
8026 | udelay(3); /* ibcreset asserted 400ns, be sure that's over */ | ||
8027 | fmask = qib_read_kreg64(dd, kr_act_fmask); | ||
8028 | if (!fmask) { | ||
8029 | /* | ||
8030 | * require a powercycle before we'll work again, and make | ||
8031 | * sure we get no more interrupts, and don't turn off | ||
8032 | * freeze. | ||
8033 | */ | ||
8034 | ppd->dd->cspec->stay_in_freeze = 1; | ||
8035 | qib_7322_set_intr_state(ppd->dd, 0); | ||
8036 | qib_write_kreg(dd, kr_fmask, 0ULL); | ||
8037 | qib_dev_err(dd, "HCA unusable until powercycled\n"); | ||
8038 | return; /* eventually reset */ | ||
8039 | } | ||
8040 | |||
8041 | qib_write_kreg(ppd->dd, kr_hwerrclear, | ||
8042 | SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1)); | ||
8043 | |||
8044 | /* don't do the full clear_freeze(), not needed for this */ | ||
8045 | qib_write_kreg(dd, kr_control, dd->control); | ||
8046 | qib_read_kreg32(dd, kr_scratch); | ||
8047 | /* take IBC out of reset */ | ||
8048 | if (ppd->link_speed_supported) { | ||
8049 | ppd->cpspec->ibcctrl_a &= | ||
8050 | ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn); | ||
8051 | qib_write_kreg_port(ppd, krp_ibcctrl_a, | ||
8052 | ppd->cpspec->ibcctrl_a); | ||
8053 | qib_read_kreg32(dd, kr_scratch); | ||
8054 | if (ppd->lflags & QIBL_IB_LINK_DISABLED) | ||
8055 | qib_set_ib_7322_lstate(ppd, 0, | ||
8056 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | ||
8057 | } | ||
8058 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c new file mode 100644 index 000000000000..c0139c07e97e --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -0,0 +1,1580 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/pci.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | #include <linux/vmalloc.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/idr.h> | ||
40 | |||
41 | #include "qib.h" | ||
42 | #include "qib_common.h" | ||
43 | |||
44 | /* | ||
45 | * min buffers we want to have per context, after those the driver uses | ||
46 | */ | ||
47 | #define QIB_MIN_USER_CTXT_BUFCNT 7 | ||
48 | |||
49 | #define QLOGIC_IB_R_SOFTWARE_MASK 0xFF | ||
50 | #define QLOGIC_IB_R_SOFTWARE_SHIFT 24 | ||
51 | #define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62) | ||
52 | |||
53 | /* | ||
54 | * Number of ctxts we are configured to use (to allow for more pio | ||
55 | * buffers per ctxt, etc.) Zero means use chip value. | ||
56 | */ | ||
57 | ushort qib_cfgctxts; | ||
58 | module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO); | ||
59 | MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use"); | ||
60 | |||
61 | /* | ||
62 | * If set, do not write to any regs if avoidable, hack to allow | ||
63 | * check for deranged default register values. | ||
64 | */ | ||
65 | ushort qib_mini_init; | ||
66 | module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO); | ||
67 | MODULE_PARM_DESC(mini_init, "If set, do minimal diag init"); | ||
68 | |||
69 | unsigned qib_n_krcv_queues; | ||
70 | module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO); | ||
71 | MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); | ||
72 | |||
73 | /* | ||
74 | * qib_wc_pat parameter: | ||
75 | * 0 is WC via MTRR | ||
76 | * 1 is WC via PAT | ||
77 | * If PAT initialization fails, code reverts to MTRR | ||
78 | */ | ||
79 | unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ | ||
80 | module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); | ||
81 | MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); | ||
82 | |||
83 | struct workqueue_struct *qib_wq; | ||
84 | struct workqueue_struct *qib_cq_wq; | ||
85 | |||
86 | static void verify_interrupt(unsigned long); | ||
87 | |||
88 | static struct idr qib_unit_table; | ||
89 | u32 qib_cpulist_count; | ||
90 | unsigned long *qib_cpulist; | ||
91 | |||
92 | /* set number of contexts we'll actually use */ | ||
93 | void qib_set_ctxtcnt(struct qib_devdata *dd) | ||
94 | { | ||
95 | if (!qib_cfgctxts) | ||
96 | dd->cfgctxts = dd->ctxtcnt; | ||
97 | else if (qib_cfgctxts < dd->num_pports) | ||
98 | dd->cfgctxts = dd->ctxtcnt; | ||
99 | else if (qib_cfgctxts <= dd->ctxtcnt) | ||
100 | dd->cfgctxts = qib_cfgctxts; | ||
101 | else | ||
102 | dd->cfgctxts = dd->ctxtcnt; | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Common code for creating the receive context array. | ||
107 | */ | ||
108 | int qib_create_ctxts(struct qib_devdata *dd) | ||
109 | { | ||
110 | unsigned i; | ||
111 | int ret; | ||
112 | |||
113 | /* | ||
114 | * Allocate full ctxtcnt array, rather than just cfgctxts, because | ||
115 | * cleanup iterates across all possible ctxts. | ||
116 | */ | ||
117 | dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); | ||
118 | if (!dd->rcd) { | ||
119 | qib_dev_err(dd, "Unable to allocate ctxtdata array, " | ||
120 | "failing\n"); | ||
121 | ret = -ENOMEM; | ||
122 | goto done; | ||
123 | } | ||
124 | |||
125 | /* create (one or more) kctxt */ | ||
126 | for (i = 0; i < dd->first_user_ctxt; ++i) { | ||
127 | struct qib_pportdata *ppd; | ||
128 | struct qib_ctxtdata *rcd; | ||
129 | |||
130 | if (dd->skip_kctxt_mask & (1 << i)) | ||
131 | continue; | ||
132 | |||
133 | ppd = dd->pport + (i % dd->num_pports); | ||
134 | rcd = qib_create_ctxtdata(ppd, i); | ||
135 | if (!rcd) { | ||
136 | qib_dev_err(dd, "Unable to allocate ctxtdata" | ||
137 | " for Kernel ctxt, failing\n"); | ||
138 | ret = -ENOMEM; | ||
139 | goto done; | ||
140 | } | ||
141 | rcd->pkeys[0] = QIB_DEFAULT_P_KEY; | ||
142 | rcd->seq_cnt = 1; | ||
143 | } | ||
144 | ret = 0; | ||
145 | done: | ||
146 | return ret; | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * Common code for user and kernel context setup. | ||
151 | */ | ||
152 | struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt) | ||
153 | { | ||
154 | struct qib_devdata *dd = ppd->dd; | ||
155 | struct qib_ctxtdata *rcd; | ||
156 | |||
157 | rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); | ||
158 | if (rcd) { | ||
159 | INIT_LIST_HEAD(&rcd->qp_wait_list); | ||
160 | rcd->ppd = ppd; | ||
161 | rcd->dd = dd; | ||
162 | rcd->cnt = 1; | ||
163 | rcd->ctxt = ctxt; | ||
164 | dd->rcd[ctxt] = rcd; | ||
165 | |||
166 | dd->f_init_ctxt(rcd); | ||
167 | |||
168 | /* | ||
169 | * To avoid wasting a lot of memory, we allocate 32KB chunks | ||
170 | * of physically contiguous memory, advance through it until | ||
171 | * used up and then allocate more. Of course, we need | ||
172 | * memory to store those extra pointers, now. 32KB seems to | ||
173 | * be the most that is "safe" under memory pressure | ||
174 | * (creating large files and then copying them over | ||
175 | * NFS while doing lots of MPI jobs). The OOM killer can | ||
176 | * get invoked, even though we say we can sleep and this can | ||
177 | * cause significant system problems.... | ||
178 | */ | ||
179 | rcd->rcvegrbuf_size = 0x8000; | ||
180 | rcd->rcvegrbufs_perchunk = | ||
181 | rcd->rcvegrbuf_size / dd->rcvegrbufsize; | ||
182 | rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt + | ||
183 | rcd->rcvegrbufs_perchunk - 1) / | ||
184 | rcd->rcvegrbufs_perchunk; | ||
185 | } | ||
186 | return rcd; | ||
187 | } | ||
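The 32KB chunking in qib_create_ctxtdata() works out to a per-chunk buffer count and a rounded-up chunk count. A quick numeric sketch of the arithmetic, with assumed (not chip-accurate) buffer size and count:

#include <stdio.h>

int main(void)
{
	unsigned chunk_size = 0x8000;	/* 32KB, as in qib_create_ctxtdata() */
	unsigned egrbufsize = 4096;	/* assumed eager buffer size */
	unsigned egrcnt = 2048;		/* assumed eager buffer count */
	unsigned perchunk = chunk_size / egrbufsize;
	unsigned chunks = (egrcnt + perchunk - 1) / perchunk;	/* round up */

	printf("%u bufs per chunk, %u chunks\n", perchunk, chunks);	/* 8, 256 */
	return 0;
}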
188 | |||
189 | /* | ||
190 | * Common code for initializing the physical port structure. | ||
191 | */ | ||
192 | void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, | ||
193 | u8 hw_pidx, u8 port) | ||
194 | { | ||
195 | ppd->dd = dd; | ||
196 | ppd->hw_pidx = hw_pidx; | ||
197 | ppd->port = port; /* IB port number, not index */ | ||
198 | |||
199 | spin_lock_init(&ppd->sdma_lock); | ||
200 | spin_lock_init(&ppd->lflags_lock); | ||
201 | init_waitqueue_head(&ppd->state_wait); | ||
202 | |||
203 | init_timer(&ppd->symerr_clear_timer); | ||
204 | ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup; | ||
205 | ppd->symerr_clear_timer.data = (unsigned long)ppd; | ||
206 | } | ||
207 | |||
208 | static int init_pioavailregs(struct qib_devdata *dd) | ||
209 | { | ||
210 | int ret, pidx; | ||
211 | u64 *status_page; | ||
212 | |||
213 | dd->pioavailregs_dma = dma_alloc_coherent( | ||
214 | &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, | ||
215 | GFP_KERNEL); | ||
216 | if (!dd->pioavailregs_dma) { | ||
217 | qib_dev_err(dd, "failed to allocate PIOavail reg area " | ||
218 | "in memory\n"); | ||
219 | ret = -ENOMEM; | ||
220 | goto done; | ||
221 | } | ||
222 | |||
223 | /* | ||
224 | * We really want L2 cache aligned, but for current CPUs of | ||
225 | * interest, they are the same. | ||
226 | */ | ||
227 | status_page = (u64 *) | ||
228 | ((char *) dd->pioavailregs_dma + | ||
229 | ((2 * L1_CACHE_BYTES + | ||
230 | dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); | ||
231 | /* device status comes first, for backwards compatibility */ | ||
232 | dd->devstatusp = status_page; | ||
233 | *status_page++ = 0; | ||
234 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
235 | dd->pport[pidx].statusp = status_page; | ||
236 | *status_page++ = 0; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * Setup buffer to hold freeze and other messages, accessible to | ||
241 | * apps, following statusp. This is per-unit, not per port. | ||
242 | */ | ||
243 | dd->freezemsg = (char *) status_page; | ||
244 | *dd->freezemsg = 0; | ||
245 | /* length of msg buffer is "whatever is left" */ | ||
246 | ret = (char *) status_page - (char *) dd->pioavailregs_dma; | ||
247 | dd->freezelen = PAGE_SIZE - ret; | ||
248 | |||
249 | ret = 0; | ||
250 | |||
251 | done: | ||
252 | return ret; | ||
253 | } | ||
254 | |||
255 | /** | ||
256 | * init_shadow_tids - allocate the shadow TID array | ||
257 | * @dd: the qlogic_ib device | ||
258 | * | ||
259 | * allocate the shadow TID array, so we can qib_munlock previous | ||
260 | * entries. It may make more sense to move the pageshadow to the | ||
261 | * ctxt data structure, so we only allocate memory for ctxts actually | ||
262 | * in use, since we are at 8k per ctxt now. | ||
263 | * We don't want failures here to prevent use of the driver/chip, | ||
264 | * so no return value. | ||
265 | */ | ||
266 | static void init_shadow_tids(struct qib_devdata *dd) | ||
267 | { | ||
268 | struct page **pages; | ||
269 | dma_addr_t *addrs; | ||
270 | |||
271 | pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); | ||
272 | if (!pages) { | ||
273 | qib_dev_err(dd, "failed to allocate shadow page * " | ||
274 | "array, no expected sends!\n"); | ||
275 | goto bail; | ||
276 | } | ||
277 | |||
278 | addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); | ||
279 | if (!addrs) { | ||
280 | qib_dev_err(dd, "failed to allocate shadow dma handle " | ||
281 | "array, no expected sends!\n"); | ||
282 | goto bail_free; | ||
283 | } | ||
284 | |||
285 | memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); | ||
286 | memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); | ||
287 | |||
288 | dd->pageshadow = pages; | ||
289 | dd->physshadow = addrs; | ||
290 | return; | ||
291 | |||
292 | bail_free: | ||
293 | vfree(pages); | ||
294 | bail: | ||
295 | dd->pageshadow = NULL; | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Do initialization for device that is only needed on | ||
300 | * first detect, not on resets. | ||
301 | */ | ||
302 | static int loadtime_init(struct qib_devdata *dd) | ||
303 | { | ||
304 | int ret = 0; | ||
305 | |||
306 | if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & | ||
307 | QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) { | ||
308 | qib_dev_err(dd, "Driver only handles version %d, " | ||
309 | "chip swversion is %d (%llx), failing\n", | ||
310 | QIB_CHIP_SWVERSION, | ||
311 | (int)(dd->revision >> | ||
312 | QLOGIC_IB_R_SOFTWARE_SHIFT) & | ||
313 | QLOGIC_IB_R_SOFTWARE_MASK, | ||
314 | (unsigned long long) dd->revision); | ||
315 | ret = -ENOSYS; | ||
316 | goto done; | ||
317 | } | ||
318 | |||
319 | if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK) | ||
320 | qib_devinfo(dd->pcidev, "%s", dd->boardversion); | ||
321 | |||
322 | spin_lock_init(&dd->pioavail_lock); | ||
323 | spin_lock_init(&dd->sendctrl_lock); | ||
324 | spin_lock_init(&dd->uctxt_lock); | ||
325 | spin_lock_init(&dd->qib_diag_trans_lock); | ||
326 | spin_lock_init(&dd->eep_st_lock); | ||
327 | mutex_init(&dd->eep_lock); | ||
328 | |||
329 | if (qib_mini_init) | ||
330 | goto done; | ||
331 | |||
332 | ret = init_pioavailregs(dd); | ||
333 | init_shadow_tids(dd); | ||
334 | |||
335 | qib_get_eeprom_info(dd); | ||
336 | |||
337 | /* setup time (don't start yet) to verify we got interrupt */ | ||
338 | init_timer(&dd->intrchk_timer); | ||
339 | dd->intrchk_timer.function = verify_interrupt; | ||
340 | dd->intrchk_timer.data = (unsigned long) dd; | ||
341 | |||
342 | done: | ||
343 | return ret; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * init_after_reset - re-initialize after a reset | ||
348 | * @dd: the qlogic_ib device | ||
349 | * | ||
350 | * sanity check at least some of the values after reset, and | ||
351 | * ensure no receive or transmit (explicitly, in case reset | ||
352 | * failed) | ||
353 | */ | ||
354 | static int init_after_reset(struct qib_devdata *dd) | ||
355 | { | ||
356 | int i; | ||
357 | |||
358 | /* | ||
359 | * Ensure chip does no sends or receives, tail updates, or | ||
360 | * pioavail updates while we re-initialize. This is mostly | ||
361 | * for the driver data structures, not chip registers. | ||
362 | */ | ||
363 | for (i = 0; i < dd->num_pports; ++i) { | ||
364 | /* | ||
365 | * ctxt == -1 means "all contexts". Only really safe for | ||
366 | * _dis_abling things, as here. | ||
367 | */ | ||
368 | dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS | | ||
369 | QIB_RCVCTRL_INTRAVAIL_DIS | | ||
370 | QIB_RCVCTRL_TAILUPD_DIS, -1); | ||
371 | /* Redundant across ports for some, but no big deal. */ | ||
372 | dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS | | ||
373 | QIB_SENDCTRL_AVAIL_DIS); | ||
374 | } | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static void enable_chip(struct qib_devdata *dd) | ||
380 | { | ||
381 | u64 rcvmask; | ||
382 | int i; | ||
383 | |||
384 | /* | ||
385 | * Enable PIO send, and update of PIOavail regs to memory. | ||
386 | */ | ||
387 | for (i = 0; i < dd->num_pports; ++i) | ||
388 | dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB | | ||
389 | QIB_SENDCTRL_AVAIL_ENB); | ||
390 | /* | ||
391 | * Enable kernel ctxts' receive and receive interrupt. | ||
392 | * Other ctxts done as user opens and inits them. | ||
393 | */ | ||
394 | rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB; | ||
395 | rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ? | ||
396 | QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB; | ||
397 | for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { | ||
398 | struct qib_ctxtdata *rcd = dd->rcd[i]; | ||
399 | |||
400 | if (rcd) | ||
401 | dd->f_rcvctrl(rcd->ppd, rcvmask, i); | ||
402 | } | ||
403 | } | ||
404 | |||
405 | static void verify_interrupt(unsigned long opaque) | ||
406 | { | ||
407 | struct qib_devdata *dd = (struct qib_devdata *) opaque; | ||
408 | |||
409 | if (!dd) | ||
410 | return; /* being torn down */ | ||
411 | |||
412 | /* | ||
413 | * If we don't have a lid or any interrupts, let the user know and | ||
414 | * don't bother checking again. | ||
415 | */ | ||
416 | if (dd->int_counter == 0) { | ||
417 | if (!dd->f_intr_fallback(dd)) | ||
418 | dev_err(&dd->pcidev->dev, "No interrupts detected, " | ||
419 | "not usable.\n"); | ||
420 | else /* re-arm the timer to see if fallback works */ | ||
421 | mod_timer(&dd->intrchk_timer, jiffies + HZ/2); | ||
422 | } | ||
423 | } | ||
424 | |||
425 | static void init_piobuf_state(struct qib_devdata *dd) | ||
426 | { | ||
427 | int i, pidx; | ||
428 | u32 uctxts; | ||
429 | |||
430 | /* | ||
431 | * Ensure all buffers are free, and fifos empty. Buffers | ||
432 | * are common, so only do once for port 0. | ||
433 | * | ||
434 | * After enable and qib_chg_pioavailkernel so we can safely | ||
435 | * enable pioavail updates and PIOENABLE. After this, packets | ||
436 | * are ready and able to go out. | ||
437 | */ | ||
438 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL); | ||
439 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | ||
440 | dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH); | ||
441 | |||
442 | /* | ||
443 | * If the sendbufs don't divide evenly, add one extra to each of the lower | ||
444 | * numbered contexts. pbufsctxt and lastctxt_piobuf are | ||
445 | * calculated in chip-specific code because it may cause some | ||
446 | * chip-specific adjustments to be made. | ||
447 | */ | ||
448 | uctxts = dd->cfgctxts - dd->first_user_ctxt; | ||
449 | dd->ctxts_extrabuf = dd->pbufsctxt ? | ||
450 | dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0; | ||
451 | |||
452 | /* | ||
453 | * Set up the shadow copies of the piobufavail registers, | ||
454 | * which we compare against the chip registers for now, and | ||
455 | * the in memory DMA'ed copies of the registers. | ||
456 | * By now pioavail updates to memory should have occurred, so | ||
457 | * copy them into our working/shadow registers; this is in | ||
458 | * case something went wrong with abort, but mostly to get the | ||
459 | * initial values of the generation bit correct. | ||
460 | */ | ||
461 | for (i = 0; i < dd->pioavregs; i++) { | ||
462 | __le64 tmp; | ||
463 | |||
464 | tmp = dd->pioavailregs_dma[i]; | ||
465 | /* | ||
466 | * Don't need to worry about pioavailkernel here | ||
467 | * because we will call qib_chg_pioavailkernel() later | ||
468 | * in initialization, to busy out buffers as needed. | ||
469 | */ | ||
470 | dd->pioavailshadow[i] = le64_to_cpu(tmp); | ||
471 | } | ||
472 | while (i < ARRAY_SIZE(dd->pioavailshadow)) | ||
473 | dd->pioavailshadow[i++] = 0; /* for debugging sanity */ | ||
474 | |||
475 | /* after pioavailshadow is setup */ | ||
476 | qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k, | ||
477 | TXCHK_CHG_TYPE_KERN, NULL); | ||
478 | dd->f_initvl15_bufs(dd); | ||
479 | } | ||
480 | |||
481 | /** | ||
482 | * qib_init - do the actual initialization sequence on the chip | ||
483 | * @dd: the qlogic_ib device | ||
484 | * @reinit: reinitializing, so don't allocate new memory | ||
485 | * | ||
486 | * Do the actual initialization sequence on the chip. This is done | ||
487 | * both from the init routine called from the PCI infrastructure, and | ||
488 | * when we reset the chip, or detect that it was reset internally, | ||
489 | * or it's administratively re-enabled. | ||
490 | * | ||
491 | * Memory allocation here and in called routines is only done in | ||
492 | * the first case (reinit == 0). We have to be careful, because even | ||
493 | * without memory allocation, we need to re-write all the chip registers | ||
494 | * TIDs, etc. after the reset or enable has completed. | ||
495 | */ | ||
496 | int qib_init(struct qib_devdata *dd, int reinit) | ||
497 | { | ||
498 | int ret = 0, pidx, lastfail = 0; | ||
499 | u32 portok = 0; | ||
500 | unsigned i; | ||
501 | struct qib_ctxtdata *rcd; | ||
502 | struct qib_pportdata *ppd; | ||
503 | unsigned long flags; | ||
504 | |||
505 | /* Set linkstate to unknown, so we can watch for a transition. */ | ||
506 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
507 | ppd = dd->pport + pidx; | ||
508 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
509 | ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED | | ||
510 | QIBL_LINKDOWN | QIBL_LINKINIT | | ||
511 | QIBL_LINKV); | ||
512 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
513 | } | ||
514 | |||
515 | if (reinit) | ||
516 | ret = init_after_reset(dd); | ||
517 | else | ||
518 | ret = loadtime_init(dd); | ||
519 | if (ret) | ||
520 | goto done; | ||
521 | |||
522 | /* Bypass most chip-init, to get to device creation */ | ||
523 | if (qib_mini_init) | ||
524 | return 0; | ||
525 | |||
526 | ret = dd->f_late_initreg(dd); | ||
527 | if (ret) | ||
528 | goto done; | ||
529 | |||
530 | /* dd->rcd can be NULL if early init failed */ | ||
531 | for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { | ||
532 | /* | ||
533 | * Set up the (kernel) rcvhdr queue and egr TIDs. If doing | ||
534 | * re-init, the simplest way to handle this is to free | ||
535 | * existing, and re-allocate. | ||
536 | * Need to re-create rest of ctxt 0 ctxtdata as well. | ||
537 | */ | ||
538 | rcd = dd->rcd[i]; | ||
539 | if (!rcd) | ||
540 | continue; | ||
541 | |||
542 | lastfail = qib_create_rcvhdrq(dd, rcd); | ||
543 | if (!lastfail) | ||
544 | lastfail = qib_setup_eagerbufs(rcd); | ||
545 | if (lastfail) { | ||
546 | qib_dev_err(dd, "failed to allocate kernel ctxt's " | ||
547 | "rcvhdrq and/or egr bufs\n"); | ||
548 | continue; | ||
549 | } | ||
550 | } | ||
551 | |||
552 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
553 | int mtu; | ||
554 | if (lastfail) | ||
555 | ret = lastfail; | ||
556 | ppd = dd->pport + pidx; | ||
557 | mtu = ib_mtu_enum_to_int(qib_ibmtu); | ||
558 | if (mtu == -1) { | ||
559 | mtu = QIB_DEFAULT_MTU; | ||
560 | qib_ibmtu = 0; /* don't leave invalid value */ | ||
561 | } | ||
562 | /* set max we can ever have for this driver load */ | ||
563 | ppd->init_ibmaxlen = min(mtu > 2048 ? | ||
564 | dd->piosize4k : dd->piosize2k, | ||
565 | dd->rcvegrbufsize + | ||
566 | (dd->rcvhdrentsize << 2)); | ||
567 | /* | ||
568 | * Have to initialize ibmaxlen, but this will normally | ||
569 | * change immediately in qib_set_mtu(). | ||
570 | */ | ||
571 | ppd->ibmaxlen = ppd->init_ibmaxlen; | ||
572 | qib_set_mtu(ppd, mtu); | ||
573 | |||
574 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
575 | ppd->lflags |= QIBL_IB_LINK_DISABLED; | ||
576 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
577 | |||
578 | lastfail = dd->f_bringup_serdes(ppd); | ||
579 | if (lastfail) { | ||
580 | qib_devinfo(dd->pcidev, | ||
581 | "Failed to bringup IB port %u\n", ppd->port); | ||
582 | lastfail = -ENETDOWN; | ||
583 | continue; | ||
584 | } | ||
585 | |||
586 | /* let link come up, and enable IBC */ | ||
587 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
588 | ppd->lflags &= ~QIBL_IB_LINK_DISABLED; | ||
589 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
590 | portok++; | ||
591 | } | ||
592 | |||
593 | if (!portok) { | ||
594 | /* none of the ports initialized */ | ||
595 | if (!ret && lastfail) | ||
596 | ret = lastfail; | ||
597 | else if (!ret) | ||
598 | ret = -ENETDOWN; | ||
599 | /* but continue on, so we can debug cause */ | ||
600 | } | ||
601 | |||
602 | enable_chip(dd); | ||
603 | |||
604 | init_piobuf_state(dd); | ||
605 | |||
606 | done: | ||
607 | if (!ret) { | ||
608 | /* chip is OK for user apps; mark it as initialized */ | ||
609 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
610 | ppd = dd->pport + pidx; | ||
611 | /* | ||
612 | * Set status even if port serdes is not initialized | ||
613 | * so that diags will work. | ||
614 | */ | ||
615 | *ppd->statusp |= QIB_STATUS_CHIP_PRESENT | | ||
616 | QIB_STATUS_INITTED; | ||
617 | if (!ppd->link_speed_enabled) | ||
618 | continue; | ||
619 | if (dd->flags & QIB_HAS_SEND_DMA) | ||
620 | ret = qib_setup_sdma(ppd); | ||
621 | init_timer(&ppd->hol_timer); | ||
622 | ppd->hol_timer.function = qib_hol_event; | ||
623 | ppd->hol_timer.data = (unsigned long)ppd; | ||
624 | ppd->hol_state = QIB_HOL_UP; | ||
625 | } | ||
626 | |||
627 | /* now we can enable all interrupts from the chip */ | ||
628 | dd->f_set_intr_state(dd, 1); | ||
629 | |||
630 | /* | ||
631 | * Setup to verify we get an interrupt, and fallback | ||
632 | * to an alternate if necessary and possible. | ||
633 | */ | ||
634 | mod_timer(&dd->intrchk_timer, jiffies + HZ/2); | ||
635 | /* start stats retrieval timer */ | ||
636 | mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); | ||
637 | } | ||
638 | |||
639 | /* if ret is non-zero, we probably should do some cleanup here... */ | ||
640 | return ret; | ||
641 | } | ||
642 | |||
643 | /* | ||
644 | * These next two routines are placeholders in case we don't have per-arch | ||
645 | * code for controlling write combining. If explicit control of write | ||
646 | * combining is not available, performance will probably be awful. | ||
647 | */ | ||
648 | |||
649 | int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd) | ||
650 | { | ||
651 | return -EOPNOTSUPP; | ||
652 | } | ||
653 | |||
654 | void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd) | ||
655 | { | ||
656 | } | ||
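Because qib_enable_wc() and qib_disable_wc() are declared weak, an architecture-specific file in the driver can supply real implementations and they simply win at link time. A sketch of what such an override looks like (the body is illustrative; the actual write-combining setup lives in the arch-specific sources, not shown in this hunk):

#include "qib.h"

/* a non-weak definition overrides the weak placeholder at link time */
int qib_enable_wc(struct qib_devdata *dd)
{
	/* map the chip's PIO buffers write-combined here (details omitted) */
	return 0;
}

void qib_disable_wc(struct qib_devdata *dd)
{
	/* undo whatever qib_enable_wc() set up */
}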
657 | |||
658 | static inline struct qib_devdata *__qib_lookup(int unit) | ||
659 | { | ||
660 | return idr_find(&qib_unit_table, unit); | ||
661 | } | ||
662 | |||
663 | struct qib_devdata *qib_lookup(int unit) | ||
664 | { | ||
665 | struct qib_devdata *dd; | ||
666 | unsigned long flags; | ||
667 | |||
668 | spin_lock_irqsave(&qib_devs_lock, flags); | ||
669 | dd = __qib_lookup(unit); | ||
670 | spin_unlock_irqrestore(&qib_devs_lock, flags); | ||
671 | |||
672 | return dd; | ||
673 | } | ||
674 | |||
675 | /* | ||
676 | * Stop the timers during unit shutdown, or after an error late | ||
677 | * in initialization. | ||
678 | */ | ||
679 | static void qib_stop_timers(struct qib_devdata *dd) | ||
680 | { | ||
681 | struct qib_pportdata *ppd; | ||
682 | int pidx; | ||
683 | |||
684 | if (dd->stats_timer.data) { | ||
685 | del_timer_sync(&dd->stats_timer); | ||
686 | dd->stats_timer.data = 0; | ||
687 | } | ||
688 | if (dd->intrchk_timer.data) { | ||
689 | del_timer_sync(&dd->intrchk_timer); | ||
690 | dd->intrchk_timer.data = 0; | ||
691 | } | ||
692 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
693 | ppd = dd->pport + pidx; | ||
694 | if (ppd->hol_timer.data) | ||
695 | del_timer_sync(&ppd->hol_timer); | ||
696 | if (ppd->led_override_timer.data) { | ||
697 | del_timer_sync(&ppd->led_override_timer); | ||
698 | atomic_set(&ppd->led_override_timer_active, 0); | ||
699 | } | ||
700 | if (ppd->symerr_clear_timer.data) | ||
701 | del_timer_sync(&ppd->symerr_clear_timer); | ||
702 | } | ||
703 | } | ||
704 | |||
705 | /** | ||
706 | * qib_shutdown_device - shut down a device | ||
707 | * @dd: the qlogic_ib device | ||
708 | * | ||
709 | * This is called to make the device quiet when we are about to | ||
710 | * unload the driver, and also when the device is administratively | ||
711 | * disabled. It does not free any data structures. | ||
712 | * Everything it does has to be setup again by qib_init(dd, 1) | ||
713 | */ | ||
714 | static void qib_shutdown_device(struct qib_devdata *dd) | ||
715 | { | ||
716 | struct qib_pportdata *ppd; | ||
717 | unsigned pidx; | ||
718 | |||
719 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
720 | ppd = dd->pport + pidx; | ||
721 | |||
722 | spin_lock_irq(&ppd->lflags_lock); | ||
723 | ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT | | ||
724 | QIBL_LINKARMED | QIBL_LINKACTIVE | | ||
725 | QIBL_LINKV); | ||
726 | spin_unlock_irq(&ppd->lflags_lock); | ||
727 | *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY); | ||
728 | } | ||
729 | dd->flags &= ~QIB_INITTED; | ||
730 | |||
731 | /* mask interrupts, but not errors */ | ||
732 | dd->f_set_intr_state(dd, 0); | ||
733 | |||
734 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
735 | ppd = dd->pport + pidx; | ||
736 | dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS | | ||
737 | QIB_RCVCTRL_CTXT_DIS | | ||
738 | QIB_RCVCTRL_INTRAVAIL_DIS | | ||
739 | QIB_RCVCTRL_PKEY_ENB, -1); | ||
740 | /* | ||
741 | * Gracefully stop all sends allowing any in progress to | ||
742 | * trickle out first. | ||
743 | */ | ||
744 | dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR); | ||
745 | } | ||
746 | |||
747 | /* | ||
748 | * Enough for anything that's going to trickle out to have actually | ||
749 | * done so. | ||
750 | */ | ||
751 | udelay(20); | ||
752 | |||
753 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
754 | ppd = dd->pport + pidx; | ||
755 | dd->f_setextled(ppd, 0); /* make sure LEDs are off */ | ||
756 | |||
757 | if (dd->flags & QIB_HAS_SEND_DMA) | ||
758 | qib_teardown_sdma(ppd); | ||
759 | |||
760 | dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS | | ||
761 | QIB_SENDCTRL_SEND_DIS); | ||
762 | /* | ||
763 | * Clear SerdesEnable. | ||
764 | * We can't count on interrupts since we are stopping. | ||
765 | */ | ||
766 | dd->f_quiet_serdes(ppd); | ||
767 | } | ||
768 | |||
769 | qib_update_eeprom_log(dd); | ||
770 | } | ||
771 | |||
772 | /** | ||
773 | * qib_free_ctxtdata - free a context's allocated data | ||
774 | * @dd: the qlogic_ib device | ||
775 | * @rcd: the ctxtdata structure | ||
776 | * | ||
777 | * free up any allocated data for a context | ||
778 | * This should not touch anything that would affect a simultaneous | ||
779 | * re-allocation of context data, because it is called after qib_mutex | ||
780 | * is released (and can be called from reinit as well). | ||
781 | * It should never change any chip state, or global driver state. | ||
782 | */ | ||
783 | void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd) | ||
784 | { | ||
785 | if (!rcd) | ||
786 | return; | ||
787 | |||
788 | if (rcd->rcvhdrq) { | ||
789 | dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size, | ||
790 | rcd->rcvhdrq, rcd->rcvhdrq_phys); | ||
791 | rcd->rcvhdrq = NULL; | ||
792 | if (rcd->rcvhdrtail_kvaddr) { | ||
793 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, | ||
794 | rcd->rcvhdrtail_kvaddr, | ||
795 | rcd->rcvhdrqtailaddr_phys); | ||
796 | rcd->rcvhdrtail_kvaddr = NULL; | ||
797 | } | ||
798 | } | ||
799 | if (rcd->rcvegrbuf) { | ||
800 | unsigned e; | ||
801 | |||
802 | for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { | ||
803 | void *base = rcd->rcvegrbuf[e]; | ||
804 | size_t size = rcd->rcvegrbuf_size; | ||
805 | |||
806 | dma_free_coherent(&dd->pcidev->dev, size, | ||
807 | base, rcd->rcvegrbuf_phys[e]); | ||
808 | } | ||
809 | kfree(rcd->rcvegrbuf); | ||
810 | rcd->rcvegrbuf = NULL; | ||
811 | kfree(rcd->rcvegrbuf_phys); | ||
812 | rcd->rcvegrbuf_phys = NULL; | ||
813 | rcd->rcvegrbuf_chunks = 0; | ||
814 | } | ||
815 | |||
816 | kfree(rcd->tid_pg_list); | ||
817 | vfree(rcd->user_event_mask); | ||
818 | vfree(rcd->subctxt_uregbase); | ||
819 | vfree(rcd->subctxt_rcvegrbuf); | ||
820 | vfree(rcd->subctxt_rcvhdr_base); | ||
821 | kfree(rcd); | ||
822 | } | ||
823 | |||
824 | /* | ||
825 | * Perform a PIO buffer bandwidth write test, to verify proper system | ||
826 | * configuration. Even when all the setup calls work, occasionally | ||
827 | * BIOS or other issues can prevent write combining from working, or | ||
828 | * can cause other bandwidth problems to the chip. | ||
829 | * | ||
830 | * This test simply writes the same buffer over and over again, and | ||
831 | * measures close to the peak bandwidth to the chip (not testing | ||
832 | * data bandwidth to the wire). On chips that use an address-based | ||
833 | * trigger to send packets to the wire, this is easy. On chips that | ||
834 | * use a count to trigger, we want to make sure that the packet doesn't | ||
835 | * go out on the wire, or trigger flow control checks. | ||
836 | */ | ||
837 | static void qib_verify_pioperf(struct qib_devdata *dd) | ||
838 | { | ||
839 | u32 pbnum, cnt, lcnt; | ||
840 | u32 __iomem *piobuf; | ||
841 | u32 *addr; | ||
842 | u64 msecs, emsecs; | ||
843 | |||
844 | piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum); | ||
845 | if (!piobuf) { | ||
846 | qib_devinfo(dd->pcidev, | ||
847 | "No PIObufs for checking perf, skipping\n"); | ||
848 | return; | ||
849 | } | ||
850 | |||
851 | /* | ||
852 | * Enough to give us a reasonable test, less than piobuf size, and | ||
853 | * likely a multiple of store buffer length. | ||
854 | */ | ||
855 | cnt = 1024; | ||
856 | |||
857 | addr = vmalloc(cnt); | ||
858 | if (!addr) { | ||
859 | qib_devinfo(dd->pcidev, | ||
860 | "Couldn't get memory for checking PIO perf," | ||
861 | " skipping\n"); | ||
862 | goto done; | ||
863 | } | ||
864 | |||
865 | preempt_disable(); /* we want reasonably accurate elapsed time */ | ||
866 | msecs = 1 + jiffies_to_msecs(jiffies); | ||
867 | for (lcnt = 0; lcnt < 10000U; lcnt++) { | ||
868 | /* wait until we cross msec boundary */ | ||
869 | if (jiffies_to_msecs(jiffies) >= msecs) | ||
870 | break; | ||
871 | udelay(1); | ||
872 | } | ||
873 | |||
874 | dd->f_set_armlaunch(dd, 0); | ||
875 | |||
876 | /* | ||
877 | * length 0, no dwords actually sent | ||
878 | */ | ||
879 | writeq(0, piobuf); | ||
880 | qib_flush_wc(); | ||
881 | |||
882 | /* | ||
883 | * This is only roughly accurate, since even with preempt we | ||
884 | * still take interrupts that could take a while. Running for | ||
885 | * >= 5 msec seems to get us "close enough" to accurate values. | ||
886 | */ | ||
887 | msecs = jiffies_to_msecs(jiffies); | ||
888 | for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) { | ||
889 | qib_pio_copy(piobuf + 64, addr, cnt >> 2); | ||
890 | emsecs = jiffies_to_msecs(jiffies) - msecs; | ||
891 | } | ||
892 | |||
893 | /* 1 GiB/sec, slightly over IB SDR line rate */ | ||
894 | if (lcnt < (emsecs * 1024U)) | ||
895 | qib_dev_err(dd, | ||
896 | "Performance problem: bandwidth to PIO buffers is " | ||
897 | "only %u MiB/sec\n", | ||
898 | lcnt / (u32) emsecs); | ||
899 | |||
900 | preempt_enable(); | ||
901 | |||
902 | vfree(addr); | ||
903 | |||
904 | done: | ||
905 | /* disarm piobuf, so it's available again */ | ||
906 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum)); | ||
907 | qib_sendbuf_done(dd, pbnum); | ||
908 | dd->f_set_armlaunch(dd, 1); | ||
909 | } | ||
910 | |||
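To make the threshold in qib_verify_pioperf() concrete, here is a hedged sketch of the same arithmetic pulled out on its own (the helper name is illustrative, not part of the driver): each loop iteration copies cnt = 1024 bytes, so lcnt iterations in emsecs milliseconds is roughly lcnt/emsecs MiB/sec, and the check flags anything under about 1 GiB/sec.

    /* Illustration only: the pass/fail math used by the PIO bandwidth test.
     * One iteration copies 1 KiB, so iterations per msec ~= MiB per sec. */
    static int pio_bw_too_low(u32 lcnt, u64 emsecs)
    {
            /* 1024 MiB/sec == 1 GiB/sec, slightly over IB SDR line rate */
            return lcnt < (emsecs * 1024U);
    }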
911 | |||
912 | void qib_free_devdata(struct qib_devdata *dd) | ||
913 | { | ||
914 | unsigned long flags; | ||
915 | |||
916 | spin_lock_irqsave(&qib_devs_lock, flags); | ||
917 | idr_remove(&qib_unit_table, dd->unit); | ||
918 | list_del(&dd->list); | ||
919 | spin_unlock_irqrestore(&qib_devs_lock, flags); | ||
920 | |||
921 | ib_dealloc_device(&dd->verbs_dev.ibdev); | ||
922 | } | ||
923 | |||
924 | /* | ||
925 | * Allocate our primary per-unit data structure. Must be done via verbs | ||
926 | * allocator, because the verbs cleanup process both does cleanup and | ||
927 | * free of the data structure. | ||
928 | * "extra" is for chip-specific data. | ||
929 | * | ||
930 | * Use the idr mechanism to get a unit number for this unit. | ||
931 | */ | ||
932 | struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) | ||
933 | { | ||
934 | unsigned long flags; | ||
935 | struct qib_devdata *dd; | ||
936 | int ret; | ||
937 | |||
938 | if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { | ||
939 | dd = ERR_PTR(-ENOMEM); | ||
940 | goto bail; | ||
941 | } | ||
942 | |||
943 | dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); | ||
944 | if (!dd) { | ||
945 | dd = ERR_PTR(-ENOMEM); | ||
946 | goto bail; | ||
947 | } | ||
948 | |||
949 | spin_lock_irqsave(&qib_devs_lock, flags); | ||
950 | ret = idr_get_new(&qib_unit_table, dd, &dd->unit); | ||
951 | if (ret >= 0) | ||
952 | list_add(&dd->list, &qib_dev_list); | ||
953 | spin_unlock_irqrestore(&qib_devs_lock, flags); | ||
954 | |||
955 | if (ret < 0) { | ||
956 | qib_early_err(&pdev->dev, | ||
957 | "Could not allocate unit ID: error %d\n", -ret); | ||
958 | ib_dealloc_device(&dd->verbs_dev.ibdev); | ||
959 | dd = ERR_PTR(ret); | ||
960 | goto bail; | ||
961 | } | ||
962 | |||
963 | if (!qib_cpulist_count) { | ||
964 | u32 count = num_online_cpus(); | ||
965 | qib_cpulist = kzalloc(BITS_TO_LONGS(count) * | ||
966 | sizeof(long), GFP_KERNEL); | ||
967 | if (qib_cpulist) | ||
968 | qib_cpulist_count = count; | ||
969 | else | ||
970 | qib_early_err(&pdev->dev, "Could not alloc cpulist " | ||
971 | "info, cpu affinity might be wrong\n"); | ||
972 | } | ||
973 | |||
974 | bail: | ||
975 | return dd; | ||
976 | } | ||
977 | |||
978 | /* | ||
979 | * Called from freeze mode handlers, and from PCI error | ||
980 | * reporting code. Should be paranoid about state of | ||
981 | * system and data structures. | ||
982 | */ | ||
983 | void qib_disable_after_error(struct qib_devdata *dd) | ||
984 | { | ||
985 | if (dd->flags & QIB_INITTED) { | ||
986 | u32 pidx; | ||
987 | |||
988 | dd->flags &= ~QIB_INITTED; | ||
989 | if (dd->pport) | ||
990 | for (pidx = 0; pidx < dd->num_pports; ++pidx) { | ||
991 | struct qib_pportdata *ppd; | ||
992 | |||
993 | ppd = dd->pport + pidx; | ||
994 | if (dd->flags & QIB_PRESENT) { | ||
995 | qib_set_linkstate(ppd, | ||
996 | QIB_IB_LINKDOWN_DISABLE); | ||
997 | dd->f_setextled(ppd, 0); | ||
998 | } | ||
999 | *ppd->statusp &= ~QIB_STATUS_IB_READY; | ||
1000 | } | ||
1001 | } | ||
1002 | |||
1003 | /* | ||
1004 | * Mark as having had an error for driver, and also | ||
1005 | * for /sys and status word mapped to user programs. | ||
1006 | * This marks unit as not usable, until reset. | ||
1007 | */ | ||
1008 | if (dd->devstatusp) | ||
1009 | *dd->devstatusp |= QIB_STATUS_HWERROR; | ||
1010 | } | ||
1011 | |||
1012 | static void __devexit qib_remove_one(struct pci_dev *); | ||
1013 | static int __devinit qib_init_one(struct pci_dev *, | ||
1014 | const struct pci_device_id *); | ||
1015 | |||
1016 | #define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: " | ||
1017 | #define PFX QIB_DRV_NAME ": " | ||
1018 | |||
1019 | static const struct pci_device_id qib_pci_tbl[] = { | ||
1020 | { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) }, | ||
1021 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) }, | ||
1022 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) }, | ||
1023 | { 0, } | ||
1024 | }; | ||
1025 | |||
1026 | MODULE_DEVICE_TABLE(pci, qib_pci_tbl); | ||
1027 | |||
1028 | struct pci_driver qib_driver = { | ||
1029 | .name = QIB_DRV_NAME, | ||
1030 | .probe = qib_init_one, | ||
1031 | .remove = __devexit_p(qib_remove_one), | ||
1032 | .id_table = qib_pci_tbl, | ||
1033 | .err_handler = &qib_pci_err_handler, | ||
1034 | }; | ||
1035 | |||
1036 | /* | ||
1037 | * Do all the generic driver unit- and chip-independent memory | ||
1038 | * allocation and initialization. | ||
1039 | */ | ||
1040 | static int __init qlogic_ib_init(void) | ||
1041 | { | ||
1042 | int ret; | ||
1043 | |||
1044 | ret = qib_dev_init(); | ||
1045 | if (ret) | ||
1046 | goto bail; | ||
1047 | |||
1048 | /* | ||
1049 | * We create our own workqueue mainly because we want to be | ||
1050 | * able to flush it when devices are being removed. We can't | ||
1051 | * use schedule_work()/flush_scheduled_work() because both | ||
1052 | * unregister_netdev() and linkwatch_event take the rtnl lock, | ||
1053 | * so flush_scheduled_work() can deadlock during device | ||
1054 | * removal. | ||
1055 | */ | ||
1056 | qib_wq = create_workqueue("qib"); | ||
1057 | if (!qib_wq) { | ||
1058 | ret = -ENOMEM; | ||
1059 | goto bail_dev; | ||
1060 | } | ||
1061 | |||
1062 | qib_cq_wq = create_workqueue("qib_cq"); | ||
1063 | if (!qib_cq_wq) { | ||
1064 | ret = -ENOMEM; | ||
1065 | goto bail_wq; | ||
1066 | } | ||
1067 | |||
1068 | /* | ||
1069 | * These must be called before the driver is registered with | ||
1070 | * the PCI subsystem. | ||
1071 | */ | ||
1072 | idr_init(&qib_unit_table); | ||
1073 | if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { | ||
1074 | printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n"); | ||
1075 | ret = -ENOMEM; | ||
1076 | goto bail_cq_wq; | ||
1077 | } | ||
1078 | |||
1079 | ret = pci_register_driver(&qib_driver); | ||
1080 | if (ret < 0) { | ||
1081 | printk(KERN_ERR QIB_DRV_NAME | ||
1082 | ": Unable to register driver: error %d\n", -ret); | ||
1083 | goto bail_unit; | ||
1084 | } | ||
1085 | |||
1086 | /* not fatal if it doesn't work */ | ||
1087 | if (qib_init_qibfs()) | ||
1088 | printk(KERN_ERR QIB_DRV_NAME ": Unable to register qibfs\n"); | ||
1089 | goto bail; /* all OK */ | ||
1090 | |||
1091 | bail_unit: | ||
1092 | idr_destroy(&qib_unit_table); | ||
1093 | bail_cq_wq: | ||
1094 | destroy_workqueue(qib_cq_wq); | ||
1095 | bail_wq: | ||
1096 | destroy_workqueue(qib_wq); | ||
1097 | bail_dev: | ||
1098 | qib_dev_cleanup(); | ||
1099 | bail: | ||
1100 | return ret; | ||
1101 | } | ||
1102 | |||
1103 | module_init(qlogic_ib_init); | ||
1104 | |||
1105 | /* | ||
1106 | * Do the non-unit driver cleanup, memory free, etc. at unload. | ||
1107 | */ | ||
1108 | static void __exit qlogic_ib_cleanup(void) | ||
1109 | { | ||
1110 | int ret; | ||
1111 | |||
1112 | ret = qib_exit_qibfs(); | ||
1113 | if (ret) | ||
1114 | printk(KERN_ERR QIB_DRV_NAME ": " | ||
1115 | "Unable to cleanup counter filesystem: " | ||
1116 | "error %d\n", -ret); | ||
1117 | |||
1118 | pci_unregister_driver(&qib_driver); | ||
1119 | |||
1120 | destroy_workqueue(qib_wq); | ||
1121 | destroy_workqueue(qib_cq_wq); | ||
1122 | |||
1123 | qib_cpulist_count = 0; | ||
1124 | kfree(qib_cpulist); | ||
1125 | |||
1126 | idr_destroy(&qib_unit_table); | ||
1127 | qib_dev_cleanup(); | ||
1128 | } | ||
1129 | |||
1130 | module_exit(qlogic_ib_cleanup); | ||
1131 | |||
1132 | /* this can only be called after a successful initialization */ | ||
1133 | static void cleanup_device_data(struct qib_devdata *dd) | ||
1134 | { | ||
1135 | int ctxt; | ||
1136 | int pidx; | ||
1137 | struct qib_ctxtdata **tmp; | ||
1138 | unsigned long flags; | ||
1139 | |||
1140 | /* users can't do anything more with chip */ | ||
1141 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | ||
1142 | if (dd->pport[pidx].statusp) | ||
1143 | *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT; | ||
1144 | |||
1145 | if (!qib_wc_pat) | ||
1146 | qib_disable_wc(dd); | ||
1147 | |||
1148 | if (dd->pioavailregs_dma) { | ||
1149 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, | ||
1150 | (void *) dd->pioavailregs_dma, | ||
1151 | dd->pioavailregs_phys); | ||
1152 | dd->pioavailregs_dma = NULL; | ||
1153 | } | ||
1154 | |||
1155 | if (dd->pageshadow) { | ||
1156 | struct page **tmpp = dd->pageshadow; | ||
1157 | dma_addr_t *tmpd = dd->physshadow; | ||
1158 | int i, cnt = 0; | ||
1159 | |||
1160 | for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { | ||
1161 | int ctxt_tidbase = ctxt * dd->rcvtidcnt; | ||
1162 | int maxtid = ctxt_tidbase + dd->rcvtidcnt; | ||
1163 | |||
1164 | for (i = ctxt_tidbase; i < maxtid; i++) { | ||
1165 | if (!tmpp[i]) | ||
1166 | continue; | ||
1167 | pci_unmap_page(dd->pcidev, tmpd[i], | ||
1168 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
1169 | qib_release_user_pages(&tmpp[i], 1); | ||
1170 | tmpp[i] = NULL; | ||
1171 | cnt++; | ||
1172 | } | ||
1173 | } | ||
1174 | |||
1175 | tmpp = dd->pageshadow; | ||
1176 | dd->pageshadow = NULL; | ||
1177 | vfree(tmpp); | ||
1178 | } | ||
1179 | |||
1180 | /* | ||
1181 | * Free any resources still in use (usually just kernel contexts) | ||
1182 | * at unload; we do this for ctxtcnt, because that's what we allocate. | ||
1183 | * We acquire lock to be really paranoid that rcd isn't being | ||
1184 | * accessed from some interrupt-related code (that should not happen, | ||
1185 | * but best to be sure). | ||
1186 | */ | ||
1187 | spin_lock_irqsave(&dd->uctxt_lock, flags); | ||
1188 | tmp = dd->rcd; | ||
1189 | dd->rcd = NULL; | ||
1190 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
1191 | for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) { | ||
1192 | struct qib_ctxtdata *rcd = tmp[ctxt]; | ||
1193 | |||
1194 | tmp[ctxt] = NULL; /* debugging paranoia */ | ||
1195 | qib_free_ctxtdata(dd, rcd); | ||
1196 | } | ||
1197 | kfree(tmp); | ||
1198 | kfree(dd->boardname); | ||
1199 | } | ||
1200 | |||
1201 | /* | ||
1202 | * Clean up on unit shutdown, or error during unit load after | ||
1203 | * successful initialization. | ||
1204 | */ | ||
1205 | static void qib_postinit_cleanup(struct qib_devdata *dd) | ||
1206 | { | ||
1207 | /* | ||
1208 | * Clean up chip-specific stuff. | ||
1209 | * We check for NULL here, because it's outside | ||
1210 | * the kregbase check, and we need to call it | ||
1211 | * after the free_irq. Thus it's possible that | ||
1212 | * the function pointers were never initialized. | ||
1213 | */ | ||
1214 | if (dd->f_cleanup) | ||
1215 | dd->f_cleanup(dd); | ||
1216 | |||
1217 | qib_pcie_ddcleanup(dd); | ||
1218 | |||
1219 | cleanup_device_data(dd); | ||
1220 | |||
1221 | qib_free_devdata(dd); | ||
1222 | } | ||
1223 | |||
1224 | static int __devinit qib_init_one(struct pci_dev *pdev, | ||
1225 | const struct pci_device_id *ent) | ||
1226 | { | ||
1227 | int ret, j, pidx, initfail; | ||
1228 | struct qib_devdata *dd = NULL; | ||
1229 | |||
1230 | ret = qib_pcie_init(pdev, ent); | ||
1231 | if (ret) | ||
1232 | goto bail; | ||
1233 | |||
1234 | /* | ||
1235 | * Do device-specific initialization, function table setup, dd | ||
1236 | * allocation, etc. | ||
1237 | */ | ||
1238 | switch (ent->device) { | ||
1239 | case PCI_DEVICE_ID_QLOGIC_IB_6120: | ||
1240 | dd = qib_init_iba6120_funcs(pdev, ent); | ||
1241 | break; | ||
1242 | |||
1243 | case PCI_DEVICE_ID_QLOGIC_IB_7220: | ||
1244 | dd = qib_init_iba7220_funcs(pdev, ent); | ||
1245 | break; | ||
1246 | |||
1247 | case PCI_DEVICE_ID_QLOGIC_IB_7322: | ||
1248 | dd = qib_init_iba7322_funcs(pdev, ent); | ||
1249 | break; | ||
1250 | |||
1251 | default: | ||
1252 | qib_early_err(&pdev->dev, "Failing on unknown QLogic " | ||
1253 | "deviceid 0x%x\n", ent->device); | ||
1254 | ret = -ENODEV; | ||
1255 | } | ||
1256 | |||
1257 | if (IS_ERR(dd)) | ||
1258 | ret = PTR_ERR(dd); | ||
1259 | if (ret) | ||
1260 | goto bail; /* error already printed */ | ||
1261 | |||
1262 | /* do the generic initialization */ | ||
1263 | initfail = qib_init(dd, 0); | ||
1264 | |||
1265 | ret = qib_register_ib_device(dd); | ||
1266 | |||
1267 | /* | ||
1268 | * Now ready for use. This should be cleared whenever we | ||
1269 | * detect a reset, or initiate one. If earlier failure, | ||
1270 | * we still create devices, so diags, etc. can be used | ||
1271 | * to determine cause of problem. | ||
1272 | */ | ||
1273 | if (!qib_mini_init && !initfail && !ret) | ||
1274 | dd->flags |= QIB_INITTED; | ||
1275 | |||
1276 | j = qib_device_create(dd); | ||
1277 | if (j) | ||
1278 | qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j); | ||
1279 | j = qibfs_add(dd); | ||
1280 | if (j) | ||
1281 | qib_dev_err(dd, "Failed filesystem setup for counters: %d\n", | ||
1282 | -j); | ||
1283 | |||
1284 | if (qib_mini_init || initfail || ret) { | ||
1285 | qib_stop_timers(dd); | ||
1286 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | ||
1287 | dd->f_quiet_serdes(dd->pport + pidx); | ||
1288 | if (initfail) | ||
1289 | ret = initfail; | ||
1290 | goto bail; | ||
1291 | } | ||
1292 | |||
1293 | if (!qib_wc_pat) { | ||
1294 | ret = qib_enable_wc(dd); | ||
1295 | if (ret) { | ||
1296 | qib_dev_err(dd, "Write combining not enabled " | ||
1297 | "(err %d): performance may be poor\n", | ||
1298 | -ret); | ||
1299 | ret = 0; | ||
1300 | } | ||
1301 | } | ||
1302 | |||
1303 | qib_verify_pioperf(dd); | ||
1304 | bail: | ||
1305 | return ret; | ||
1306 | } | ||
1307 | |||
1308 | static void __devexit qib_remove_one(struct pci_dev *pdev) | ||
1309 | { | ||
1310 | struct qib_devdata *dd = pci_get_drvdata(pdev); | ||
1311 | int ret; | ||
1312 | |||
1313 | /* unregister from IB core */ | ||
1314 | qib_unregister_ib_device(dd); | ||
1315 | |||
1316 | /* | ||
1317 | * Disable the IB link, disable interrupts on the device, | ||
1318 | * clear dma engines, etc. | ||
1319 | */ | ||
1320 | if (!qib_mini_init) | ||
1321 | qib_shutdown_device(dd); | ||
1322 | |||
1323 | qib_stop_timers(dd); | ||
1324 | |||
1325 | /* wait until all of our (qsfp) schedule_work() calls complete */ | ||
1326 | flush_scheduled_work(); | ||
1327 | |||
1328 | ret = qibfs_remove(dd); | ||
1329 | if (ret) | ||
1330 | qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n", | ||
1331 | -ret); | ||
1332 | |||
1333 | qib_device_remove(dd); | ||
1334 | |||
1335 | qib_postinit_cleanup(dd); | ||
1336 | } | ||
1337 | |||
1338 | /** | ||
1339 | * qib_create_rcvhdrq - create a receive header queue | ||
1340 | * @dd: the qlogic_ib device | ||
1341 | * @rcd: the context data | ||
1342 | * | ||
1343 | * This must be contiguous memory (from an i/o perspective), and must be | ||
1344 | * DMA'able (which means for some systems, it will go through an IOMMU, | ||
1345 | * or be forced into a low address range). | ||
1346 | */ | ||
1347 | int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) | ||
1348 | { | ||
1349 | unsigned amt; | ||
1350 | |||
1351 | if (!rcd->rcvhdrq) { | ||
1352 | dma_addr_t phys_hdrqtail; | ||
1353 | gfp_t gfp_flags; | ||
1354 | |||
1355 | amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * | ||
1356 | sizeof(u32), PAGE_SIZE); | ||
1357 | gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? | ||
1358 | GFP_USER : GFP_KERNEL; | ||
1359 | rcd->rcvhdrq = dma_alloc_coherent( | ||
1360 | &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys, | ||
1361 | gfp_flags | __GFP_COMP); | ||
1362 | |||
1363 | if (!rcd->rcvhdrq) { | ||
1364 | qib_dev_err(dd, "attempt to allocate %d bytes " | ||
1365 | "for ctxt %u rcvhdrq failed\n", | ||
1366 | amt, rcd->ctxt); | ||
1367 | goto bail; | ||
1368 | } | ||
1369 | |||
1370 | if (rcd->ctxt >= dd->first_user_ctxt) { | ||
1371 | rcd->user_event_mask = vmalloc_user(PAGE_SIZE); | ||
1372 | if (!rcd->user_event_mask) | ||
1373 | goto bail_free_hdrq; | ||
1374 | } | ||
1375 | |||
1376 | if (!(dd->flags & QIB_NODMA_RTAIL)) { | ||
1377 | rcd->rcvhdrtail_kvaddr = dma_alloc_coherent( | ||
1378 | &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, | ||
1379 | gfp_flags); | ||
1380 | if (!rcd->rcvhdrtail_kvaddr) | ||
1381 | goto bail_free; | ||
1382 | rcd->rcvhdrqtailaddr_phys = phys_hdrqtail; | ||
1383 | } | ||
1384 | |||
1385 | rcd->rcvhdrq_size = amt; | ||
1386 | } | ||
1387 | |||
1388 | /* clear for security and sanity on each use */ | ||
1389 | memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size); | ||
1390 | if (rcd->rcvhdrtail_kvaddr) | ||
1391 | memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE); | ||
1392 | return 0; | ||
1393 | |||
1394 | bail_free: | ||
1395 | qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u " | ||
1396 | "rcvhdrqtailaddr failed\n", rcd->ctxt); | ||
1397 | vfree(rcd->user_event_mask); | ||
1398 | rcd->user_event_mask = NULL; | ||
1399 | bail_free_hdrq: | ||
1400 | dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, | ||
1401 | rcd->rcvhdrq_phys); | ||
1402 | rcd->rcvhdrq = NULL; | ||
1403 | bail: | ||
1404 | return -ENOMEM; | ||
1405 | } | ||
1406 | |||
1407 | /** | ||
1408 | * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts. | ||
1409 | * @rcd: the context we are setting up. | ||
1410 | * | ||
1411 | * Allocate the eager TID buffers and program them into the chip. | ||
1412 | * They are no longer completely contiguous, we do multiple allocation | ||
1413 | * calls. Otherwise we get the OOM code involved, by asking for too | ||
1414 | * much per call, with disastrous results on some kernels. | ||
1415 | */ | ||
1416 | int qib_setup_eagerbufs(struct qib_ctxtdata *rcd) | ||
1417 | { | ||
1418 | struct qib_devdata *dd = rcd->dd; | ||
1419 | unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff; | ||
1420 | size_t size; | ||
1421 | gfp_t gfp_flags; | ||
1422 | |||
1423 | /* | ||
1424 | * GFP_USER, but without GFP_FS, so buffer cache can be | ||
1425 | * coalesced (we hope); otherwise, even at order 4, | ||
1426 | * heavy filesystem activity makes these fail, and we can | ||
1427 | * use compound pages. | ||
1428 | */ | ||
1429 | gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; | ||
1430 | |||
1431 | egrcnt = rcd->rcvegrcnt; | ||
1432 | egroff = rcd->rcvegr_tid_base; | ||
1433 | egrsize = dd->rcvegrbufsize; | ||
1434 | |||
1435 | chunk = rcd->rcvegrbuf_chunks; | ||
1436 | egrperchunk = rcd->rcvegrbufs_perchunk; | ||
1437 | size = rcd->rcvegrbuf_size; | ||
1438 | if (!rcd->rcvegrbuf) { | ||
1439 | rcd->rcvegrbuf = | ||
1440 | kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]), | ||
1441 | GFP_KERNEL); | ||
1442 | if (!rcd->rcvegrbuf) | ||
1443 | goto bail; | ||
1444 | } | ||
1445 | if (!rcd->rcvegrbuf_phys) { | ||
1446 | rcd->rcvegrbuf_phys = | ||
1447 | kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]), | ||
1448 | GFP_KERNEL); | ||
1449 | if (!rcd->rcvegrbuf_phys) | ||
1450 | goto bail_rcvegrbuf; | ||
1451 | } | ||
1452 | for (e = 0; e < rcd->rcvegrbuf_chunks; e++) { | ||
1453 | if (rcd->rcvegrbuf[e]) | ||
1454 | continue; | ||
1455 | rcd->rcvegrbuf[e] = | ||
1456 | dma_alloc_coherent(&dd->pcidev->dev, size, | ||
1457 | &rcd->rcvegrbuf_phys[e], | ||
1458 | gfp_flags); | ||
1459 | if (!rcd->rcvegrbuf[e]) | ||
1460 | goto bail_rcvegrbuf_phys; | ||
1461 | } | ||
1462 | |||
1463 | rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0]; | ||
1464 | |||
1465 | for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) { | ||
1466 | dma_addr_t pa = rcd->rcvegrbuf_phys[chunk]; | ||
1467 | unsigned i; | ||
1468 | |||
1469 | for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) { | ||
1470 | dd->f_put_tid(dd, e + egroff + | ||
1471 | (u64 __iomem *) | ||
1472 | ((char __iomem *) | ||
1473 | dd->kregbase + | ||
1474 | dd->rcvegrbase), | ||
1475 | RCVHQ_RCV_TYPE_EAGER, pa); | ||
1476 | pa += egrsize; | ||
1477 | } | ||
1478 | cond_resched(); /* don't hog the cpu */ | ||
1479 | } | ||
1480 | |||
1481 | return 0; | ||
1482 | |||
1483 | bail_rcvegrbuf_phys: | ||
1484 | for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++) | ||
1485 | dma_free_coherent(&dd->pcidev->dev, size, | ||
1486 | rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]); | ||
1487 | kfree(rcd->rcvegrbuf_phys); | ||
1488 | rcd->rcvegrbuf_phys = NULL; | ||
1489 | bail_rcvegrbuf: | ||
1490 | kfree(rcd->rcvegrbuf); | ||
1491 | rcd->rcvegrbuf = NULL; | ||
1492 | bail: | ||
1493 | return -ENOMEM; | ||
1494 | } | ||
1495 | |||
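As a rough illustration of why qib_setup_eagerbufs() allocates in chunks (the numbers below are hypothetical; the real chunk geometry is computed elsewhere in the driver): splitting the eager buffers across many dma_alloc_coherent() calls keeps each contiguous request small, which is the point of the comment about not involving the OOM code.

    /* Hypothetical values, illustration only */
    egrcnt      = 2048;                  /* eager buffers per context     */
    egrsize     = 4096;                  /* bytes per eager buffer        */
    egrperchunk = 32;                    /* buffers per allocation        */
    chunk       = egrcnt / egrperchunk;  /* 64 allocation calls           */
    size        = egrperchunk * egrsize; /* 128 KiB per call, rather than */
                                         /* one 8 MiB contiguous request  */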
1496 | int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) | ||
1497 | { | ||
1498 | u64 __iomem *qib_kregbase = NULL; | ||
1499 | void __iomem *qib_piobase = NULL; | ||
1500 | u64 __iomem *qib_userbase = NULL; | ||
1501 | u64 qib_kreglen; | ||
1502 | u64 qib_pio2koffset = dd->piobufbase & 0xffffffff; | ||
1503 | u64 qib_pio4koffset = dd->piobufbase >> 32; | ||
1504 | u64 qib_pio2klen = dd->piobcnt2k * dd->palign; | ||
1505 | u64 qib_pio4klen = dd->piobcnt4k * dd->align4k; | ||
1506 | u64 qib_physaddr = dd->physaddr; | ||
1507 | u64 qib_piolen; | ||
1508 | u64 qib_userlen = 0; | ||
1509 | |||
1510 | /* | ||
1511 | * Free the old mapping because the kernel will try to reuse the | ||
1512 | * old mapping and not create a new mapping with the | ||
1513 | * write combining attribute. | ||
1514 | */ | ||
1515 | iounmap(dd->kregbase); | ||
1516 | dd->kregbase = NULL; | ||
1517 | |||
1518 | /* | ||
1519 | * Assumes chip address space looks like: | ||
1520 | * - kregs + sregs + cregs + uregs (in any order) | ||
1521 | * - piobufs (2K and 4K bufs in either order) | ||
1522 | * or: | ||
1523 | * - kregs + sregs + cregs (in any order) | ||
1524 | * - piobufs (2K and 4K bufs in either order) | ||
1525 | * - uregs | ||
1526 | */ | ||
1527 | if (dd->piobcnt4k == 0) { | ||
1528 | qib_kreglen = qib_pio2koffset; | ||
1529 | qib_piolen = qib_pio2klen; | ||
1530 | } else if (qib_pio2koffset < qib_pio4koffset) { | ||
1531 | qib_kreglen = qib_pio2koffset; | ||
1532 | qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen; | ||
1533 | } else { | ||
1534 | qib_kreglen = qib_pio4koffset; | ||
1535 | qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen; | ||
1536 | } | ||
1537 | qib_piolen += vl15buflen; | ||
1538 | /* Map just the configured ports (not all hw ports) */ | ||
1539 | if (dd->uregbase > qib_kreglen) | ||
1540 | qib_userlen = dd->ureg_align * dd->cfgctxts; | ||
1541 | |||
1542 | /* Sanity checks passed, now create the new mappings */ | ||
1543 | qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen); | ||
1544 | if (!qib_kregbase) | ||
1545 | goto bail; | ||
1546 | |||
1547 | qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen); | ||
1548 | if (!qib_piobase) | ||
1549 | goto bail_kregbase; | ||
1550 | |||
1551 | if (qib_userlen) { | ||
1552 | qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase, | ||
1553 | qib_userlen); | ||
1554 | if (!qib_userbase) | ||
1555 | goto bail_piobase; | ||
1556 | } | ||
1557 | |||
1558 | dd->kregbase = qib_kregbase; | ||
1559 | dd->kregend = (u64 __iomem *) | ||
1560 | ((char __iomem *) qib_kregbase + qib_kreglen); | ||
1561 | dd->piobase = qib_piobase; | ||
1562 | dd->pio2kbase = (void __iomem *) | ||
1563 | (((char __iomem *) dd->piobase) + | ||
1564 | qib_pio2koffset - qib_kreglen); | ||
1565 | if (dd->piobcnt4k) | ||
1566 | dd->pio4kbase = (void __iomem *) | ||
1567 | (((char __iomem *) dd->piobase) + | ||
1568 | qib_pio4koffset - qib_kreglen); | ||
1569 | if (qib_userlen) | ||
1570 | /* ureg will now be accessed relative to dd->userbase */ | ||
1571 | dd->userbase = qib_userbase; | ||
1572 | return 0; | ||
1573 | |||
1574 | bail_piobase: | ||
1575 | iounmap(qib_piobase); | ||
1576 | bail_kregbase: | ||
1577 | iounmap(qib_kregbase); | ||
1578 | bail: | ||
1579 | return -ENOMEM; | ||
1580 | } | ||
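A worked example of the split computed by init_chip_wc_pat(), using made-up offsets: suppose the 2K PIO buffers start at 0x100000 (length 0x80000) and the 4K buffers at 0x200000 (length 0x100000). The 2K region comes first, so the uncached register mapping ends where it begins, and the write-combining window spans both PIO regions.

    /* Hypothetical layout, illustration only */
    qib_pio2koffset = 0x100000;  qib_pio2klen = 0x080000;
    qib_pio4koffset = 0x200000;  qib_pio4klen = 0x100000;

    qib_kreglen = qib_pio2koffset;                        /* 0x100000 */
    qib_piolen  = qib_pio4koffset + qib_pio4klen
                    - qib_kreglen;                        /* 0x200000 */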
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c new file mode 100644 index 000000000000..54a40828a106 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_intr.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/pci.h> | ||
36 | #include <linux/delay.h> | ||
37 | |||
38 | #include "qib.h" | ||
39 | #include "qib_common.h" | ||
40 | |||
41 | /** | ||
42 | * qib_format_hwmsg - format a single hwerror message | ||
43 | * @msg: message buffer | ||
44 | * @msgl: length of message buffer | ||
45 | * @hwmsg: message to add to message buffer | ||
46 | */ | ||
47 | static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg) | ||
48 | { | ||
49 | strlcat(msg, "[", msgl); | ||
50 | strlcat(msg, hwmsg, msgl); | ||
51 | strlcat(msg, "]", msgl); | ||
52 | } | ||
53 | |||
54 | /** | ||
55 | * qib_format_hwerrors - format hardware error messages for display | ||
56 | * @hwerrs: hardware errors bit vector | ||
57 | * @hwerrmsgs: hardware error descriptions | ||
58 | * @nhwerrmsgs: number of hwerrmsgs | ||
59 | * @msg: message buffer | ||
60 | * @msgl: message buffer length | ||
61 | */ | ||
62 | void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs, | ||
63 | size_t nhwerrmsgs, char *msg, size_t msgl) | ||
64 | { | ||
65 | int i; | ||
66 | |||
67 | for (i = 0; i < nhwerrmsgs; i++) | ||
68 | if (hwerrs & hwerrmsgs[i].mask) | ||
69 | qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg); | ||
70 | } | ||
71 | |||
72 | static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev) | ||
73 | { | ||
74 | struct ib_event event; | ||
75 | struct qib_devdata *dd = ppd->dd; | ||
76 | |||
77 | event.device = &dd->verbs_dev.ibdev; | ||
78 | event.element.port_num = ppd->port; | ||
79 | event.event = ev; | ||
80 | ib_dispatch_event(&event); | ||
81 | } | ||
82 | |||
83 | void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs) | ||
84 | { | ||
85 | struct qib_devdata *dd = ppd->dd; | ||
86 | unsigned long flags; | ||
87 | u32 lstate; | ||
88 | u8 ltstate; | ||
89 | enum ib_event_type ev = 0; | ||
90 | |||
91 | lstate = dd->f_iblink_state(ibcs); /* linkstate */ | ||
92 | ltstate = dd->f_ibphys_portstate(ibcs); | ||
93 | |||
94 | /* | ||
95 | * If linkstate transitions into INIT from any of the various down | ||
96 | * states, or if it transitions from any of the up (INIT or better) | ||
97 | * states into any of the down states (except link recovery), then | ||
98 | * call the chip-specific code to take appropriate actions. | ||
99 | */ | ||
100 | if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) && | ||
101 | ltstate == IB_PHYSPORTSTATE_LINKUP) { | ||
102 | /* transitioned to UP */ | ||
103 | if (dd->f_ib_updown(ppd, 1, ibcs)) | ||
104 | goto skip_ibchange; /* chip-code handled */ | ||
105 | } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | | ||
106 | QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) { | ||
107 | if (ltstate != IB_PHYSPORTSTATE_LINKUP && | ||
108 | ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN && | ||
109 | dd->f_ib_updown(ppd, 0, ibcs)) | ||
110 | goto skip_ibchange; /* chip-code handled */ | ||
111 | qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT); | ||
112 | } | ||
113 | |||
114 | if (lstate != IB_PORT_DOWN) { | ||
115 | /* lstate is INIT, ARMED, or ACTIVE */ | ||
116 | if (lstate != IB_PORT_ACTIVE) { | ||
117 | *ppd->statusp &= ~QIB_STATUS_IB_READY; | ||
118 | if (ppd->lflags & QIBL_LINKACTIVE) | ||
119 | ev = IB_EVENT_PORT_ERR; | ||
120 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
121 | if (lstate == IB_PORT_ARMED) { | ||
122 | ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV; | ||
123 | ppd->lflags &= ~(QIBL_LINKINIT | | ||
124 | QIBL_LINKDOWN | QIBL_LINKACTIVE); | ||
125 | } else { | ||
126 | ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV; | ||
127 | ppd->lflags &= ~(QIBL_LINKARMED | | ||
128 | QIBL_LINKDOWN | QIBL_LINKACTIVE); | ||
129 | } | ||
130 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
131 | /* start a 75msec timer to clear symbol errors */ | ||
132 | mod_timer(&ppd->symerr_clear_timer, | ||
133 | msecs_to_jiffies(75)); | ||
134 | } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) { | ||
135 | /* active, but not active deferred */ | ||
136 | qib_hol_up(ppd); /* useful only for 6120 now */ | ||
137 | *ppd->statusp |= | ||
138 | QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF; | ||
139 | qib_clear_symerror_on_linkup((unsigned long)ppd); | ||
140 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
141 | ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV; | ||
142 | ppd->lflags &= ~(QIBL_LINKINIT | | ||
143 | QIBL_LINKDOWN | QIBL_LINKARMED); | ||
144 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
145 | if (dd->flags & QIB_HAS_SEND_DMA) | ||
146 | qib_sdma_process_event(ppd, | ||
147 | qib_sdma_event_e30_go_running); | ||
148 | ev = IB_EVENT_PORT_ACTIVE; | ||
149 | dd->f_setextled(ppd, 1); | ||
150 | } | ||
151 | } else { /* down */ | ||
152 | if (ppd->lflags & QIBL_LINKACTIVE) | ||
153 | ev = IB_EVENT_PORT_ERR; | ||
154 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
155 | ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV; | ||
156 | ppd->lflags &= ~(QIBL_LINKINIT | | ||
157 | QIBL_LINKACTIVE | QIBL_LINKARMED); | ||
158 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
159 | *ppd->statusp &= ~QIB_STATUS_IB_READY; | ||
160 | } | ||
161 | |||
162 | skip_ibchange: | ||
163 | ppd->lastibcstat = ibcs; | ||
164 | if (ev) | ||
165 | signal_ib_event(ppd, ev); | ||
166 | return; | ||
167 | } | ||
168 | |||
169 | void qib_clear_symerror_on_linkup(unsigned long opaque) | ||
170 | { | ||
171 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | ||
172 | |||
173 | if (ppd->lflags & QIBL_LINKACTIVE) | ||
174 | return; | ||
175 | |||
176 | ppd->ibport_data.z_symbol_error_counter = | ||
177 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Handle receive interrupts for user ctxts; this means a user | ||
182 | * process was waiting for a packet to arrive, and didn't want | ||
183 | * to poll. | ||
184 | */ | ||
185 | void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr) | ||
186 | { | ||
187 | struct qib_ctxtdata *rcd; | ||
188 | unsigned long flags; | ||
189 | int i; | ||
190 | |||
191 | spin_lock_irqsave(&dd->uctxt_lock, flags); | ||
192 | for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) { | ||
193 | if (!(ctxtr & (1ULL << i))) | ||
194 | continue; | ||
195 | rcd = dd->rcd[i]; | ||
196 | if (!rcd || !rcd->cnt) | ||
197 | continue; | ||
198 | |||
199 | if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) { | ||
200 | wake_up_interruptible(&rcd->wait); | ||
201 | dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS, | ||
202 | rcd->ctxt); | ||
203 | } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG, | ||
204 | &rcd->flag)) { | ||
205 | rcd->urgent++; | ||
206 | wake_up_interruptible(&rcd->wait); | ||
207 | } | ||
208 | } | ||
209 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
210 | } | ||
211 | |||
212 | void qib_bad_intrstatus(struct qib_devdata *dd) | ||
213 | { | ||
214 | static int allbits; | ||
215 | |||
216 | /* separate routine, for better optimization of qib_intr() */ | ||
217 | |||
218 | /* | ||
219 | * We print the message and disable interrupts, in hope of | ||
220 | * having a better chance of debugging the problem. | ||
221 | */ | ||
222 | qib_dev_err(dd, "Read of chip interrupt status failed," | ||
223 | " disabling interrupts\n"); | ||
224 | if (allbits++) { | ||
225 | /* disable interrupt delivery, something is very wrong */ | ||
226 | if (allbits == 2) | ||
227 | dd->f_set_intr_state(dd, 0); | ||
228 | if (allbits == 3) { | ||
229 | qib_dev_err(dd, "2nd bad interrupt status, " | ||
230 | "unregistering interrupts\n"); | ||
231 | dd->flags |= QIB_BADINTR; | ||
232 | dd->flags &= ~QIB_INITTED; | ||
233 | dd->f_free_irq(dd); | ||
234 | } | ||
235 | } | ||
236 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c new file mode 100644 index 000000000000..4b80eb153d57 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_keys.c | |||
@@ -0,0 +1,328 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include "qib.h" | ||
35 | |||
36 | /** | ||
37 | * qib_alloc_lkey - allocate an lkey | ||
38 | * @rkt: lkey table in which to allocate the lkey | ||
39 | * @mr: memory region that this lkey protects | ||
40 | * | ||
41 | * Returns 1 if successful, otherwise returns 0. | ||
42 | */ | ||
43 | |||
44 | int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) | ||
45 | { | ||
46 | unsigned long flags; | ||
47 | u32 r; | ||
48 | u32 n; | ||
49 | int ret; | ||
50 | |||
51 | spin_lock_irqsave(&rkt->lock, flags); | ||
52 | |||
53 | /* Find the next available LKEY */ | ||
54 | r = rkt->next; | ||
55 | n = r; | ||
56 | for (;;) { | ||
57 | if (rkt->table[r] == NULL) | ||
58 | break; | ||
59 | r = (r + 1) & (rkt->max - 1); | ||
60 | if (r == n) { | ||
61 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
62 | ret = 0; | ||
63 | goto bail; | ||
64 | } | ||
65 | } | ||
66 | rkt->next = (r + 1) & (rkt->max - 1); | ||
67 | /* | ||
68 | * Make sure lkey is never zero, which is reserved to indicate an | ||
69 | * unrestricted LKEY. | ||
70 | */ | ||
71 | rkt->gen++; | ||
72 | mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | | ||
73 | ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen) | ||
74 | << 8); | ||
75 | if (mr->lkey == 0) { | ||
76 | mr->lkey |= 1 << 8; | ||
77 | rkt->gen++; | ||
78 | } | ||
79 | rkt->table[r] = mr; | ||
80 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
81 | |||
82 | ret = 1; | ||
83 | |||
84 | bail: | ||
85 | return ret; | ||
86 | } | ||
87 | |||
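For readers following the packing in qib_alloc_lkey(), a hedged sketch of the resulting bit layout, assuming ib_qib_lkey_table_size == 16: the table index lands in the top 16 bits, an 8-bit slice of the generation counter in bits 8-15, and the low byte stays zero (so a zero lkey can only occur when both parts are zero, which the code then fixes up).

    /*
     * Illustration only, with ib_qib_lkey_table_size == 16:
     *
     *  31            16 15         8 7          0
     * +----------------+------------+------------+
     * | table index r  | gen & 0xff |     0      |
     * +----------------+------------+------------+
     */
    lkey = (r << 16) | ((rkt->gen & 0xff) << 8);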
88 | /** | ||
89 | * qib_free_lkey - free an lkey | ||
90 | * @dev: the qlogic_ib verbs device | ||
91 | * @mr: memory region whose lkey is to be freed | ||
92 | */ | ||
93 | int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr) | ||
94 | { | ||
95 | unsigned long flags; | ||
96 | u32 lkey = mr->lkey; | ||
97 | u32 r; | ||
98 | int ret; | ||
99 | |||
100 | spin_lock_irqsave(&dev->lk_table.lock, flags); | ||
101 | if (lkey == 0) { | ||
102 | if (dev->dma_mr && dev->dma_mr == mr) { | ||
103 | ret = atomic_read(&dev->dma_mr->refcount); | ||
104 | if (!ret) | ||
105 | dev->dma_mr = NULL; | ||
106 | } else | ||
107 | ret = 0; | ||
108 | } else { | ||
109 | r = lkey >> (32 - ib_qib_lkey_table_size); | ||
110 | ret = atomic_read(&dev->lk_table.table[r]->refcount); | ||
111 | if (!ret) | ||
112 | dev->lk_table.table[r] = NULL; | ||
113 | } | ||
114 | spin_unlock_irqrestore(&dev->lk_table.lock, flags); | ||
115 | |||
116 | if (ret) | ||
117 | ret = -EBUSY; | ||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * qib_lkey_ok - check IB SGE for validity and initialize | ||
123 | * @rkt: table containing lkey to check SGE against | ||
124 | * @isge: outgoing internal SGE | ||
125 | * @sge: SGE to check | ||
126 | * @acc: access flags | ||
127 | * | ||
128 | * Return 1 if valid and successful, otherwise returns 0. | ||
129 | * | ||
130 | * Check the IB SGE for validity and initialize our internal version | ||
131 | * of it. | ||
132 | */ | ||
133 | int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | ||
134 | struct qib_sge *isge, struct ib_sge *sge, int acc) | ||
135 | { | ||
136 | struct qib_mregion *mr; | ||
137 | unsigned n, m; | ||
138 | size_t off; | ||
139 | int ret = 0; | ||
140 | unsigned long flags; | ||
141 | |||
142 | /* | ||
143 | * We use LKEY == zero for kernel virtual addresses | ||
144 | * (see qib_get_dma_mr and qib_dma.c). | ||
145 | */ | ||
146 | spin_lock_irqsave(&rkt->lock, flags); | ||
147 | if (sge->lkey == 0) { | ||
148 | struct qib_ibdev *dev = to_idev(pd->ibpd.device); | ||
149 | |||
150 | if (pd->user) | ||
151 | goto bail; | ||
152 | if (!dev->dma_mr) | ||
153 | goto bail; | ||
154 | atomic_inc(&dev->dma_mr->refcount); | ||
155 | isge->mr = dev->dma_mr; | ||
156 | isge->vaddr = (void *) sge->addr; | ||
157 | isge->length = sge->length; | ||
158 | isge->sge_length = sge->length; | ||
159 | isge->m = 0; | ||
160 | isge->n = 0; | ||
161 | goto ok; | ||
162 | } | ||
163 | mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]; | ||
164 | if (unlikely(mr == NULL || mr->lkey != sge->lkey || | ||
165 | mr->pd != &pd->ibpd)) | ||
166 | goto bail; | ||
167 | |||
168 | off = sge->addr - mr->user_base; | ||
169 | if (unlikely(sge->addr < mr->user_base || | ||
170 | off + sge->length > mr->length || | ||
171 | (mr->access_flags & acc) != acc)) | ||
172 | goto bail; | ||
173 | |||
174 | off += mr->offset; | ||
175 | m = 0; | ||
176 | n = 0; | ||
177 | while (off >= mr->map[m]->segs[n].length) { | ||
178 | off -= mr->map[m]->segs[n].length; | ||
179 | n++; | ||
180 | if (n >= QIB_SEGSZ) { | ||
181 | m++; | ||
182 | n = 0; | ||
183 | } | ||
184 | } | ||
185 | atomic_inc(&mr->refcount); | ||
186 | isge->mr = mr; | ||
187 | isge->vaddr = mr->map[m]->segs[n].vaddr + off; | ||
188 | isge->length = mr->map[m]->segs[n].length - off; | ||
189 | isge->sge_length = sge->length; | ||
190 | isge->m = m; | ||
191 | isge->n = n; | ||
192 | ok: | ||
193 | ret = 1; | ||
194 | bail: | ||
195 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
196 | return ret; | ||
197 | } | ||
198 | |||
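The (m, n) walk in qib_lkey_ok() converts a byte offset into a position in the two-level map/segment array. A worked example with hypothetical sizes, 4 KiB segments and QIB_SEGSZ == 512 segments per map entry:

    /*
     * Illustration only:
     *   off = 3 MiB  ->  768 full 4 KiB segments
     *   m = 768 / 512 = 1,  n = 768 % 512 = 256,
     *   remaining offset within segs[n] is 0,
     * so the data starts at mr->map[1]->segs[256].vaddr.
     */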
199 | /** | ||
200 | * qib_rkey_ok - check the IB virtual address, length, and RKEY | ||
201 | * @qp: queue pair issuing the request | ||
202 | * @sge: internal SGE to initialize | ||
203 | * @len: length of data | ||
204 | * @vaddr: virtual address to place data | ||
205 | * @rkey: rkey to check | ||
206 | * @acc: access flags | ||
207 | * | ||
208 | * Return 1 if successful, otherwise 0. | ||
209 | */ | ||
210 | int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | ||
211 | u32 len, u64 vaddr, u32 rkey, int acc) | ||
212 | { | ||
213 | struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; | ||
214 | struct qib_mregion *mr; | ||
215 | unsigned n, m; | ||
216 | size_t off; | ||
217 | int ret = 0; | ||
218 | unsigned long flags; | ||
219 | |||
220 | /* | ||
221 | * We use RKEY == zero for kernel virtual addresses | ||
222 | * (see qib_get_dma_mr and qib_dma.c). | ||
223 | */ | ||
224 | spin_lock_irqsave(&rkt->lock, flags); | ||
225 | if (rkey == 0) { | ||
226 | struct qib_pd *pd = to_ipd(qp->ibqp.pd); | ||
227 | struct qib_ibdev *dev = to_idev(pd->ibpd.device); | ||
228 | |||
229 | if (pd->user) | ||
230 | goto bail; | ||
231 | if (!dev->dma_mr) | ||
232 | goto bail; | ||
233 | atomic_inc(&dev->dma_mr->refcount); | ||
234 | sge->mr = dev->dma_mr; | ||
235 | sge->vaddr = (void *) vaddr; | ||
236 | sge->length = len; | ||
237 | sge->sge_length = len; | ||
238 | sge->m = 0; | ||
239 | sge->n = 0; | ||
240 | goto ok; | ||
241 | } | ||
242 | |||
243 | mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; | ||
244 | if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) | ||
245 | goto bail; | ||
246 | |||
247 | off = vaddr - mr->iova; | ||
248 | if (unlikely(vaddr < mr->iova || off + len > mr->length || | ||
249 | (mr->access_flags & acc) == 0)) | ||
250 | goto bail; | ||
251 | |||
252 | off += mr->offset; | ||
253 | m = 0; | ||
254 | n = 0; | ||
255 | while (off >= mr->map[m]->segs[n].length) { | ||
256 | off -= mr->map[m]->segs[n].length; | ||
257 | n++; | ||
258 | if (n >= QIB_SEGSZ) { | ||
259 | m++; | ||
260 | n = 0; | ||
261 | } | ||
262 | } | ||
263 | atomic_inc(&mr->refcount); | ||
264 | sge->mr = mr; | ||
265 | sge->vaddr = mr->map[m]->segs[n].vaddr + off; | ||
266 | sge->length = mr->map[m]->segs[n].length - off; | ||
267 | sge->sge_length = len; | ||
268 | sge->m = m; | ||
269 | sge->n = n; | ||
270 | ok: | ||
271 | ret = 1; | ||
272 | bail: | ||
273 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * Initialize the memory region specified by the work request. | ||
279 | */ | ||
280 | int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) | ||
281 | { | ||
282 | struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; | ||
283 | struct qib_pd *pd = to_ipd(qp->ibqp.pd); | ||
284 | struct qib_mregion *mr; | ||
285 | u32 rkey = wr->wr.fast_reg.rkey; | ||
286 | unsigned i, n, m; | ||
287 | int ret = -EINVAL; | ||
288 | unsigned long flags; | ||
289 | u64 *page_list; | ||
290 | size_t ps; | ||
291 | |||
292 | spin_lock_irqsave(&rkt->lock, flags); | ||
293 | if (pd->user || rkey == 0) | ||
294 | goto bail; | ||
295 | |||
296 | mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; | ||
297 | if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) | ||
298 | goto bail; | ||
299 | |||
300 | if (wr->wr.fast_reg.page_list_len > mr->max_segs) | ||
301 | goto bail; | ||
302 | |||
303 | ps = 1UL << wr->wr.fast_reg.page_shift; | ||
304 | if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len) | ||
305 | goto bail; | ||
306 | |||
307 | mr->user_base = wr->wr.fast_reg.iova_start; | ||
308 | mr->iova = wr->wr.fast_reg.iova_start; | ||
309 | mr->lkey = rkey; | ||
310 | mr->length = wr->wr.fast_reg.length; | ||
311 | mr->access_flags = wr->wr.fast_reg.access_flags; | ||
312 | page_list = wr->wr.fast_reg.page_list->page_list; | ||
313 | m = 0; | ||
314 | n = 0; | ||
315 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { | ||
316 | mr->map[m]->segs[n].vaddr = (void *) page_list[i]; | ||
317 | mr->map[m]->segs[n].length = ps; | ||
318 | if (++n == QIB_SEGSZ) { | ||
319 | m++; | ||
320 | n = 0; | ||
321 | } | ||
322 | } | ||
323 | |||
324 | ret = 0; | ||
325 | bail: | ||
326 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
327 | return ret; | ||
328 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c new file mode 100644 index 000000000000..94b0d1f3a8f0 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
@@ -0,0 +1,2173 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <rdma/ib_smi.h> | ||
36 | |||
37 | #include "qib.h" | ||
38 | #include "qib_mad.h" | ||
39 | |||
40 | static int reply(struct ib_smp *smp) | ||
41 | { | ||
42 | /* | ||
43 | * The verbs framework will handle the directed/LID route | ||
44 | * packet changes. | ||
45 | */ | ||
46 | smp->method = IB_MGMT_METHOD_GET_RESP; | ||
47 | if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | ||
48 | smp->status |= IB_SMP_DIRECTION; | ||
49 | return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; | ||
50 | } | ||
51 | |||
52 | static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) | ||
53 | { | ||
54 | struct ib_mad_send_buf *send_buf; | ||
55 | struct ib_mad_agent *agent; | ||
56 | struct ib_smp *smp; | ||
57 | int ret; | ||
58 | unsigned long flags; | ||
59 | unsigned long timeout; | ||
60 | |||
61 | agent = ibp->send_agent; | ||
62 | if (!agent) | ||
63 | return; | ||
64 | |||
65 | /* o14-3.2.1 */ | ||
66 | if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE)) | ||
67 | return; | ||
68 | |||
69 | /* o14-2 */ | ||
70 | if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout)) | ||
71 | return; | ||
72 | |||
73 | send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, | ||
74 | IB_MGMT_MAD_DATA, GFP_ATOMIC); | ||
75 | if (IS_ERR(send_buf)) | ||
76 | return; | ||
77 | |||
78 | smp = send_buf->mad; | ||
79 | smp->base_version = IB_MGMT_BASE_VERSION; | ||
80 | smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; | ||
81 | smp->class_version = 1; | ||
82 | smp->method = IB_MGMT_METHOD_TRAP; | ||
83 | ibp->tid++; | ||
84 | smp->tid = cpu_to_be64(ibp->tid); | ||
85 | smp->attr_id = IB_SMP_ATTR_NOTICE; | ||
86 | /* o14-1: smp->mkey = 0; */ | ||
87 | memcpy(smp->data, data, len); | ||
88 | |||
89 | spin_lock_irqsave(&ibp->lock, flags); | ||
90 | if (!ibp->sm_ah) { | ||
91 | if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { | ||
92 | struct ib_ah *ah; | ||
93 | struct ib_ah_attr attr; | ||
94 | |||
95 | memset(&attr, 0, sizeof attr); | ||
96 | attr.dlid = ibp->sm_lid; | ||
97 | attr.port_num = ppd_from_ibp(ibp)->port; | ||
98 | ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr); | ||
99 | if (IS_ERR(ah)) | ||
100 | ret = -EINVAL; | ||
101 | else { | ||
102 | send_buf->ah = ah; | ||
103 | ibp->sm_ah = to_iah(ah); | ||
104 | ret = 0; | ||
105 | } | ||
106 | } else | ||
107 | ret = -EINVAL; | ||
108 | } else { | ||
109 | send_buf->ah = &ibp->sm_ah->ibah; | ||
110 | ret = 0; | ||
111 | } | ||
112 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
113 | |||
114 | if (!ret) | ||
115 | ret = ib_post_send_mad(send_buf, NULL); | ||
116 | if (!ret) { | ||
117 | /* 4.096 usec. */ | ||
118 | timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000; | ||
119 | ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout); | ||
120 | } else { | ||
121 | ib_free_send_mad(send_buf); | ||
122 | ibp->trap_timeout = 0; | ||
123 | } | ||
124 | } | ||
125 | |||
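The o14-2 rate limit in qib_send_trap() is derived from the subnet timeout: the resend interval is 4.096 usec * 2^subnet_timeout. A worked example, assuming a subnet_timeout of 18 (a value a subnet manager might plausibly program):

    /*
     * Illustration only, subnet_timeout == 18:
     *   4096 ns * 2^18 = 1,073,741,824 ns  ~= 1.07 sec
     * so at most one trap is (re)sent per ~1.07 seconds.
     */
    timeout = (4096 * (1UL << 18)) / 1000;      /* ~1073741 usec */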
126 | /* | ||
127 | * Send a bad [PQ]_Key trap (ch. 14.3.8). | ||
128 | */ | ||
129 | void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, | ||
130 | u32 qp1, u32 qp2, __be16 lid1, __be16 lid2) | ||
131 | { | ||
132 | struct ib_mad_notice_attr data; | ||
133 | |||
134 | if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) | ||
135 | ibp->pkey_violations++; | ||
136 | else | ||
137 | ibp->qkey_violations++; | ||
138 | ibp->n_pkt_drops++; | ||
139 | |||
140 | /* Send violation trap */ | ||
141 | data.generic_type = IB_NOTICE_TYPE_SECURITY; | ||
142 | data.prod_type_msb = 0; | ||
143 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | ||
144 | data.trap_num = trap_num; | ||
145 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | ||
146 | data.toggle_count = 0; | ||
147 | memset(&data.details, 0, sizeof data.details); | ||
148 | data.details.ntc_257_258.lid1 = lid1; | ||
149 | data.details.ntc_257_258.lid2 = lid2; | ||
150 | data.details.ntc_257_258.key = cpu_to_be32(key); | ||
151 | data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); | ||
152 | data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); | ||
153 | |||
154 | qib_send_trap(ibp, &data, sizeof data); | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Send a bad M_Key trap (ch. 14.3.9). | ||
159 | */ | ||
160 | static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) | ||
161 | { | ||
162 | struct ib_mad_notice_attr data; | ||
163 | |||
164 | /* Send violation trap */ | ||
165 | data.generic_type = IB_NOTICE_TYPE_SECURITY; | ||
166 | data.prod_type_msb = 0; | ||
167 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | ||
168 | data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; | ||
169 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | ||
170 | data.toggle_count = 0; | ||
171 | memset(&data.details, 0, sizeof data.details); | ||
172 | data.details.ntc_256.lid = data.issuer_lid; | ||
173 | data.details.ntc_256.method = smp->method; | ||
174 | data.details.ntc_256.attr_id = smp->attr_id; | ||
175 | data.details.ntc_256.attr_mod = smp->attr_mod; | ||
176 | data.details.ntc_256.mkey = smp->mkey; | ||
177 | if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | ||
178 | u8 hop_cnt; | ||
179 | |||
180 | data.details.ntc_256.dr_slid = smp->dr_slid; | ||
181 | data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE; | ||
182 | hop_cnt = smp->hop_cnt; | ||
183 | if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) { | ||
184 | data.details.ntc_256.dr_trunc_hop |= | ||
185 | IB_NOTICE_TRAP_DR_TRUNC; | ||
186 | hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path); | ||
187 | } | ||
188 | data.details.ntc_256.dr_trunc_hop |= hop_cnt; | ||
189 | memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path, | ||
190 | hop_cnt); | ||
191 | } | ||
192 | |||
193 | qib_send_trap(ibp, &data, sizeof data); | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * Send a Port Capability Mask Changed trap (ch. 14.3.11). | ||
198 | */ | ||
199 | void qib_cap_mask_chg(struct qib_ibport *ibp) | ||
200 | { | ||
201 | struct ib_mad_notice_attr data; | ||
202 | |||
203 | data.generic_type = IB_NOTICE_TYPE_INFO; | ||
204 | data.prod_type_msb = 0; | ||
205 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | ||
206 | data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; | ||
207 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | ||
208 | data.toggle_count = 0; | ||
209 | memset(&data.details, 0, sizeof data.details); | ||
210 | data.details.ntc_144.lid = data.issuer_lid; | ||
211 | data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); | ||
212 | |||
213 | qib_send_trap(ibp, &data, sizeof data); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Send a System Image GUID Changed trap (ch. 14.3.12). | ||
218 | */ | ||
219 | void qib_sys_guid_chg(struct qib_ibport *ibp) | ||
220 | { | ||
221 | struct ib_mad_notice_attr data; | ||
222 | |||
223 | data.generic_type = IB_NOTICE_TYPE_INFO; | ||
224 | data.prod_type_msb = 0; | ||
225 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | ||
226 | data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; | ||
227 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | ||
228 | data.toggle_count = 0; | ||
229 | memset(&data.details, 0, sizeof data.details); | ||
230 | data.details.ntc_145.lid = data.issuer_lid; | ||
231 | data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; | ||
232 | |||
233 | qib_send_trap(ibp, &data, sizeof data); | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * Send a Node Description Changed trap (ch. 14.3.13). | ||
238 | */ | ||
239 | void qib_node_desc_chg(struct qib_ibport *ibp) | ||
240 | { | ||
241 | struct ib_mad_notice_attr data; | ||
242 | |||
243 | data.generic_type = IB_NOTICE_TYPE_INFO; | ||
244 | data.prod_type_msb = 0; | ||
245 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | ||
246 | data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; | ||
247 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | ||
248 | data.toggle_count = 0; | ||
249 | memset(&data.details, 0, sizeof data.details); | ||
250 | data.details.ntc_144.lid = data.issuer_lid; | ||
251 | data.details.ntc_144.local_changes = 1; | ||
252 | data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; | ||
253 | |||
254 | qib_send_trap(ibp, &data, sizeof data); | ||
255 | } | ||
256 | |||
257 | static int subn_get_nodedescription(struct ib_smp *smp, | ||
258 | struct ib_device *ibdev) | ||
259 | { | ||
260 | if (smp->attr_mod) | ||
261 | smp->status |= IB_SMP_INVALID_FIELD; | ||
262 | |||
263 | memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); | ||
264 | |||
265 | return reply(smp); | ||
266 | } | ||
267 | |||
268 | static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, | ||
269 | u8 port) | ||
270 | { | ||
271 | struct ib_node_info *nip = (struct ib_node_info *)&smp->data; | ||
272 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
273 | u32 vendor, majrev, minrev; | ||
274 | unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */ | ||
275 | |||
276 | /* GUID 0 is illegal */ | ||
277 | if (smp->attr_mod || pidx >= dd->num_pports || | ||
278 | dd->pport[pidx].guid == 0) | ||
279 | smp->status |= IB_SMP_INVALID_FIELD; | ||
280 | else | ||
281 | nip->port_guid = dd->pport[pidx].guid; | ||
282 | |||
283 | nip->base_version = 1; | ||
284 | nip->class_version = 1; | ||
285 | nip->node_type = 1; /* channel adapter */ | ||
286 | nip->num_ports = ibdev->phys_port_cnt; | ||
287 | /* This is already in network order */ | ||
288 | nip->sys_guid = ib_qib_sys_image_guid; | ||
289 | nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */ | ||
290 | nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd)); | ||
291 | nip->device_id = cpu_to_be16(dd->deviceid); | ||
292 | majrev = dd->majrev; | ||
293 | minrev = dd->minrev; | ||
294 | nip->revision = cpu_to_be32((majrev << 16) | minrev); | ||
295 | nip->local_port_num = port; | ||
296 | vendor = dd->vendorid; | ||
297 | nip->vendor_id[0] = QIB_SRC_OUI_1; | ||
298 | nip->vendor_id[1] = QIB_SRC_OUI_2; | ||
299 | nip->vendor_id[2] = QIB_SRC_OUI_3; | ||
300 | |||
301 | return reply(smp); | ||
302 | } | ||
303 | |||
304 | static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, | ||
305 | u8 port) | ||
306 | { | ||
307 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
308 | u32 startgx = 8 * be32_to_cpu(smp->attr_mod); | ||
309 | __be64 *p = (__be64 *) smp->data; | ||
310 | unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */ | ||
311 | |||
312 | /* 32 blocks of 8 64-bit GUIDs per block */ | ||
313 | |||
314 | memset(smp->data, 0, sizeof(smp->data)); | ||
315 | |||
316 | if (startgx == 0 && pidx < dd->num_pports) { | ||
317 | struct qib_pportdata *ppd = dd->pport + pidx; | ||
318 | struct qib_ibport *ibp = &ppd->ibport_data; | ||
319 | __be64 g = ppd->guid; | ||
320 | unsigned i; | ||
321 | |||
322 | /* GUID 0 is illegal */ | ||
323 | if (g == 0) | ||
324 | smp->status |= IB_SMP_INVALID_FIELD; | ||
325 | else { | ||
326 | /* The first is a copy of the read-only HW GUID. */ | ||
327 | p[0] = g; | ||
328 | for (i = 1; i < QIB_GUIDS_PER_PORT; i++) | ||
329 | p[i] = ibp->guids[i - 1]; | ||
330 | } | ||
331 | } else | ||
332 | smp->status |= IB_SMP_INVALID_FIELD; | ||
333 | |||
334 | return reply(smp); | ||
335 | } | ||
336 | |||
337 | static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w) | ||
338 | { | ||
339 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w); | ||
340 | } | ||
341 | |||
342 | static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s) | ||
343 | { | ||
344 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s); | ||
345 | } | ||
346 | |||
347 | static int get_overrunthreshold(struct qib_pportdata *ppd) | ||
348 | { | ||
349 | return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH); | ||
350 | } | ||
351 | |||
352 | /** | ||
353 | * set_overrunthreshold - set the overrun threshold | ||
354 | * @ppd: the physical port data | ||
355 | * @n: the new threshold | ||
356 | * | ||
357 | * Note that this will only take effect when the link state changes. | ||
358 | */ | ||
359 | static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n) | ||
360 | { | ||
361 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH, | ||
362 | (u32)n); | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static int get_phyerrthreshold(struct qib_pportdata *ppd) | ||
367 | { | ||
368 | return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH); | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * set_phyerrthreshold - set the physical error threshold | ||
373 | * @ppd: the physical port data | ||
374 | * @n: the new threshold | ||
375 | * | ||
376 | * Note that this will only take effect when the link state changes. | ||
377 | */ | ||
378 | static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n) | ||
379 | { | ||
380 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH, | ||
381 | (u32)n); | ||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | /** | ||
386 | * get_linkdowndefaultstate - get the default linkdown state | ||
387 | * @ppd: the physical port data | ||
388 | * | ||
389 | * Returns zero if the default is POLL, 1 if the default is SLEEP. | ||
390 | */ | ||
391 | static int get_linkdowndefaultstate(struct qib_pportdata *ppd) | ||
392 | { | ||
393 | return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) == | ||
394 | IB_LINKINITCMD_SLEEP; | ||
395 | } | ||
396 | |||
397 | static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) | ||
398 | { | ||
399 | int ret = 0; | ||
400 | |||
401 | /* Is the mkey in the process of expiring? */ | ||
402 | if (ibp->mkey_lease_timeout && | ||
403 | time_after_eq(jiffies, ibp->mkey_lease_timeout)) { | ||
404 | /* Clear timeout and mkey protection field. */ | ||
405 | ibp->mkey_lease_timeout = 0; | ||
406 | ibp->mkeyprot = 0; | ||
407 | } | ||
408 | |||
409 | /* M_Key checking depends on Portinfo:M_Key_protect_bits */ | ||
410 | if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 && | ||
411 | ibp->mkey != smp->mkey && | ||
412 | (smp->method == IB_MGMT_METHOD_SET || | ||
413 | smp->method == IB_MGMT_METHOD_TRAP_REPRESS || | ||
414 | (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) { | ||
415 | if (ibp->mkey_violations != 0xFFFF) | ||
416 | ++ibp->mkey_violations; | ||
417 | if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) | ||
418 | ibp->mkey_lease_timeout = jiffies + | ||
419 | ibp->mkey_lease_period * HZ; | ||
420 | /* Generate a trap notice. */ | ||
421 | qib_bad_mkey(ibp, smp); | ||
422 | ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; | ||
423 | } else if (ibp->mkey_lease_timeout) | ||
424 | ibp->mkey_lease_timeout = 0; | ||
425 | |||
426 | return ret; | ||
427 | } | ||
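/*
 * Background for the check above (a summary of the M_Key protection
 * semantics as understood here, not driver code): the
 * Portinfo:M_Key_protect_bits select how strict the check is.  At
 * protection levels 0 and 1 only SubnSet() and SubnTrapRepress() are
 * guarded, which is why SubnGet() fails the check only when
 * ibp->mkeyprot >= 2.  A failed check increments the violation counter,
 * arms the lease timer if a lease period is configured, and raises the
 * Bad M_Key trap via qib_bad_mkey().
 */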
428 | |||
429 | static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | ||
430 | u8 port) | ||
431 | { | ||
432 | struct qib_devdata *dd; | ||
433 | struct qib_pportdata *ppd; | ||
434 | struct qib_ibport *ibp; | ||
435 | struct ib_port_info *pip = (struct ib_port_info *)smp->data; | ||
436 | u16 lid; | ||
437 | u8 mtu; | ||
438 | int ret; | ||
439 | u32 state; | ||
440 | u32 port_num = be32_to_cpu(smp->attr_mod); | ||
441 | |||
442 | if (port_num == 0) | ||
443 | port_num = port; | ||
444 | else { | ||
445 | if (port_num > ibdev->phys_port_cnt) { | ||
446 | smp->status |= IB_SMP_INVALID_FIELD; | ||
447 | ret = reply(smp); | ||
448 | goto bail; | ||
449 | } | ||
450 | if (port_num != port) { | ||
451 | ibp = to_iport(ibdev, port_num); | ||
452 | ret = check_mkey(ibp, smp, 0); | ||
453 | if (ret) | ||
454 | goto bail; | ||
455 | } | ||
456 | } | ||
457 | |||
458 | dd = dd_from_ibdev(ibdev); | ||
459 | /* IB numbers ports from 1, hdw from 0 */ | ||
460 | ppd = dd->pport + (port_num - 1); | ||
461 | ibp = &ppd->ibport_data; | ||
462 | |||
463 | /* Clear all fields. Only set the non-zero fields. */ | ||
464 | memset(smp->data, 0, sizeof(smp->data)); | ||
465 | |||
466 | /* Only return the mkey if the protection field allows it. */ | ||
467 | if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey || | ||
468 | ibp->mkeyprot == 0) | ||
469 | pip->mkey = ibp->mkey; | ||
470 | pip->gid_prefix = ibp->gid_prefix; | ||
471 | lid = ppd->lid; | ||
472 | pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; | ||
473 | pip->sm_lid = cpu_to_be16(ibp->sm_lid); | ||
474 | pip->cap_mask = cpu_to_be32(ibp->port_cap_flags); | ||
475 | /* pip->diag_code; */ | ||
476 | pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period); | ||
477 | pip->local_port_num = port; | ||
478 | pip->link_width_enabled = ppd->link_width_enabled; | ||
479 | pip->link_width_supported = ppd->link_width_supported; | ||
480 | pip->link_width_active = ppd->link_width_active; | ||
481 | state = dd->f_iblink_state(ppd->lastibcstat); | ||
482 | pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state; | ||
483 | |||
484 | pip->portphysstate_linkdown = | ||
485 | (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) | | ||
486 | (get_linkdowndefaultstate(ppd) ? 1 : 2); | ||
487 | pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc; | ||
488 | pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) | | ||
489 | ppd->link_speed_enabled; | ||
490 | switch (ppd->ibmtu) { | ||
491 | default: /* something is wrong; fall through */ | ||
492 | case 4096: | ||
493 | mtu = IB_MTU_4096; | ||
494 | break; | ||
495 | case 2048: | ||
496 | mtu = IB_MTU_2048; | ||
497 | break; | ||
498 | case 1024: | ||
499 | mtu = IB_MTU_1024; | ||
500 | break; | ||
501 | case 512: | ||
502 | mtu = IB_MTU_512; | ||
503 | break; | ||
504 | case 256: | ||
505 | mtu = IB_MTU_256; | ||
506 | break; | ||
507 | } | ||
508 | pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl; | ||
509 | pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */ | ||
510 | pip->vl_high_limit = ibp->vl_high_limit; | ||
511 | pip->vl_arb_high_cap = | ||
512 | dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP); | ||
513 | pip->vl_arb_low_cap = | ||
514 | dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP); | ||
515 | /* InitTypeReply = 0 */ | ||
516 | pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; | ||
517 | /* HCAs ignore VLStallCount and HOQLife */ | ||
518 | /* pip->vlstallcnt_hoqlife; */ | ||
519 | pip->operationalvl_pei_peo_fpi_fpo = | ||
520 | dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4; | ||
521 | pip->mkey_violations = cpu_to_be16(ibp->mkey_violations); | ||
522 | /* P_KeyViolations are counted by hardware. */ | ||
523 | pip->pkey_violations = cpu_to_be16(ibp->pkey_violations); | ||
524 | pip->qkey_violations = cpu_to_be16(ibp->qkey_violations); | ||
525 | /* Only the hardware GUID is supported for now */ | ||
526 | pip->guid_cap = QIB_GUIDS_PER_PORT; | ||
527 | pip->clientrereg_resv_subnetto = ibp->subnet_timeout; | ||
528 | /* 32.768 usec. response time (guessing) */ | ||
529 | pip->resv_resptimevalue = 3; | ||
530 | pip->localphyerrors_overrunerrors = | ||
531 | (get_phyerrthreshold(ppd) << 4) | | ||
532 | get_overrunthreshold(ppd); | ||
533 | /* pip->max_credit_hint; */ | ||
534 | if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { | ||
535 | u32 v; | ||
536 | |||
537 | v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY); | ||
538 | pip->link_roundtrip_latency[0] = v >> 16; | ||
539 | pip->link_roundtrip_latency[1] = v >> 8; | ||
540 | pip->link_roundtrip_latency[2] = v; | ||
541 | } | ||
542 | |||
543 | ret = reply(smp); | ||
544 | |||
545 | bail: | ||
546 | return ret; | ||
547 | } | ||
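/*
 * Illustration of the nibble packing used by several of the PortInfo
 * fields above (example values only): with physical port state 5
 * (LinkUp) and a default linkdown state of POLL,
 * portphysstate_linkdown is (5 << 4) | 2 == 0x52; with an active MTU
 * of 4096 (enum value 5) and SM SL 0, neighbormtu_mastersmsl is
 * (5 << 4) | 0 == 0x50.
 */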
548 | |||
549 | /** | ||
550 | * get_pkeys - return the PKEY table | ||
551 | * @dd: the qlogic_ib device | ||
552 | * @port: the IB port number | ||
553 | * @pkeys: the pkey table is placed here | ||
554 | */ | ||
555 | static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) | ||
556 | { | ||
557 | struct qib_pportdata *ppd = dd->pport + port - 1; | ||
558 | /* | ||
559 | * always a kernel context, no locking needed. | ||
560 | * If we get here with ppd setup, no need to check | ||
561 | * that rcd is valid. | ||
562 | */ | ||
563 | struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx]; | ||
564 | |||
565 | memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys)); | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, | ||
571 | u8 port) | ||
572 | { | ||
573 | u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); | ||
574 | u16 *p = (u16 *) smp->data; | ||
575 | __be16 *q = (__be16 *) smp->data; | ||
576 | |||
577 | /* 64 blocks of 32 16-bit P_Key entries */ | ||
578 | |||
579 | memset(smp->data, 0, sizeof(smp->data)); | ||
580 | if (startpx == 0) { | ||
581 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
582 | unsigned i, n = qib_get_npkeys(dd); | ||
583 | |||
584 | get_pkeys(dd, port, p); | ||
585 | |||
586 | for (i = 0; i < n; i++) | ||
587 | q[i] = cpu_to_be16(p[i]); | ||
588 | } else | ||
589 | smp->status |= IB_SMP_INVALID_FIELD; | ||
590 | |||
591 | return reply(smp); | ||
592 | } | ||
593 | |||
594 | static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, | ||
595 | u8 port) | ||
596 | { | ||
597 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
598 | u32 startgx = 8 * be32_to_cpu(smp->attr_mod); | ||
599 | __be64 *p = (__be64 *) smp->data; | ||
600 | unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */ | ||
601 | |||
602 | /* 32 blocks of 8 64-bit GUIDs per block */ | ||
603 | |||
604 | if (startgx == 0 && pidx < dd->num_pports) { | ||
605 | struct qib_pportdata *ppd = dd->pport + pidx; | ||
606 | struct qib_ibport *ibp = &ppd->ibport_data; | ||
607 | unsigned i; | ||
608 | |||
609 | /* The first entry is read-only. */ | ||
610 | for (i = 1; i < QIB_GUIDS_PER_PORT; i++) | ||
611 | ibp->guids[i - 1] = p[i]; | ||
612 | } else | ||
613 | smp->status |= IB_SMP_INVALID_FIELD; | ||
614 | |||
615 | /* The only GUID we support is the first read-only entry. */ | ||
616 | return subn_get_guidinfo(smp, ibdev, port); | ||
617 | } | ||
618 | |||
619 | /** | ||
620 | * subn_set_portinfo - set port information | ||
621 | * @smp: the incoming SM packet | ||
622 | * @ibdev: the infiniband device | ||
623 | * @port: the port on the device | ||
624 | * | ||
625 | * Set Portinfo (see ch. 14.2.5.6). | ||
626 | */ | ||
627 | static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | ||
628 | u8 port) | ||
629 | { | ||
630 | struct ib_port_info *pip = (struct ib_port_info *)smp->data; | ||
631 | struct ib_event event; | ||
632 | struct qib_devdata *dd; | ||
633 | struct qib_pportdata *ppd; | ||
634 | struct qib_ibport *ibp; | ||
635 | char clientrereg = 0; | ||
636 | unsigned long flags; | ||
637 | u16 lid, smlid; | ||
638 | u8 lwe; | ||
639 | u8 lse; | ||
640 | u8 state; | ||
641 | u8 vls; | ||
642 | u8 msl; | ||
643 | u16 lstate; | ||
644 | int ret, ore, mtu; | ||
645 | u32 port_num = be32_to_cpu(smp->attr_mod); | ||
646 | |||
647 | if (port_num == 0) | ||
648 | port_num = port; | ||
649 | else { | ||
650 | if (port_num > ibdev->phys_port_cnt) | ||
651 | goto err; | ||
652 | /* Port attributes can only be set on the receiving port */ | ||
653 | if (port_num != port) | ||
654 | goto get_only; | ||
655 | } | ||
656 | |||
657 | dd = dd_from_ibdev(ibdev); | ||
658 | /* IB numbers ports from 1, hdw from 0 */ | ||
659 | ppd = dd->pport + (port_num - 1); | ||
660 | ibp = &ppd->ibport_data; | ||
661 | event.device = ibdev; | ||
662 | event.element.port_num = port; | ||
663 | |||
664 | ibp->mkey = pip->mkey; | ||
665 | ibp->gid_prefix = pip->gid_prefix; | ||
666 | ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); | ||
667 | |||
668 | lid = be16_to_cpu(pip->lid); | ||
669 | /* Must be a valid unicast LID address. */ | ||
670 | if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) | ||
671 | goto err; | ||
672 | if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { | ||
673 | if (ppd->lid != lid) | ||
674 | qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); | ||
675 | if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) | ||
676 | qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT); | ||
677 | qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7); | ||
678 | event.event = IB_EVENT_LID_CHANGE; | ||
679 | ib_dispatch_event(&event); | ||
680 | } | ||
681 | |||
682 | smlid = be16_to_cpu(pip->sm_lid); | ||
683 | msl = pip->neighbormtu_mastersmsl & 0xF; | ||
684 | /* Must be a valid unicast LID address. */ | ||
685 | if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) | ||
686 | goto err; | ||
687 | if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { | ||
688 | spin_lock_irqsave(&ibp->lock, flags); | ||
689 | if (ibp->sm_ah) { | ||
690 | if (smlid != ibp->sm_lid) | ||
691 | ibp->sm_ah->attr.dlid = smlid; | ||
692 | if (msl != ibp->sm_sl) | ||
693 | ibp->sm_ah->attr.sl = msl; | ||
694 | } | ||
695 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
696 | if (smlid != ibp->sm_lid) | ||
697 | ibp->sm_lid = smlid; | ||
698 | if (msl != ibp->sm_sl) | ||
699 | ibp->sm_sl = msl; | ||
700 | event.event = IB_EVENT_SM_CHANGE; | ||
701 | ib_dispatch_event(&event); | ||
702 | } | ||
703 | |||
704 | /* Allow 1x or 4x to be set (see 14.2.6.6). */ | ||
705 | lwe = pip->link_width_enabled; | ||
706 | if (lwe) { | ||
707 | if (lwe == 0xFF) | ||
708 | lwe = ppd->link_width_supported; | ||
709 | else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) | ||
710 | goto err; | ||
711 | set_link_width_enabled(ppd, lwe); | ||
712 | } | ||
713 | |||
714 | lse = pip->linkspeedactive_enabled & 0xF; | ||
715 | if (lse) { | ||
716 | /* | ||
717 | * The IB 1.2 spec. only allows link speed values | ||
718 | * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific | ||
719 | * speeds. | ||
720 | */ | ||
721 | if (lse == 15) | ||
722 | lse = ppd->link_speed_supported; | ||
723 | else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) | ||
724 | goto err; | ||
725 | set_link_speed_enabled(ppd, lse); | ||
726 | } | ||
727 | |||
728 | /* Set link down default state. */ | ||
729 | switch (pip->portphysstate_linkdown & 0xF) { | ||
730 | case 0: /* NOP */ | ||
731 | break; | ||
732 | case 1: /* SLEEP */ | ||
733 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, | ||
734 | IB_LINKINITCMD_SLEEP); | ||
735 | break; | ||
736 | case 2: /* POLL */ | ||
737 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, | ||
738 | IB_LINKINITCMD_POLL); | ||
739 | break; | ||
740 | default: | ||
741 | goto err; | ||
742 | } | ||
743 | |||
744 | ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; | ||
745 | ibp->vl_high_limit = pip->vl_high_limit; | ||
746 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, | ||
747 | ibp->vl_high_limit); | ||
748 | |||
749 | mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); | ||
750 | if (mtu == -1) | ||
751 | goto err; | ||
752 | qib_set_mtu(ppd, mtu); | ||
753 | |||
754 | /* Set operational VLs */ | ||
755 | vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; | ||
756 | if (vls) { | ||
757 | if (vls > ppd->vls_supported) | ||
758 | goto err; | ||
759 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); | ||
760 | } | ||
761 | |||
762 | if (pip->mkey_violations == 0) | ||
763 | ibp->mkey_violations = 0; | ||
764 | |||
765 | if (pip->pkey_violations == 0) | ||
766 | ibp->pkey_violations = 0; | ||
767 | |||
768 | if (pip->qkey_violations == 0) | ||
769 | ibp->qkey_violations = 0; | ||
770 | |||
771 | ore = pip->localphyerrors_overrunerrors; | ||
772 | if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) | ||
773 | goto err; | ||
774 | |||
775 | if (set_overrunthreshold(ppd, (ore & 0xF))) | ||
776 | goto err; | ||
777 | |||
778 | ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; | ||
779 | |||
780 | if (pip->clientrereg_resv_subnetto & 0x80) { | ||
781 | clientrereg = 1; | ||
782 | event.event = IB_EVENT_CLIENT_REREGISTER; | ||
783 | ib_dispatch_event(&event); | ||
784 | } | ||
785 | |||
786 | /* | ||
787 | * Do the port state change now that the other link parameters | ||
788 | * have been set. | ||
789 | * Changing the port physical state only makes sense if the link | ||
790 | * is down or is being set to down. | ||
791 | */ | ||
792 | state = pip->linkspeed_portstate & 0xF; | ||
793 | lstate = (pip->portphysstate_linkdown >> 4) & 0xF; | ||
794 | if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) | ||
795 | goto err; | ||
796 | |||
797 | /* | ||
798 | * Only state changes of DOWN, ARM, and ACTIVE are valid | ||
799 | * and must be in the correct state to take effect (see 7.2.6). | ||
800 | */ | ||
801 | switch (state) { | ||
802 | case IB_PORT_NOP: | ||
803 | if (lstate == 0) | ||
804 | break; | ||
805 | /* FALLTHROUGH */ | ||
806 | case IB_PORT_DOWN: | ||
807 | if (lstate == 0) | ||
808 | lstate = QIB_IB_LINKDOWN_ONLY; | ||
809 | else if (lstate == 1) | ||
810 | lstate = QIB_IB_LINKDOWN_SLEEP; | ||
811 | else if (lstate == 2) | ||
812 | lstate = QIB_IB_LINKDOWN; | ||
813 | else if (lstate == 3) | ||
814 | lstate = QIB_IB_LINKDOWN_DISABLE; | ||
815 | else | ||
816 | goto err; | ||
817 | spin_lock_irqsave(&ppd->lflags_lock, flags); | ||
818 | ppd->lflags &= ~QIBL_LINKV; | ||
819 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | ||
820 | qib_set_linkstate(ppd, lstate); | ||
821 | /* | ||
822 | * Don't send a reply if the response would be sent | ||
823 | * through the disabled port. | ||
824 | */ | ||
825 | if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) { | ||
826 | ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; | ||
827 | goto done; | ||
828 | } | ||
829 | qib_wait_linkstate(ppd, QIBL_LINKV, 10); | ||
830 | break; | ||
831 | case IB_PORT_ARMED: | ||
832 | qib_set_linkstate(ppd, QIB_IB_LINKARM); | ||
833 | break; | ||
834 | case IB_PORT_ACTIVE: | ||
835 | qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); | ||
836 | break; | ||
837 | default: | ||
838 | /* XXX We have already partially updated our state! */ | ||
839 | goto err; | ||
840 | } | ||
841 | |||
842 | ret = subn_get_portinfo(smp, ibdev, port); | ||
843 | |||
844 | if (clientrereg) | ||
845 | pip->clientrereg_resv_subnetto |= 0x80; | ||
846 | |||
847 | goto done; | ||
848 | |||
849 | err: | ||
850 | smp->status |= IB_SMP_INVALID_FIELD; | ||
851 | get_only: | ||
852 | ret = subn_get_portinfo(smp, ibdev, port); | ||
853 | done: | ||
854 | return ret; | ||
855 | } | ||
856 | |||
857 | /** | ||
858 | * rm_pkey - decrement the reference count for the given PKEY | ||
859 | * @ppd: the qlogic_ib port data | ||
860 | * @key: the PKEY | ||
861 | * | ||
862 | * Return true if this was the last reference and the hardware table entry | ||
863 | * needs to be changed. | ||
864 | */ | ||
865 | static int rm_pkey(struct qib_pportdata *ppd, u16 key) | ||
866 | { | ||
867 | int i; | ||
868 | int ret; | ||
869 | |||
870 | for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { | ||
871 | if (ppd->pkeys[i] != key) | ||
872 | continue; | ||
873 | if (atomic_dec_and_test(&ppd->pkeyrefs[i])) { | ||
874 | ppd->pkeys[i] = 0; | ||
875 | ret = 1; | ||
876 | goto bail; | ||
877 | } | ||
878 | break; | ||
879 | } | ||
880 | |||
881 | ret = 0; | ||
882 | |||
883 | bail: | ||
884 | return ret; | ||
885 | } | ||
886 | |||
887 | /** | ||
888 | * add_pkey - add the given PKEY to the hardware table | ||
889 | * @ppd: the qlogic_ib port data | ||
890 | * @key: the PKEY | ||
891 | * | ||
892 | * Return an error code if unable to add the entry, zero if no change, | ||
893 | * or 1 if the hardware PKEY register needs to be updated. | ||
894 | */ | ||
895 | static int add_pkey(struct qib_pportdata *ppd, u16 key) | ||
896 | { | ||
897 | int i; | ||
898 | u16 lkey = key & 0x7FFF; | ||
899 | int any = 0; | ||
900 | int ret; | ||
901 | |||
902 | if (lkey == 0x7FFF) { | ||
903 | ret = 0; | ||
904 | goto bail; | ||
905 | } | ||
906 | |||
907 | /* Look for an empty slot or a matching PKEY. */ | ||
908 | for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { | ||
909 | if (!ppd->pkeys[i]) { | ||
910 | any++; | ||
911 | continue; | ||
912 | } | ||
913 | /* If it matches exactly, try to increment the ref count */ | ||
914 | if (ppd->pkeys[i] == key) { | ||
915 | if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) { | ||
916 | ret = 0; | ||
917 | goto bail; | ||
918 | } | ||
919 | /* Lost the race. Look for an empty slot below. */ | ||
920 | atomic_dec(&ppd->pkeyrefs[i]); | ||
921 | any++; | ||
922 | } | ||
923 | /* | ||
924 | * It makes no sense to have both the limited and unlimited | ||
925 | * PKEY set at the same time since the unlimited one will | ||
926 | * disable the limited one. | ||
927 | */ | ||
928 | if ((ppd->pkeys[i] & 0x7FFF) == lkey) { | ||
929 | ret = -EEXIST; | ||
930 | goto bail; | ||
931 | } | ||
932 | } | ||
933 | if (!any) { | ||
934 | ret = -EBUSY; | ||
935 | goto bail; | ||
936 | } | ||
937 | for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { | ||
938 | if (!ppd->pkeys[i] && | ||
939 | atomic_inc_return(&ppd->pkeyrefs[i]) == 1) { | ||
940 | /* for qibstats, etc. */ | ||
941 | ppd->pkeys[i] = key; | ||
942 | ret = 1; | ||
943 | goto bail; | ||
944 | } | ||
945 | } | ||
946 | ret = -EBUSY; | ||
947 | |||
948 | bail: | ||
949 | return ret; | ||
950 | } | ||
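/*
 * Example of the limited/full membership rule enforced above
 * (illustrative values): P_Key 0x8001 (full member) and P_Key 0x0001
 * (limited member) share the low 15 bits, so adding one while the other
 * is already programmed returns -EEXIST instead of installing both,
 * since the full-membership entry would render the limited one
 * meaningless.
 */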
951 | |||
952 | /** | ||
953 | * set_pkeys - set the PKEY table for the given port's context | ||
954 | * @dd: the qlogic_ib device | ||
955 | * @port: the IB port number | ||
956 | * @pkeys: the PKEY table | ||
957 | */ | ||
958 | static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) | ||
959 | { | ||
960 | struct qib_pportdata *ppd; | ||
961 | struct qib_ctxtdata *rcd; | ||
962 | int i; | ||
963 | int changed = 0; | ||
964 | |||
965 | /* | ||
966 | * IB port one/two always maps to context zero/one, | ||
967 | * always a kernel context, no locking needed | ||
968 | * If we get here with ppd setup, no need to check | ||
969 | * that rcd is valid. | ||
970 | */ | ||
971 | ppd = dd->pport + (port - 1); | ||
972 | rcd = dd->rcd[ppd->hw_pidx]; | ||
973 | |||
974 | for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) { | ||
975 | u16 key = pkeys[i]; | ||
976 | u16 okey = rcd->pkeys[i]; | ||
977 | |||
978 | if (key == okey) | ||
979 | continue; | ||
980 | /* | ||
981 | * The value of this PKEY table entry is changing. | ||
982 | * Remove the old entry in the hardware's array of PKEYs. | ||
983 | */ | ||
984 | if (okey & 0x7FFF) | ||
985 | changed |= rm_pkey(ppd, okey); | ||
986 | if (key & 0x7FFF) { | ||
987 | int ret = add_pkey(ppd, key); | ||
988 | |||
989 | if (ret < 0) | ||
990 | key = 0; | ||
991 | else | ||
992 | changed |= ret; | ||
993 | } | ||
994 | rcd->pkeys[i] = key; | ||
995 | } | ||
996 | if (changed) { | ||
997 | struct ib_event event; | ||
998 | |||
999 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); | ||
1000 | |||
1001 | event.event = IB_EVENT_PKEY_CHANGE; | ||
1002 | event.device = &dd->verbs_dev.ibdev; | ||
1003 | event.element.port_num = 1; | ||
1004 | ib_dispatch_event(&event); | ||
1005 | } | ||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
1009 | static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, | ||
1010 | u8 port) | ||
1011 | { | ||
1012 | u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); | ||
1013 | __be16 *p = (__be16 *) smp->data; | ||
1014 | u16 *q = (u16 *) smp->data; | ||
1015 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
1016 | unsigned i, n = qib_get_npkeys(dd); | ||
1017 | |||
1018 | for (i = 0; i < n; i++) | ||
1019 | q[i] = be16_to_cpu(p[i]); | ||
1020 | |||
1021 | if (startpx != 0 || set_pkeys(dd, port, q) != 0) | ||
1022 | smp->status |= IB_SMP_INVALID_FIELD; | ||
1023 | |||
1024 | return subn_get_pkeytable(smp, ibdev, port); | ||
1025 | } | ||
1026 | |||
1027 | static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, | ||
1028 | u8 port) | ||
1029 | { | ||
1030 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1031 | u8 *p = (u8 *) smp->data; | ||
1032 | unsigned i; | ||
1033 | |||
1034 | memset(smp->data, 0, sizeof(smp->data)); | ||
1035 | |||
1036 | if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) | ||
1037 | smp->status |= IB_SMP_UNSUP_METHOD; | ||
1038 | else | ||
1039 | for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2) | ||
1040 | *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1]; | ||
1041 | |||
1042 | return reply(smp); | ||
1043 | } | ||
1044 | |||
1045 | static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, | ||
1046 | u8 port) | ||
1047 | { | ||
1048 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1049 | u8 *p = (u8 *) smp->data; | ||
1050 | unsigned i; | ||
1051 | |||
1052 | if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) { | ||
1053 | smp->status |= IB_SMP_UNSUP_METHOD; | ||
1054 | return reply(smp); | ||
1055 | } | ||
1056 | |||
1057 | for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) { | ||
1058 | ibp->sl_to_vl[i] = *p >> 4; | ||
1059 | ibp->sl_to_vl[i + 1] = *p & 0xF; | ||
1060 | } | ||
1061 | qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)), | ||
1062 | _QIB_EVENT_SL2VL_CHANGE_BIT); | ||
1063 | |||
1064 | return subn_get_sl_to_vl(smp, ibdev, port); | ||
1065 | } | ||
1066 | |||
1067 | static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, | ||
1068 | u8 port) | ||
1069 | { | ||
1070 | unsigned which = be32_to_cpu(smp->attr_mod) >> 16; | ||
1071 | struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); | ||
1072 | |||
1073 | memset(smp->data, 0, sizeof(smp->data)); | ||
1074 | |||
1075 | if (ppd->vls_supported == IB_VL_VL0) | ||
1076 | smp->status |= IB_SMP_UNSUP_METHOD; | ||
1077 | else if (which == IB_VLARB_LOWPRI_0_31) | ||
1078 | (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, | ||
1079 | smp->data); | ||
1080 | else if (which == IB_VLARB_HIGHPRI_0_31) | ||
1081 | (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, | ||
1082 | smp->data); | ||
1083 | else | ||
1084 | smp->status |= IB_SMP_INVALID_FIELD; | ||
1085 | |||
1086 | return reply(smp); | ||
1087 | } | ||
1088 | |||
1089 | static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, | ||
1090 | u8 port) | ||
1091 | { | ||
1092 | unsigned which = be32_to_cpu(smp->attr_mod) >> 16; | ||
1093 | struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); | ||
1094 | |||
1095 | if (ppd->vls_supported == IB_VL_VL0) | ||
1096 | smp->status |= IB_SMP_UNSUP_METHOD; | ||
1097 | else if (which == IB_VLARB_LOWPRI_0_31) | ||
1098 | (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, | ||
1099 | smp->data); | ||
1100 | else if (which == IB_VLARB_HIGHPRI_0_31) | ||
1101 | (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, | ||
1102 | smp->data); | ||
1103 | else | ||
1104 | smp->status |= IB_SMP_INVALID_FIELD; | ||
1105 | |||
1106 | return subn_get_vl_arb(smp, ibdev, port); | ||
1107 | } | ||
1108 | |||
1109 | static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev, | ||
1110 | u8 port) | ||
1111 | { | ||
1112 | /* | ||
1113 | * For now, we only send the trap once so no need to process this. | ||
1114 | * o13-6, o13-7, | ||
1115 | * o14-3.a4 The SMA shall not send any message in response to a valid | ||
1116 | * SubnTrapRepress() message. | ||
1117 | */ | ||
1118 | return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; | ||
1119 | } | ||
1120 | |||
1121 | static int pma_get_classportinfo(struct ib_perf *pmp, | ||
1122 | struct ib_device *ibdev) | ||
1123 | { | ||
1124 | struct ib_pma_classportinfo *p = | ||
1125 | (struct ib_pma_classportinfo *)pmp->data; | ||
1126 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
1127 | |||
1128 | memset(pmp->data, 0, sizeof(pmp->data)); | ||
1129 | |||
1130 | if (pmp->attr_mod != 0) | ||
1131 | pmp->status |= IB_SMP_INVALID_FIELD; | ||
1132 | |||
1133 | /* Note that AllPortSelect is not valid */ | ||
1134 | p->base_version = 1; | ||
1135 | p->class_version = 1; | ||
1136 | p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; | ||
1137 | /* | ||
1138 | * Set the most significant bit of CM2 to indicate support for | ||
1139 | * congestion statistics | ||
1140 | */ | ||
1141 | p->reserved[0] = dd->psxmitwait_supported << 7; | ||
1142 | /* | ||
1143 | * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. | ||
1144 | */ | ||
1145 | p->resp_time_value = 18; | ||
1146 | |||
1147 | return reply((struct ib_smp *) pmp); | ||
1148 | } | ||
1149 | |||
1150 | static int pma_get_portsamplescontrol(struct ib_perf *pmp, | ||
1151 | struct ib_device *ibdev, u8 port) | ||
1152 | { | ||
1153 | struct ib_pma_portsamplescontrol *p = | ||
1154 | (struct ib_pma_portsamplescontrol *)pmp->data; | ||
1155 | struct qib_ibdev *dev = to_idev(ibdev); | ||
1156 | struct qib_devdata *dd = dd_from_dev(dev); | ||
1157 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1158 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1159 | unsigned long flags; | ||
1160 | u8 port_select = p->port_select; | ||
1161 | |||
1162 | memset(pmp->data, 0, sizeof(pmp->data)); | ||
1163 | |||
1164 | p->port_select = port_select; | ||
1165 | if (pmp->attr_mod != 0 || port_select != port) { | ||
1166 | pmp->status |= IB_SMP_INVALID_FIELD; | ||
1167 | goto bail; | ||
1168 | } | ||
1169 | spin_lock_irqsave(&ibp->lock, flags); | ||
1170 | p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS); | ||
1171 | p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); | ||
1172 | p->counter_width = 4; /* 32 bit counters */ | ||
1173 | p->counter_mask0_9 = COUNTER_MASK0_9; | ||
1174 | p->sample_start = cpu_to_be32(ibp->pma_sample_start); | ||
1175 | p->sample_interval = cpu_to_be32(ibp->pma_sample_interval); | ||
1176 | p->tag = cpu_to_be16(ibp->pma_tag); | ||
1177 | p->counter_select[0] = ibp->pma_counter_select[0]; | ||
1178 | p->counter_select[1] = ibp->pma_counter_select[1]; | ||
1179 | p->counter_select[2] = ibp->pma_counter_select[2]; | ||
1180 | p->counter_select[3] = ibp->pma_counter_select[3]; | ||
1181 | p->counter_select[4] = ibp->pma_counter_select[4]; | ||
1182 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
1183 | |||
1184 | bail: | ||
1185 | return reply((struct ib_smp *) pmp); | ||
1186 | } | ||
1187 | |||
1188 | static int pma_set_portsamplescontrol(struct ib_perf *pmp, | ||
1189 | struct ib_device *ibdev, u8 port) | ||
1190 | { | ||
1191 | struct ib_pma_portsamplescontrol *p = | ||
1192 | (struct ib_pma_portsamplescontrol *)pmp->data; | ||
1193 | struct qib_ibdev *dev = to_idev(ibdev); | ||
1194 | struct qib_devdata *dd = dd_from_dev(dev); | ||
1195 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1196 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1197 | unsigned long flags; | ||
1198 | u8 status, xmit_flags; | ||
1199 | int ret; | ||
1200 | |||
1201 | if (pmp->attr_mod != 0 || p->port_select != port) { | ||
1202 | pmp->status |= IB_SMP_INVALID_FIELD; | ||
1203 | ret = reply((struct ib_smp *) pmp); | ||
1204 | goto bail; | ||
1205 | } | ||
1206 | |||
1207 | spin_lock_irqsave(&ibp->lock, flags); | ||
1208 | |||
1209 | /* Port Sampling code owns the PS* HW counters */ | ||
1210 | xmit_flags = ppd->cong_stats.flags; | ||
1211 | ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE; | ||
1212 | status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); | ||
1213 | if (status == IB_PMA_SAMPLE_STATUS_DONE || | ||
1214 | (status == IB_PMA_SAMPLE_STATUS_RUNNING && | ||
1215 | xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) { | ||
1216 | ibp->pma_sample_start = be32_to_cpu(p->sample_start); | ||
1217 | ibp->pma_sample_interval = be32_to_cpu(p->sample_interval); | ||
1218 | ibp->pma_tag = be16_to_cpu(p->tag); | ||
1219 | ibp->pma_counter_select[0] = p->counter_select[0]; | ||
1220 | ibp->pma_counter_select[1] = p->counter_select[1]; | ||
1221 | ibp->pma_counter_select[2] = p->counter_select[2]; | ||
1222 | ibp->pma_counter_select[3] = p->counter_select[3]; | ||
1223 | ibp->pma_counter_select[4] = p->counter_select[4]; | ||
1224 | dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval, | ||
1225 | ibp->pma_sample_start); | ||
1226 | } | ||
1227 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
1228 | |||
1229 | ret = pma_get_portsamplescontrol(pmp, ibdev, port); | ||
1230 | |||
1231 | bail: | ||
1232 | return ret; | ||
1233 | } | ||
1234 | |||
1235 | static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd, | ||
1236 | __be16 sel) | ||
1237 | { | ||
1238 | u64 ret; | ||
1239 | |||
1240 | switch (sel) { | ||
1241 | case IB_PMA_PORT_XMIT_DATA: | ||
1242 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA); | ||
1243 | break; | ||
1244 | case IB_PMA_PORT_RCV_DATA: | ||
1245 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA); | ||
1246 | break; | ||
1247 | case IB_PMA_PORT_XMIT_PKTS: | ||
1248 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS); | ||
1249 | break; | ||
1250 | case IB_PMA_PORT_RCV_PKTS: | ||
1251 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS); | ||
1252 | break; | ||
1253 | case IB_PMA_PORT_XMIT_WAIT: | ||
1254 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT); | ||
1255 | break; | ||
1256 | default: | ||
1257 | ret = 0; | ||
1258 | } | ||
1259 | |||
1260 | return ret; | ||
1261 | } | ||
1262 | |||
1263 | /* This function assumes that the xmit_wait lock is already held */ | ||
1264 | static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd) | ||
1265 | { | ||
1266 | u32 delta; | ||
1267 | |||
1268 | delta = get_counter(&ppd->ibport_data, ppd, | ||
1269 | IB_PMA_PORT_XMIT_WAIT); | ||
1270 | return ppd->cong_stats.counter + delta; | ||
1271 | } | ||
1272 | |||
1273 | static void cache_hw_sample_counters(struct qib_pportdata *ppd) | ||
1274 | { | ||
1275 | struct qib_ibport *ibp = &ppd->ibport_data; | ||
1276 | |||
1277 | ppd->cong_stats.counter_cache.psxmitdata = | ||
1278 | get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA); | ||
1279 | ppd->cong_stats.counter_cache.psrcvdata = | ||
1280 | get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA); | ||
1281 | ppd->cong_stats.counter_cache.psxmitpkts = | ||
1282 | get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS); | ||
1283 | ppd->cong_stats.counter_cache.psrcvpkts = | ||
1284 | get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS); | ||
1285 | ppd->cong_stats.counter_cache.psxmitwait = | ||
1286 | get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT); | ||
1287 | } | ||
1288 | |||
1289 | static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd, | ||
1290 | __be16 sel) | ||
1291 | { | ||
1292 | u64 ret; | ||
1293 | |||
1294 | switch (sel) { | ||
1295 | case IB_PMA_PORT_XMIT_DATA: | ||
1296 | ret = ppd->cong_stats.counter_cache.psxmitdata; | ||
1297 | break; | ||
1298 | case IB_PMA_PORT_RCV_DATA: | ||
1299 | ret = ppd->cong_stats.counter_cache.psrcvdata; | ||
1300 | break; | ||
1301 | case IB_PMA_PORT_XMIT_PKTS: | ||
1302 | ret = ppd->cong_stats.counter_cache.psxmitpkts; | ||
1303 | break; | ||
1304 | case IB_PMA_PORT_RCV_PKTS: | ||
1305 | ret = ppd->cong_stats.counter_cache.psrcvpkts; | ||
1306 | break; | ||
1307 | case IB_PMA_PORT_XMIT_WAIT: | ||
1308 | ret = ppd->cong_stats.counter_cache.psxmitwait; | ||
1309 | break; | ||
1310 | default: | ||
1311 | ret = 0; | ||
1312 | } | ||
1313 | |||
1314 | return ret; | ||
1315 | } | ||
1316 | |||
1317 | static int pma_get_portsamplesresult(struct ib_perf *pmp, | ||
1318 | struct ib_device *ibdev, u8 port) | ||
1319 | { | ||
1320 | struct ib_pma_portsamplesresult *p = | ||
1321 | (struct ib_pma_portsamplesresult *)pmp->data; | ||
1322 | struct qib_ibdev *dev = to_idev(ibdev); | ||
1323 | struct qib_devdata *dd = dd_from_dev(dev); | ||
1324 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1325 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1326 | unsigned long flags; | ||
1327 | u8 status; | ||
1328 | int i; | ||
1329 | |||
1330 | memset(pmp->data, 0, sizeof(pmp->data)); | ||
1331 | spin_lock_irqsave(&ibp->lock, flags); | ||
1332 | p->tag = cpu_to_be16(ibp->pma_tag); | ||
1333 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) | ||
1334 | p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; | ||
1335 | else { | ||
1336 | status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); | ||
1337 | p->sample_status = cpu_to_be16(status); | ||
1338 | if (status == IB_PMA_SAMPLE_STATUS_DONE) { | ||
1339 | cache_hw_sample_counters(ppd); | ||
1340 | ppd->cong_stats.counter = | ||
1341 | xmit_wait_get_value_delta(ppd); | ||
1342 | dd->f_set_cntr_sample(ppd, | ||
1343 | QIB_CONG_TIMER_PSINTERVAL, 0); | ||
1344 | ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; | ||
1345 | } | ||
1346 | } | ||
1347 | for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) | ||
1348 | p->counter[i] = cpu_to_be32( | ||
1349 | get_cache_hw_sample_counters( | ||
1350 | ppd, ibp->pma_counter_select[i])); | ||
1351 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
1352 | |||
1353 | return reply((struct ib_smp *) pmp); | ||
1354 | } | ||
1355 | |||
1356 | static int pma_get_portsamplesresult_ext(struct ib_perf *pmp, | ||
1357 | struct ib_device *ibdev, u8 port) | ||
1358 | { | ||
1359 | struct ib_pma_portsamplesresult_ext *p = | ||
1360 | (struct ib_pma_portsamplesresult_ext *)pmp->data; | ||
1361 | struct qib_ibdev *dev = to_idev(ibdev); | ||
1362 | struct qib_devdata *dd = dd_from_dev(dev); | ||
1363 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1364 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1365 | unsigned long flags; | ||
1366 | u8 status; | ||
1367 | int i; | ||
1368 | |||
1369 | /* Port Sampling code owns the PS* HW counters */ | ||
1370 | memset(pmp->data, 0, sizeof(pmp->data)); | ||
1371 | spin_lock_irqsave(&ibp->lock, flags); | ||
1372 | p->tag = cpu_to_be16(ibp->pma_tag); | ||
1373 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) | ||
1374 | p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; | ||
1375 | else { | ||
1376 | status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); | ||
1377 | p->sample_status = cpu_to_be16(status); | ||
1378 | /* 64 bits */ | ||
1379 | p->extended_width = cpu_to_be32(0x80000000); | ||
1380 | if (status == IB_PMA_SAMPLE_STATUS_DONE) { | ||
1381 | cache_hw_sample_counters(ppd); | ||
1382 | ppd->cong_stats.counter = | ||
1383 | xmit_wait_get_value_delta(ppd); | ||
1384 | dd->f_set_cntr_sample(ppd, | ||
1385 | QIB_CONG_TIMER_PSINTERVAL, 0); | ||
1386 | ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; | ||
1387 | } | ||
1388 | } | ||
1389 | for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) | ||
1390 | p->counter[i] = cpu_to_be64( | ||
1391 | get_cache_hw_sample_counters( | ||
1392 | ppd, ibp->pma_counter_select[i])); | ||
1393 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
1394 | |||
1395 | return reply((struct ib_smp *) pmp); | ||
1396 | } | ||
1397 | |||
1398 | static int pma_get_portcounters(struct ib_perf *pmp, | ||
1399 | struct ib_device *ibdev, u8 port) | ||
1400 | { | ||
1401 | struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) | ||
1402 | pmp->data; | ||
1403 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1404 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1405 | struct qib_verbs_counters cntrs; | ||
1406 | u8 port_select = p->port_select; | ||
1407 | |||
1408 | qib_get_counters(ppd, &cntrs); | ||
1409 | |||
1410 | /* Adjust counters for any resets done. */ | ||
1411 | cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; | ||
1412 | cntrs.link_error_recovery_counter -= | ||
1413 | ibp->z_link_error_recovery_counter; | ||
1414 | cntrs.link_downed_counter -= ibp->z_link_downed_counter; | ||
1415 | cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; | ||
1416 | cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors; | ||
1417 | cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; | ||
1418 | cntrs.port_xmit_data -= ibp->z_port_xmit_data; | ||
1419 | cntrs.port_rcv_data -= ibp->z_port_rcv_data; | ||
1420 | cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; | ||
1421 | cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; | ||
1422 | cntrs.local_link_integrity_errors -= | ||
1423 | ibp->z_local_link_integrity_errors; | ||
1424 | cntrs.excessive_buffer_overrun_errors -= | ||
1425 | ibp->z_excessive_buffer_overrun_errors; | ||
1426 | cntrs.vl15_dropped -= ibp->z_vl15_dropped; | ||
1427 | cntrs.vl15_dropped += ibp->n_vl15_dropped; | ||
1428 | |||
1429 | memset(pmp->data, 0, sizeof(pmp->data)); | ||
1430 | |||
1431 | p->port_select = port_select; | ||
1432 | if (pmp->attr_mod != 0 || port_select != port) | ||
1433 | pmp->status |= IB_SMP_INVALID_FIELD; | ||
1434 | |||
1435 | if (cntrs.symbol_error_counter > 0xFFFFUL) | ||
1436 | p->symbol_error_counter = cpu_to_be16(0xFFFF); | ||
1437 | else | ||
1438 | p->symbol_error_counter = | ||
1439 | cpu_to_be16((u16)cntrs.symbol_error_counter); | ||
1440 | if (cntrs.link_error_recovery_counter > 0xFFUL) | ||
1441 | p->link_error_recovery_counter = 0xFF; | ||
1442 | else | ||
1443 | p->link_error_recovery_counter = | ||
1444 | (u8)cntrs.link_error_recovery_counter; | ||
1445 | if (cntrs.link_downed_counter > 0xFFUL) | ||
1446 | p->link_downed_counter = 0xFF; | ||
1447 | else | ||
1448 | p->link_downed_counter = (u8)cntrs.link_downed_counter; | ||
1449 | if (cntrs.port_rcv_errors > 0xFFFFUL) | ||
1450 | p->port_rcv_errors = cpu_to_be16(0xFFFF); | ||
1451 | else | ||
1452 | p->port_rcv_errors = | ||
1453 | cpu_to_be16((u16) cntrs.port_rcv_errors); | ||
1454 | if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) | ||
1455 | p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); | ||
1456 | else | ||
1457 | p->port_rcv_remphys_errors = | ||
1458 | cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); | ||
1459 | if (cntrs.port_xmit_discards > 0xFFFFUL) | ||
1460 | p->port_xmit_discards = cpu_to_be16(0xFFFF); | ||
1461 | else | ||
1462 | p->port_xmit_discards = | ||
1463 | cpu_to_be16((u16)cntrs.port_xmit_discards); | ||
1464 | if (cntrs.local_link_integrity_errors > 0xFUL) | ||
1465 | cntrs.local_link_integrity_errors = 0xFUL; | ||
1466 | if (cntrs.excessive_buffer_overrun_errors > 0xFUL) | ||
1467 | cntrs.excessive_buffer_overrun_errors = 0xFUL; | ||
1468 | p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) | | ||
1469 | cntrs.excessive_buffer_overrun_errors; | ||
1470 | if (cntrs.vl15_dropped > 0xFFFFUL) | ||
1471 | p->vl15_dropped = cpu_to_be16(0xFFFF); | ||
1472 | else | ||
1473 | p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); | ||
1474 | if (cntrs.port_xmit_data > 0xFFFFFFFFUL) | ||
1475 | p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); | ||
1476 | else | ||
1477 | p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); | ||
1478 | if (cntrs.port_rcv_data > 0xFFFFFFFFUL) | ||
1479 | p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); | ||
1480 | else | ||
1481 | p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); | ||
1482 | if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) | ||
1483 | p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); | ||
1484 | else | ||
1485 | p->port_xmit_packets = | ||
1486 | cpu_to_be32((u32)cntrs.port_xmit_packets); | ||
1487 | if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) | ||
1488 | p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); | ||
1489 | else | ||
1490 | p->port_rcv_packets = | ||
1491 | cpu_to_be32((u32) cntrs.port_rcv_packets); | ||
1492 | |||
1493 | return reply((struct ib_smp *) pmp); | ||
1494 | } | ||
1495 | |||
1496 | static int pma_get_portcounters_cong(struct ib_perf *pmp, | ||
1497 | struct ib_device *ibdev, u8 port) | ||
1498 | { | ||
1499 | /* Congestion PMA packets start at offset 24 not 64 */ | ||
1500 | struct ib_pma_portcounters_cong *p = | ||
1501 | (struct ib_pma_portcounters_cong *)pmp->reserved; | ||
1502 | struct qib_verbs_counters cntrs; | ||
1503 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1504 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1505 | struct qib_devdata *dd = dd_from_ppd(ppd); | ||
1506 | u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF; | ||
1507 | u64 xmit_wait_counter; | ||
1508 | unsigned long flags; | ||
1509 | |||
1510 | /* | ||
1511 | * This check is performed only in the GET method because the | ||
1512 | * SET method ends up calling this anyway. | ||
1513 | */ | ||
1514 | if (!dd->psxmitwait_supported) | ||
1515 | pmp->status |= IB_SMP_UNSUP_METH_ATTR; | ||
1516 | if (port_select != port) | ||
1517 | pmp->status |= IB_SMP_INVALID_FIELD; | ||
1518 | |||
1519 | qib_get_counters(ppd, &cntrs); | ||
1520 | spin_lock_irqsave(&ppd->ibport_data.lock, flags); | ||
1521 | xmit_wait_counter = xmit_wait_get_value_delta(ppd); | ||
1522 | spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); | ||
1523 | |||
1524 | /* Adjust counters for any resets done. */ | ||
1525 | cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; | ||
1526 | cntrs.link_error_recovery_counter -= | ||
1527 | ibp->z_link_error_recovery_counter; | ||
1528 | cntrs.link_downed_counter -= ibp->z_link_downed_counter; | ||
1529 | cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; | ||
1530 | cntrs.port_rcv_remphys_errors -= | ||
1531 | ibp->z_port_rcv_remphys_errors; | ||
1532 | cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; | ||
1533 | cntrs.local_link_integrity_errors -= | ||
1534 | ibp->z_local_link_integrity_errors; | ||
1535 | cntrs.excessive_buffer_overrun_errors -= | ||
1536 | ibp->z_excessive_buffer_overrun_errors; | ||
1537 | cntrs.vl15_dropped -= ibp->z_vl15_dropped; | ||
1538 | cntrs.vl15_dropped += ibp->n_vl15_dropped; | ||
1539 | cntrs.port_xmit_data -= ibp->z_port_xmit_data; | ||
1540 | cntrs.port_rcv_data -= ibp->z_port_rcv_data; | ||
1541 | cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; | ||
1542 | cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; | ||
1543 | |||
1544 | memset(pmp->reserved, 0, sizeof(pmp->reserved) + | ||
1545 | sizeof(pmp->data)); | ||
1546 | |||
1547 | /* | ||
1548 | * Set top 3 bits to indicate interval in picoseconds in | ||
1549 | * remaining bits. | ||
1550 | */ | ||
1551 | p->port_check_rate = | ||
1552 | cpu_to_be16((QIB_XMIT_RATE_PICO << 13) | | ||
1553 | (dd->psxmitwait_check_rate & | ||
1554 | ~(QIB_XMIT_RATE_PICO << 13))); | ||
1555 | p->port_adr_events = cpu_to_be64(0); | ||
1556 | p->port_xmit_wait = cpu_to_be64(xmit_wait_counter); | ||
1557 | p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data); | ||
1558 | p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data); | ||
1559 | p->port_xmit_packets = | ||
1560 | cpu_to_be64(cntrs.port_xmit_packets); | ||
1561 | p->port_rcv_packets = | ||
1562 | cpu_to_be64(cntrs.port_rcv_packets); | ||
1563 | if (cntrs.symbol_error_counter > 0xFFFFUL) | ||
1564 | p->symbol_error_counter = cpu_to_be16(0xFFFF); | ||
1565 | else | ||
1566 | p->symbol_error_counter = | ||
1567 | cpu_to_be16( | ||
1568 | (u16)cntrs.symbol_error_counter); | ||
1569 | if (cntrs.link_error_recovery_counter > 0xFFUL) | ||
1570 | p->link_error_recovery_counter = 0xFF; | ||
1571 | else | ||
1572 | p->link_error_recovery_counter = | ||
1573 | (u8)cntrs.link_error_recovery_counter; | ||
1574 | if (cntrs.link_downed_counter > 0xFFUL) | ||
1575 | p->link_downed_counter = 0xFF; | ||
1576 | else | ||
1577 | p->link_downed_counter = | ||
1578 | (u8)cntrs.link_downed_counter; | ||
1579 | if (cntrs.port_rcv_errors > 0xFFFFUL) | ||
1580 | p->port_rcv_errors = cpu_to_be16(0xFFFF); | ||
1581 | else | ||
1582 | p->port_rcv_errors = | ||
1583 | cpu_to_be16((u16) cntrs.port_rcv_errors); | ||
1584 | if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) | ||
1585 | p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); | ||
1586 | else | ||
1587 | p->port_rcv_remphys_errors = | ||
1588 | cpu_to_be16( | ||
1589 | (u16)cntrs.port_rcv_remphys_errors); | ||
1590 | if (cntrs.port_xmit_discards > 0xFFFFUL) | ||
1591 | p->port_xmit_discards = cpu_to_be16(0xFFFF); | ||
1592 | else | ||
1593 | p->port_xmit_discards = | ||
1594 | cpu_to_be16((u16)cntrs.port_xmit_discards); | ||
1595 | if (cntrs.local_link_integrity_errors > 0xFUL) | ||
1596 | cntrs.local_link_integrity_errors = 0xFUL; | ||
1597 | if (cntrs.excessive_buffer_overrun_errors > 0xFUL) | ||
1598 | cntrs.excessive_buffer_overrun_errors = 0xFUL; | ||
1599 | p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) | | ||
1600 | cntrs.excessive_buffer_overrun_errors; | ||
1601 | if (cntrs.vl15_dropped > 0xFFFFUL) | ||
1602 | p->vl15_dropped = cpu_to_be16(0xFFFF); | ||
1603 | else | ||
1604 | p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); | ||
1605 | |||
1606 | return reply((struct ib_smp *)pmp); | ||
1607 | } | ||
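/*
 * Sketch of the port_check_rate encoding used above (illustrative, and
 * assuming QIB_XMIT_RATE_PICO is the 3-bit units code): the units code
 * occupies bits 15:13 of the 16-bit field and the rate value the
 * remaining 13 bits, i.e. value = (units << 13) | (rate & 0x1FFF).
 */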
1608 | |||
1609 | static int pma_get_portcounters_ext(struct ib_perf *pmp, | ||
1610 | struct ib_device *ibdev, u8 port) | ||
1611 | { | ||
1612 | struct ib_pma_portcounters_ext *p = | ||
1613 | (struct ib_pma_portcounters_ext *)pmp->data; | ||
1614 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1615 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1616 | u64 swords, rwords, spkts, rpkts, xwait; | ||
1617 | u8 port_select = p->port_select; | ||
1618 | |||
1619 | memset(pmp->data, 0, sizeof(pmp->data)); | ||
1620 | |||
1621 | p->port_select = port_select; | ||
1622 | if (pmp->attr_mod != 0 || port_select != port) { | ||
1623 | pmp->status |= IB_SMP_INVALID_FIELD; | ||
1624 | goto bail; | ||
1625 | } | ||
1626 | |||
1627 | qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); | ||
1628 | |||
1629 | /* Adjust counters for any resets done. */ | ||
1630 | swords -= ibp->z_port_xmit_data; | ||
1631 | rwords -= ibp->z_port_rcv_data; | ||
1632 | spkts -= ibp->z_port_xmit_packets; | ||
1633 | rpkts -= ibp->z_port_rcv_packets; | ||
1634 | |||
1635 | p->port_xmit_data = cpu_to_be64(swords); | ||
1636 | p->port_rcv_data = cpu_to_be64(rwords); | ||
1637 | p->port_xmit_packets = cpu_to_be64(spkts); | ||
1638 | p->port_rcv_packets = cpu_to_be64(rpkts); | ||
1639 | p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit); | ||
1640 | p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv); | ||
1641 | p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit); | ||
1642 | p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv); | ||
1643 | |||
1644 | bail: | ||
1645 | return reply((struct ib_smp *) pmp); | ||
1646 | } | ||
1647 | |||
1648 | static int pma_set_portcounters(struct ib_perf *pmp, | ||
1649 | struct ib_device *ibdev, u8 port) | ||
1650 | { | ||
1651 | struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) | ||
1652 | pmp->data; | ||
1653 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1654 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1655 | struct qib_verbs_counters cntrs; | ||
1656 | |||
1657 | /* | ||
1658 | * Since the HW doesn't support clearing counters, we save the | ||
1659 | * current count and subtract it from future responses. | ||
1660 | */ | ||
1661 | qib_get_counters(ppd, &cntrs); | ||
1662 | |||
1663 | if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) | ||
1664 | ibp->z_symbol_error_counter = cntrs.symbol_error_counter; | ||
1665 | |||
1666 | if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY) | ||
1667 | ibp->z_link_error_recovery_counter = | ||
1668 | cntrs.link_error_recovery_counter; | ||
1669 | |||
1670 | if (p->counter_select & IB_PMA_SEL_LINK_DOWNED) | ||
1671 | ibp->z_link_downed_counter = cntrs.link_downed_counter; | ||
1672 | |||
1673 | if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS) | ||
1674 | ibp->z_port_rcv_errors = cntrs.port_rcv_errors; | ||
1675 | |||
1676 | if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS) | ||
1677 | ibp->z_port_rcv_remphys_errors = | ||
1678 | cntrs.port_rcv_remphys_errors; | ||
1679 | |||
1680 | if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS) | ||
1681 | ibp->z_port_xmit_discards = cntrs.port_xmit_discards; | ||
1682 | |||
1683 | if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS) | ||
1684 | ibp->z_local_link_integrity_errors = | ||
1685 | cntrs.local_link_integrity_errors; | ||
1686 | |||
1687 | if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS) | ||
1688 | ibp->z_excessive_buffer_overrun_errors = | ||
1689 | cntrs.excessive_buffer_overrun_errors; | ||
1690 | |||
1691 | if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { | ||
1692 | ibp->n_vl15_dropped = 0; | ||
1693 | ibp->z_vl15_dropped = cntrs.vl15_dropped; | ||
1694 | } | ||
1695 | |||
1696 | if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA) | ||
1697 | ibp->z_port_xmit_data = cntrs.port_xmit_data; | ||
1698 | |||
1699 | if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA) | ||
1700 | ibp->z_port_rcv_data = cntrs.port_rcv_data; | ||
1701 | |||
1702 | if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS) | ||
1703 | ibp->z_port_xmit_packets = cntrs.port_xmit_packets; | ||
1704 | |||
1705 | if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS) | ||
1706 | ibp->z_port_rcv_packets = cntrs.port_rcv_packets; | ||
1707 | |||
1708 | return pma_get_portcounters(pmp, ibdev, port); | ||
1709 | } | ||
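Because the chip's counters cannot be cleared in hardware, a PMA Set only snapshots the current hardware values into the ibp->z_* baselines; every later Get reports the hardware value minus that baseline, as seen in pma_get_portcounters() above. A minimal sketch of the pattern, with hypothetical names:

/*
 * Illustrative pattern only -- "hw" stands for a free-running hardware
 * counter that can never be reset; names are hypothetical.
 */
static u64 hw;		/* hardware counter value, always increasing */
static u64 z_base;	/* baseline saved at the last PMA Set */

static void pma_clear_counter(void) { z_base = hw; }		/* "clearing" the counter */
static u64  pma_read_counter(void)  { return hw - z_base; }	/* value reported by a Get */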
1710 | |||
1711 | static int pma_set_portcounters_cong(struct ib_perf *pmp, | ||
1712 | struct ib_device *ibdev, u8 port) | ||
1713 | { | ||
1714 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1715 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1716 | struct qib_devdata *dd = dd_from_ppd(ppd); | ||
1717 | struct qib_verbs_counters cntrs; | ||
1718 | u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF; | ||
1719 | int ret = 0; | ||
1720 | unsigned long flags; | ||
1721 | |||
1722 | qib_get_counters(ppd, &cntrs); | ||
1723 | /* Get counter values before we save them */ | ||
1724 | ret = pma_get_portcounters_cong(pmp, ibdev, port); | ||
1725 | |||
1726 | if (counter_select & IB_PMA_SEL_CONG_XMIT) { | ||
1727 | spin_lock_irqsave(&ppd->ibport_data.lock, flags); | ||
1728 | ppd->cong_stats.counter = 0; | ||
1729 | dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, | ||
1730 | 0x0); | ||
1731 | spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); | ||
1732 | } | ||
1733 | if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) { | ||
1734 | ibp->z_port_xmit_data = cntrs.port_xmit_data; | ||
1735 | ibp->z_port_rcv_data = cntrs.port_rcv_data; | ||
1736 | ibp->z_port_xmit_packets = cntrs.port_xmit_packets; | ||
1737 | ibp->z_port_rcv_packets = cntrs.port_rcv_packets; | ||
1738 | } | ||
1739 | if (counter_select & IB_PMA_SEL_CONG_ALL) { | ||
1740 | ibp->z_symbol_error_counter = | ||
1741 | cntrs.symbol_error_counter; | ||
1742 | ibp->z_link_error_recovery_counter = | ||
1743 | cntrs.link_error_recovery_counter; | ||
1744 | ibp->z_link_downed_counter = | ||
1745 | cntrs.link_downed_counter; | ||
1746 | ibp->z_port_rcv_errors = cntrs.port_rcv_errors; | ||
1747 | ibp->z_port_rcv_remphys_errors = | ||
1748 | cntrs.port_rcv_remphys_errors; | ||
1749 | ibp->z_port_xmit_discards = | ||
1750 | cntrs.port_xmit_discards; | ||
1751 | ibp->z_local_link_integrity_errors = | ||
1752 | cntrs.local_link_integrity_errors; | ||
1753 | ibp->z_excessive_buffer_overrun_errors = | ||
1754 | cntrs.excessive_buffer_overrun_errors; | ||
1755 | ibp->n_vl15_dropped = 0; | ||
1756 | ibp->z_vl15_dropped = cntrs.vl15_dropped; | ||
1757 | } | ||
1758 | |||
1759 | return ret; | ||
1760 | } | ||
1761 | |||
1762 | static int pma_set_portcounters_ext(struct ib_perf *pmp, | ||
1763 | struct ib_device *ibdev, u8 port) | ||
1764 | { | ||
1765 | struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) | ||
1766 | pmp->data; | ||
1767 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1768 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1769 | u64 swords, rwords, spkts, rpkts, xwait; | ||
1770 | |||
1771 | qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); | ||
1772 | |||
1773 | if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) | ||
1774 | ibp->z_port_xmit_data = swords; | ||
1775 | |||
1776 | if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA) | ||
1777 | ibp->z_port_rcv_data = rwords; | ||
1778 | |||
1779 | if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS) | ||
1780 | ibp->z_port_xmit_packets = spkts; | ||
1781 | |||
1782 | if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) | ||
1783 | ibp->z_port_rcv_packets = rpkts; | ||
1784 | |||
1785 | if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) | ||
1786 | ibp->n_unicast_xmit = 0; | ||
1787 | |||
1788 | if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) | ||
1789 | ibp->n_unicast_rcv = 0; | ||
1790 | |||
1791 | if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) | ||
1792 | ibp->n_multicast_xmit = 0; | ||
1793 | |||
1794 | if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) | ||
1795 | ibp->n_multicast_rcv = 0; | ||
1796 | |||
1797 | return pma_get_portcounters_ext(pmp, ibdev, port); | ||
1798 | } | ||
1799 | |||
1800 | static int process_subn(struct ib_device *ibdev, int mad_flags, | ||
1801 | u8 port, struct ib_mad *in_mad, | ||
1802 | struct ib_mad *out_mad) | ||
1803 | { | ||
1804 | struct ib_smp *smp = (struct ib_smp *)out_mad; | ||
1805 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1806 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1807 | int ret; | ||
1808 | |||
1809 | *out_mad = *in_mad; | ||
1810 | if (smp->class_version != 1) { | ||
1811 | smp->status |= IB_SMP_UNSUP_VERSION; | ||
1812 | ret = reply(smp); | ||
1813 | goto bail; | ||
1814 | } | ||
1815 | |||
1816 | ret = check_mkey(ibp, smp, mad_flags); | ||
1817 | if (ret) { | ||
1818 | u32 port_num = be32_to_cpu(smp->attr_mod); | ||
1819 | |||
1820 | /* | ||
1821 | * If this is a get/set portinfo, we already check the | ||
1822 | * M_Key if the MAD is for another port and the M_Key | ||
1823 | * is OK on the receiving port. This check is needed | ||
1824 | * to increment the error counters when the M_Key | ||
1825 | * fails to match on *both* ports. | ||
1826 | */ | ||
1827 | if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && | ||
1828 | (smp->method == IB_MGMT_METHOD_GET || | ||
1829 | smp->method == IB_MGMT_METHOD_SET) && | ||
1830 | port_num && port_num <= ibdev->phys_port_cnt && | ||
1831 | port != port_num) | ||
1832 | (void) check_mkey(to_iport(ibdev, port_num), smp, 0); | ||
1833 | goto bail; | ||
1834 | } | ||
1835 | |||
1836 | switch (smp->method) { | ||
1837 | case IB_MGMT_METHOD_GET: | ||
1838 | switch (smp->attr_id) { | ||
1839 | case IB_SMP_ATTR_NODE_DESC: | ||
1840 | ret = subn_get_nodedescription(smp, ibdev); | ||
1841 | goto bail; | ||
1842 | case IB_SMP_ATTR_NODE_INFO: | ||
1843 | ret = subn_get_nodeinfo(smp, ibdev, port); | ||
1844 | goto bail; | ||
1845 | case IB_SMP_ATTR_GUID_INFO: | ||
1846 | ret = subn_get_guidinfo(smp, ibdev, port); | ||
1847 | goto bail; | ||
1848 | case IB_SMP_ATTR_PORT_INFO: | ||
1849 | ret = subn_get_portinfo(smp, ibdev, port); | ||
1850 | goto bail; | ||
1851 | case IB_SMP_ATTR_PKEY_TABLE: | ||
1852 | ret = subn_get_pkeytable(smp, ibdev, port); | ||
1853 | goto bail; | ||
1854 | case IB_SMP_ATTR_SL_TO_VL_TABLE: | ||
1855 | ret = subn_get_sl_to_vl(smp, ibdev, port); | ||
1856 | goto bail; | ||
1857 | case IB_SMP_ATTR_VL_ARB_TABLE: | ||
1858 | ret = subn_get_vl_arb(smp, ibdev, port); | ||
1859 | goto bail; | ||
1860 | case IB_SMP_ATTR_SM_INFO: | ||
1861 | if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { | ||
1862 | ret = IB_MAD_RESULT_SUCCESS | | ||
1863 | IB_MAD_RESULT_CONSUMED; | ||
1864 | goto bail; | ||
1865 | } | ||
1866 | if (ibp->port_cap_flags & IB_PORT_SM) { | ||
1867 | ret = IB_MAD_RESULT_SUCCESS; | ||
1868 | goto bail; | ||
1869 | } | ||
1870 | /* FALLTHROUGH */ | ||
1871 | default: | ||
1872 | smp->status |= IB_SMP_UNSUP_METH_ATTR; | ||
1873 | ret = reply(smp); | ||
1874 | goto bail; | ||
1875 | } | ||
1876 | |||
1877 | case IB_MGMT_METHOD_SET: | ||
1878 | switch (smp->attr_id) { | ||
1879 | case IB_SMP_ATTR_GUID_INFO: | ||
1880 | ret = subn_set_guidinfo(smp, ibdev, port); | ||
1881 | goto bail; | ||
1882 | case IB_SMP_ATTR_PORT_INFO: | ||
1883 | ret = subn_set_portinfo(smp, ibdev, port); | ||
1884 | goto bail; | ||
1885 | case IB_SMP_ATTR_PKEY_TABLE: | ||
1886 | ret = subn_set_pkeytable(smp, ibdev, port); | ||
1887 | goto bail; | ||
1888 | case IB_SMP_ATTR_SL_TO_VL_TABLE: | ||
1889 | ret = subn_set_sl_to_vl(smp, ibdev, port); | ||
1890 | goto bail; | ||
1891 | case IB_SMP_ATTR_VL_ARB_TABLE: | ||
1892 | ret = subn_set_vl_arb(smp, ibdev, port); | ||
1893 | goto bail; | ||
1894 | case IB_SMP_ATTR_SM_INFO: | ||
1895 | if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { | ||
1896 | ret = IB_MAD_RESULT_SUCCESS | | ||
1897 | IB_MAD_RESULT_CONSUMED; | ||
1898 | goto bail; | ||
1899 | } | ||
1900 | if (ibp->port_cap_flags & IB_PORT_SM) { | ||
1901 | ret = IB_MAD_RESULT_SUCCESS; | ||
1902 | goto bail; | ||
1903 | } | ||
1904 | /* FALLTHROUGH */ | ||
1905 | default: | ||
1906 | smp->status |= IB_SMP_UNSUP_METH_ATTR; | ||
1907 | ret = reply(smp); | ||
1908 | goto bail; | ||
1909 | } | ||
1910 | |||
1911 | case IB_MGMT_METHOD_TRAP_REPRESS: | ||
1912 | if (smp->attr_id == IB_SMP_ATTR_NOTICE) | ||
1913 | ret = subn_trap_repress(smp, ibdev, port); | ||
1914 | else { | ||
1915 | smp->status |= IB_SMP_UNSUP_METH_ATTR; | ||
1916 | ret = reply(smp); | ||
1917 | } | ||
1918 | goto bail; | ||
1919 | |||
1920 | case IB_MGMT_METHOD_TRAP: | ||
1921 | case IB_MGMT_METHOD_REPORT: | ||
1922 | case IB_MGMT_METHOD_REPORT_RESP: | ||
1923 | case IB_MGMT_METHOD_GET_RESP: | ||
1924 | /* | ||
1925 | * The ib_mad module will call us to process responses | ||
1926 | * before checking for other consumers. | ||
1927 | * Just tell the caller to process it normally. | ||
1928 | */ | ||
1929 | ret = IB_MAD_RESULT_SUCCESS; | ||
1930 | goto bail; | ||
1931 | |||
1932 | case IB_MGMT_METHOD_SEND: | ||
1933 | if (ib_get_smp_direction(smp) && | ||
1934 | smp->attr_id == QIB_VENDOR_IPG) { | ||
1935 | ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT, | ||
1936 | smp->data[0]); | ||
1937 | ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; | ||
1938 | } else | ||
1939 | ret = IB_MAD_RESULT_SUCCESS; | ||
1940 | goto bail; | ||
1941 | |||
1942 | default: | ||
1943 | smp->status |= IB_SMP_UNSUP_METHOD; | ||
1944 | ret = reply(smp); | ||
1945 | } | ||
1946 | |||
1947 | bail: | ||
1948 | return ret; | ||
1949 | } | ||
1950 | |||
1951 | static int process_perf(struct ib_device *ibdev, u8 port, | ||
1952 | struct ib_mad *in_mad, | ||
1953 | struct ib_mad *out_mad) | ||
1954 | { | ||
1955 | struct ib_perf *pmp = (struct ib_perf *)out_mad; | ||
1956 | int ret; | ||
1957 | |||
1958 | *out_mad = *in_mad; | ||
1959 | if (pmp->class_version != 1) { | ||
1960 | pmp->status |= IB_SMP_UNSUP_VERSION; | ||
1961 | ret = reply((struct ib_smp *) pmp); | ||
1962 | goto bail; | ||
1963 | } | ||
1964 | |||
1965 | switch (pmp->method) { | ||
1966 | case IB_MGMT_METHOD_GET: | ||
1967 | switch (pmp->attr_id) { | ||
1968 | case IB_PMA_CLASS_PORT_INFO: | ||
1969 | ret = pma_get_classportinfo(pmp, ibdev); | ||
1970 | goto bail; | ||
1971 | case IB_PMA_PORT_SAMPLES_CONTROL: | ||
1972 | ret = pma_get_portsamplescontrol(pmp, ibdev, port); | ||
1973 | goto bail; | ||
1974 | case IB_PMA_PORT_SAMPLES_RESULT: | ||
1975 | ret = pma_get_portsamplesresult(pmp, ibdev, port); | ||
1976 | goto bail; | ||
1977 | case IB_PMA_PORT_SAMPLES_RESULT_EXT: | ||
1978 | ret = pma_get_portsamplesresult_ext(pmp, ibdev, port); | ||
1979 | goto bail; | ||
1980 | case IB_PMA_PORT_COUNTERS: | ||
1981 | ret = pma_get_portcounters(pmp, ibdev, port); | ||
1982 | goto bail; | ||
1983 | case IB_PMA_PORT_COUNTERS_EXT: | ||
1984 | ret = pma_get_portcounters_ext(pmp, ibdev, port); | ||
1985 | goto bail; | ||
1986 | case IB_PMA_PORT_COUNTERS_CONG: | ||
1987 | ret = pma_get_portcounters_cong(pmp, ibdev, port); | ||
1988 | goto bail; | ||
1989 | default: | ||
1990 | pmp->status |= IB_SMP_UNSUP_METH_ATTR; | ||
1991 | ret = reply((struct ib_smp *) pmp); | ||
1992 | goto bail; | ||
1993 | } | ||
1994 | |||
1995 | case IB_MGMT_METHOD_SET: | ||
1996 | switch (pmp->attr_id) { | ||
1997 | case IB_PMA_PORT_SAMPLES_CONTROL: | ||
1998 | ret = pma_set_portsamplescontrol(pmp, ibdev, port); | ||
1999 | goto bail; | ||
2000 | case IB_PMA_PORT_COUNTERS: | ||
2001 | ret = pma_set_portcounters(pmp, ibdev, port); | ||
2002 | goto bail; | ||
2003 | case IB_PMA_PORT_COUNTERS_EXT: | ||
2004 | ret = pma_set_portcounters_ext(pmp, ibdev, port); | ||
2005 | goto bail; | ||
2006 | case IB_PMA_PORT_COUNTERS_CONG: | ||
2007 | ret = pma_set_portcounters_cong(pmp, ibdev, port); | ||
2008 | goto bail; | ||
2009 | default: | ||
2010 | pmp->status |= IB_SMP_UNSUP_METH_ATTR; | ||
2011 | ret = reply((struct ib_smp *) pmp); | ||
2012 | goto bail; | ||
2013 | } | ||
2014 | |||
2015 | case IB_MGMT_METHOD_TRAP: | ||
2016 | case IB_MGMT_METHOD_GET_RESP: | ||
2017 | /* | ||
2018 | * The ib_mad module will call us to process responses | ||
2019 | * before checking for other consumers. | ||
2020 | * Just tell the caller to process it normally. | ||
2021 | */ | ||
2022 | ret = IB_MAD_RESULT_SUCCESS; | ||
2023 | goto bail; | ||
2024 | |||
2025 | default: | ||
2026 | pmp->status |= IB_SMP_UNSUP_METHOD; | ||
2027 | ret = reply((struct ib_smp *) pmp); | ||
2028 | } | ||
2029 | |||
2030 | bail: | ||
2031 | return ret; | ||
2032 | } | ||
2033 | |||
2034 | /** | ||
2035 | * qib_process_mad - process an incoming MAD packet | ||
2036 | * @ibdev: the infiniband device this packet came in on | ||
2037 | * @mad_flags: MAD flags | ||
2038 | * @port: the port number this packet came in on | ||
2039 | * @in_wc: the work completion entry for this packet | ||
2040 | * @in_grh: the global route header for this packet | ||
2041 | * @in_mad: the incoming MAD | ||
2042 | * @out_mad: any outgoing MAD reply | ||
2043 | * | ||
2044 |  * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not	| ||
2045 |  * interested in processing, in which case the caller handles it normally.	| ||
2046 | * | ||
2047 | * Note that the verbs framework has already done the MAD sanity checks, | ||
2048 | * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE | ||
2049 | * MADs. | ||
2050 | * | ||
2051 | * This is called by the ib_mad module. | ||
2052 | */ | ||
2053 | int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, | ||
2054 | struct ib_wc *in_wc, struct ib_grh *in_grh, | ||
2055 | struct ib_mad *in_mad, struct ib_mad *out_mad) | ||
2056 | { | ||
2057 | int ret; | ||
2058 | |||
2059 | switch (in_mad->mad_hdr.mgmt_class) { | ||
2060 | case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: | ||
2061 | case IB_MGMT_CLASS_SUBN_LID_ROUTED: | ||
2062 | ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); | ||
2063 | goto bail; | ||
2064 | |||
2065 | case IB_MGMT_CLASS_PERF_MGMT: | ||
2066 | ret = process_perf(ibdev, port, in_mad, out_mad); | ||
2067 | goto bail; | ||
2068 | |||
2069 | default: | ||
2070 | ret = IB_MAD_RESULT_SUCCESS; | ||
2071 | } | ||
2072 | |||
2073 | bail: | ||
2074 | return ret; | ||
2075 | } | ||
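The value handed back to the ib_mad core is a bit mask: IB_MAD_RESULT_SUCCESS alone means the MAD was not consumed here and should be processed normally, IB_MAD_RESULT_CONSUMED asks the core to drop it, and replies built with the reply() helper earlier in this file also carry IB_MAD_RESULT_REPLY so the core transmits out_mad back to the requester. A hedged sketch of how a caller could act on those bits; send_reply() and pass_to_other_agents() are hypothetical helpers, not kernel APIs:

/* Hedged sketch only; the two helpers called below are hypothetical. */
static void handle_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		       struct ib_wc *in_wc, struct ib_grh *in_grh,
		       struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	int result = qib_process_mad(ibdev, mad_flags, port, in_wc, in_grh,
				     in_mad, out_mad);

	if (!(result & IB_MAD_RESULT_SUCCESS))
		return;				/* processing failed */
	if (result & IB_MAD_RESULT_REPLY)
		send_reply(out_mad);		/* hypothetical: transmit the response */
	else if (!(result & IB_MAD_RESULT_CONSUMED))
		pass_to_other_agents(in_mad);	/* hypothetical: let other consumers see it */
}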
2076 | |||
2077 | static void send_handler(struct ib_mad_agent *agent, | ||
2078 | struct ib_mad_send_wc *mad_send_wc) | ||
2079 | { | ||
2080 | ib_free_send_mad(mad_send_wc->send_buf); | ||
2081 | } | ||
2082 | |||
2083 | static void xmit_wait_timer_func(unsigned long opaque) | ||
2084 | { | ||
2085 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | ||
2086 | struct qib_devdata *dd = dd_from_ppd(ppd); | ||
2087 | unsigned long flags; | ||
2088 | u8 status; | ||
2089 | |||
2090 | spin_lock_irqsave(&ppd->ibport_data.lock, flags); | ||
2091 | if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) { | ||
2092 | status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); | ||
2093 | if (status == IB_PMA_SAMPLE_STATUS_DONE) { | ||
2094 | /* save counter cache */ | ||
2095 | cache_hw_sample_counters(ppd); | ||
2096 | ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; | ||
2097 | } else | ||
2098 | goto done; | ||
2099 | } | ||
2100 | ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); | ||
2101 | dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); | ||
2102 | done: | ||
2103 | spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); | ||
2104 | mod_timer(&ppd->cong_stats.timer, jiffies + HZ); | ||
2105 | } | ||
2106 | |||
2107 | int qib_create_agents(struct qib_ibdev *dev) | ||
2108 | { | ||
2109 | struct qib_devdata *dd = dd_from_dev(dev); | ||
2110 | struct ib_mad_agent *agent; | ||
2111 | struct qib_ibport *ibp; | ||
2112 | int p; | ||
2113 | int ret; | ||
2114 | |||
2115 | for (p = 0; p < dd->num_pports; p++) { | ||
2116 | ibp = &dd->pport[p].ibport_data; | ||
2117 | agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, | ||
2118 | NULL, 0, send_handler, | ||
2119 | NULL, NULL); | ||
2120 | if (IS_ERR(agent)) { | ||
2121 | ret = PTR_ERR(agent); | ||
2122 | goto err; | ||
2123 | } | ||
2124 | |||
2125 | /* Initialize xmit_wait structure */ | ||
2126 | dd->pport[p].cong_stats.counter = 0; | ||
2127 | init_timer(&dd->pport[p].cong_stats.timer); | ||
2128 | dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func; | ||
2129 | dd->pport[p].cong_stats.timer.data = | ||
2130 | (unsigned long)(&dd->pport[p]); | ||
2131 | dd->pport[p].cong_stats.timer.expires = 0; | ||
2132 | add_timer(&dd->pport[p].cong_stats.timer); | ||
2133 | |||
2134 | ibp->send_agent = agent; | ||
2135 | } | ||
2136 | |||
2137 | return 0; | ||
2138 | |||
2139 | err: | ||
2140 | for (p = 0; p < dd->num_pports; p++) { | ||
2141 | ibp = &dd->pport[p].ibport_data; | ||
2142 | if (ibp->send_agent) { | ||
2143 | agent = ibp->send_agent; | ||
2144 | ibp->send_agent = NULL; | ||
2145 | ib_unregister_mad_agent(agent); | ||
2146 | } | ||
2147 | } | ||
2148 | |||
2149 | return ret; | ||
2150 | } | ||
2151 | |||
2152 | void qib_free_agents(struct qib_ibdev *dev) | ||
2153 | { | ||
2154 | struct qib_devdata *dd = dd_from_dev(dev); | ||
2155 | struct ib_mad_agent *agent; | ||
2156 | struct qib_ibport *ibp; | ||
2157 | int p; | ||
2158 | |||
2159 | for (p = 0; p < dd->num_pports; p++) { | ||
2160 | ibp = &dd->pport[p].ibport_data; | ||
2161 | if (ibp->send_agent) { | ||
2162 | agent = ibp->send_agent; | ||
2163 | ibp->send_agent = NULL; | ||
2164 | ib_unregister_mad_agent(agent); | ||
2165 | } | ||
2166 | if (ibp->sm_ah) { | ||
2167 | ib_destroy_ah(&ibp->sm_ah->ibah); | ||
2168 | ibp->sm_ah = NULL; | ||
2169 | } | ||
2170 | if (dd->pport[p].cong_stats.timer.data) | ||
2171 | del_timer_sync(&dd->pport[p].cong_stats.timer); | ||
2172 | } | ||
2173 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h new file mode 100644 index 000000000000..147aff9117d7 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_mad.h | |||
@@ -0,0 +1,373 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004) | ||
36 | #define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008) | ||
37 | #define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C) | ||
38 | #define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C) | ||
39 | |||
40 | struct ib_node_info { | ||
41 | u8 base_version; | ||
42 | u8 class_version; | ||
43 | u8 node_type; | ||
44 | u8 num_ports; | ||
45 | __be64 sys_guid; | ||
46 | __be64 node_guid; | ||
47 | __be64 port_guid; | ||
48 | __be16 partition_cap; | ||
49 | __be16 device_id; | ||
50 | __be32 revision; | ||
51 | u8 local_port_num; | ||
52 | u8 vendor_id[3]; | ||
53 | } __attribute__ ((packed)); | ||
54 | |||
55 | struct ib_mad_notice_attr { | ||
56 | u8 generic_type; | ||
57 | u8 prod_type_msb; | ||
58 | __be16 prod_type_lsb; | ||
59 | __be16 trap_num; | ||
60 | __be16 issuer_lid; | ||
61 | __be16 toggle_count; | ||
62 | |||
63 | union { | ||
64 | struct { | ||
65 | u8 details[54]; | ||
66 | } raw_data; | ||
67 | |||
68 | struct { | ||
69 | __be16 reserved; | ||
70 | __be16 lid; /* where violation happened */ | ||
71 | u8 port_num; /* where violation happened */ | ||
72 | } __attribute__ ((packed)) ntc_129_131; | ||
73 | |||
74 | struct { | ||
75 | __be16 reserved; | ||
76 | 			__be16	lid;		/* LID where change occurred */	| ||
77 | u8 reserved2; | ||
78 | u8 local_changes; /* low bit - local changes */ | ||
79 | __be32 new_cap_mask; /* new capability mask */ | ||
80 | u8 reserved3; | ||
81 | u8 change_flags; /* low 3 bits only */ | ||
82 | } __attribute__ ((packed)) ntc_144; | ||
83 | |||
84 | struct { | ||
85 | __be16 reserved; | ||
86 | __be16 lid; /* lid where sys guid changed */ | ||
87 | __be16 reserved2; | ||
88 | __be64 new_sys_guid; | ||
89 | } __attribute__ ((packed)) ntc_145; | ||
90 | |||
91 | struct { | ||
92 | __be16 reserved; | ||
93 | __be16 lid; | ||
94 | __be16 dr_slid; | ||
95 | u8 method; | ||
96 | u8 reserved2; | ||
97 | __be16 attr_id; | ||
98 | __be32 attr_mod; | ||
99 | __be64 mkey; | ||
100 | u8 reserved3; | ||
101 | u8 dr_trunc_hop; | ||
102 | u8 dr_rtn_path[30]; | ||
103 | } __attribute__ ((packed)) ntc_256; | ||
104 | |||
105 | struct { | ||
106 | __be16 reserved; | ||
107 | __be16 lid1; | ||
108 | __be16 lid2; | ||
109 | __be32 key; | ||
110 | __be32 sl_qp1; /* SL: high 4 bits */ | ||
111 | __be32 qp2; /* high 8 bits reserved */ | ||
112 | union ib_gid gid1; | ||
113 | union ib_gid gid2; | ||
114 | } __attribute__ ((packed)) ntc_257_258; | ||
115 | |||
116 | } details; | ||
117 | }; | ||
118 | |||
119 | /* | ||
120 | * Generic trap/notice types | ||
121 | */ | ||
122 | #define IB_NOTICE_TYPE_FATAL 0x80 | ||
123 | #define IB_NOTICE_TYPE_URGENT 0x81 | ||
124 | #define IB_NOTICE_TYPE_SECURITY 0x82 | ||
125 | #define IB_NOTICE_TYPE_SM 0x83 | ||
126 | #define IB_NOTICE_TYPE_INFO 0x84 | ||
127 | |||
128 | /* | ||
129 | * Generic trap/notice producers | ||
130 | */ | ||
131 | #define IB_NOTICE_PROD_CA cpu_to_be16(1) | ||
132 | #define IB_NOTICE_PROD_SWITCH cpu_to_be16(2) | ||
133 | #define IB_NOTICE_PROD_ROUTER cpu_to_be16(3) | ||
134 | #define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4) | ||
135 | |||
136 | /* | ||
137 | * Generic trap/notice numbers | ||
138 | */ | ||
139 | #define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129) | ||
140 | #define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130) | ||
141 | #define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131) | ||
142 | #define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144) | ||
143 | #define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145) | ||
144 | #define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256) | ||
145 | #define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257) | ||
146 | #define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258) | ||
147 | |||
148 | /* | ||
149 | * Repress trap/notice flags | ||
150 | */ | ||
151 | #define IB_NOTICE_REPRESS_LLI_THRESH (1 << 0) | ||
152 | #define IB_NOTICE_REPRESS_EBO_THRESH (1 << 1) | ||
153 | #define IB_NOTICE_REPRESS_FLOW_UPDATE (1 << 2) | ||
154 | #define IB_NOTICE_REPRESS_CAP_MASK_CHG (1 << 3) | ||
155 | #define IB_NOTICE_REPRESS_SYS_GUID_CHG (1 << 4) | ||
156 | #define IB_NOTICE_REPRESS_BAD_MKEY (1 << 5) | ||
157 | #define IB_NOTICE_REPRESS_BAD_PKEY (1 << 6) | ||
158 | #define IB_NOTICE_REPRESS_BAD_QKEY (1 << 7) | ||
159 | |||
160 | /* | ||
161 | * Generic trap/notice other local changes flags (trap 144). | ||
162 | */ | ||
163 | #define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */ | ||
164 | #define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */ | ||
165 | #define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01 | ||
166 | |||
167 | /* | ||
168 | 	 * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).	| ||
169 | */ | ||
170 | #define IB_NOTICE_TRAP_DR_NOTICE 0x80 | ||
171 | #define IB_NOTICE_TRAP_DR_TRUNC 0x40 | ||
172 | |||
173 | struct ib_vl_weight_elem { | ||
174 | u8 vl; /* Only low 4 bits, upper 4 bits reserved */ | ||
175 | u8 weight; | ||
176 | }; | ||
177 | |||
178 | #define IB_VLARB_LOWPRI_0_31 1 | ||
179 | #define IB_VLARB_LOWPRI_32_63 2 | ||
180 | #define IB_VLARB_HIGHPRI_0_31 3 | ||
181 | #define IB_VLARB_HIGHPRI_32_63 4 | ||
182 | |||
183 | /* | ||
184 | * PMA class portinfo capability mask bits | ||
185 | */ | ||
186 | #define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8) | ||
187 | #define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9) | ||
188 | #define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12) | ||
189 | |||
190 | #define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) | ||
191 | #define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010) | ||
192 | #define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011) | ||
193 | #define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012) | ||
194 | #define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D) | ||
195 | #define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E) | ||
196 | #define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00) | ||
197 | |||
198 | struct ib_perf { | ||
199 | u8 base_version; | ||
200 | u8 mgmt_class; | ||
201 | u8 class_version; | ||
202 | u8 method; | ||
203 | __be16 status; | ||
204 | __be16 unused; | ||
205 | __be64 tid; | ||
206 | __be16 attr_id; | ||
207 | __be16 resv; | ||
208 | __be32 attr_mod; | ||
209 | u8 reserved[40]; | ||
210 | u8 data[192]; | ||
211 | } __attribute__ ((packed)); | ||
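As a quick consistency check, struct ib_perf is exactly one MAD in size: the header fields total 1+1+1+1+2+2+8+2+2+4 = 24 bytes, and 24 + 40 reserved + 192 data bytes = 256 bytes, the fixed IB MAD size (the structure is packed, so no padding is added).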
212 | |||
213 | struct ib_pma_classportinfo { | ||
214 | u8 base_version; | ||
215 | u8 class_version; | ||
216 | __be16 cap_mask; | ||
217 | u8 reserved[3]; | ||
218 | u8 resp_time_value; /* only lower 5 bits */ | ||
219 | union ib_gid redirect_gid; | ||
220 | __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */ | ||
221 | __be16 redirect_lid; | ||
222 | __be16 redirect_pkey; | ||
223 | __be32 redirect_qp; /* only lower 24 bits */ | ||
224 | __be32 redirect_qkey; | ||
225 | union ib_gid trap_gid; | ||
226 | __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */ | ||
227 | __be16 trap_lid; | ||
228 | __be16 trap_pkey; | ||
229 | __be32 trap_hl_qp; /* 8, 24 bits respectively */ | ||
230 | __be32 trap_qkey; | ||
231 | } __attribute__ ((packed)); | ||
232 | |||
233 | struct ib_pma_portsamplescontrol { | ||
234 | u8 opcode; | ||
235 | u8 port_select; | ||
236 | u8 tick; | ||
237 | u8 counter_width; /* only lower 3 bits */ | ||
238 | __be32 counter_mask0_9; /* 2, 10 * 3, bits */ | ||
239 | __be16 counter_mask10_14; /* 1, 5 * 3, bits */ | ||
240 | u8 sample_mechanisms; | ||
241 | u8 sample_status; /* only lower 2 bits */ | ||
242 | __be64 option_mask; | ||
243 | __be64 vendor_mask; | ||
244 | __be32 sample_start; | ||
245 | __be32 sample_interval; | ||
246 | __be16 tag; | ||
247 | __be16 counter_select[15]; | ||
248 | } __attribute__ ((packed)); | ||
249 | |||
250 | struct ib_pma_portsamplesresult { | ||
251 | __be16 tag; | ||
252 | __be16 sample_status; /* only lower 2 bits */ | ||
253 | __be32 counter[15]; | ||
254 | } __attribute__ ((packed)); | ||
255 | |||
256 | struct ib_pma_portsamplesresult_ext { | ||
257 | __be16 tag; | ||
258 | __be16 sample_status; /* only lower 2 bits */ | ||
259 | __be32 extended_width; /* only upper 2 bits */ | ||
260 | __be64 counter[15]; | ||
261 | } __attribute__ ((packed)); | ||
262 | |||
263 | struct ib_pma_portcounters { | ||
264 | u8 reserved; | ||
265 | u8 port_select; | ||
266 | __be16 counter_select; | ||
267 | __be16 symbol_error_counter; | ||
268 | u8 link_error_recovery_counter; | ||
269 | u8 link_downed_counter; | ||
270 | __be16 port_rcv_errors; | ||
271 | __be16 port_rcv_remphys_errors; | ||
272 | __be16 port_rcv_switch_relay_errors; | ||
273 | __be16 port_xmit_discards; | ||
274 | u8 port_xmit_constraint_errors; | ||
275 | u8 port_rcv_constraint_errors; | ||
276 | u8 reserved1; | ||
277 | u8 lli_ebor_errors; /* 4, 4, bits */ | ||
278 | __be16 reserved2; | ||
279 | __be16 vl15_dropped; | ||
280 | __be32 port_xmit_data; | ||
281 | __be32 port_rcv_data; | ||
282 | __be32 port_xmit_packets; | ||
283 | __be32 port_rcv_packets; | ||
284 | } __attribute__ ((packed)); | ||
285 | |||
286 | struct ib_pma_portcounters_cong { | ||
287 | u8 reserved; | ||
288 | u8 reserved1; | ||
289 | __be16 port_check_rate; | ||
290 | __be16 symbol_error_counter; | ||
291 | u8 link_error_recovery_counter; | ||
292 | u8 link_downed_counter; | ||
293 | __be16 port_rcv_errors; | ||
294 | __be16 port_rcv_remphys_errors; | ||
295 | __be16 port_rcv_switch_relay_errors; | ||
296 | __be16 port_xmit_discards; | ||
297 | u8 port_xmit_constraint_errors; | ||
298 | u8 port_rcv_constraint_errors; | ||
299 | u8 reserved2; | ||
300 | u8 lli_ebor_errors; /* 4, 4, bits */ | ||
301 | __be16 reserved3; | ||
302 | __be16 vl15_dropped; | ||
303 | __be64 port_xmit_data; | ||
304 | __be64 port_rcv_data; | ||
305 | __be64 port_xmit_packets; | ||
306 | __be64 port_rcv_packets; | ||
307 | __be64 port_xmit_wait; | ||
308 | __be64 port_adr_events; | ||
309 | } __attribute__ ((packed)); | ||
310 | |||
311 | #define IB_PMA_CONG_HW_CONTROL_TIMER 0x00 | ||
312 | #define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01 | ||
313 | |||
314 | #define QIB_XMIT_RATE_UNSUPPORTED 0x0 | ||
315 | #define QIB_XMIT_RATE_PICO 0x7 | ||
316 | /* number of 4nsec cycles equaling 2secs */ | ||
317 | #define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC | ||
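Rough arithmetic behind the constant: 2 s / 4 ns = 500,000,000 cycles = 0x1DCD6500; the value used, 0x1DCD64EC, is 499,999,980 cycles, i.e. just under two seconds (80 ns short).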
318 | |||
319 | #define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001) | ||
320 | #define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002) | ||
321 | #define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004) | ||
322 | #define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008) | ||
323 | #define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010) | ||
324 | #define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040) | ||
325 | #define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200) | ||
326 | #define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400) | ||
327 | #define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800) | ||
328 | #define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000) | ||
329 | #define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000) | ||
330 | #define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000) | ||
331 | #define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000) | ||
332 | |||
333 | #define IB_PMA_SEL_CONG_ALL 0x01 | ||
334 | #define IB_PMA_SEL_CONG_PORT_DATA 0x02 | ||
335 | #define IB_PMA_SEL_CONG_XMIT 0x04 | ||
336 | #define IB_PMA_SEL_CONG_ROUTING 0x08 | ||
337 | |||
338 | struct ib_pma_portcounters_ext { | ||
339 | u8 reserved; | ||
340 | u8 port_select; | ||
341 | __be16 counter_select; | ||
342 | __be32 reserved1; | ||
343 | __be64 port_xmit_data; | ||
344 | __be64 port_rcv_data; | ||
345 | __be64 port_xmit_packets; | ||
346 | __be64 port_rcv_packets; | ||
347 | __be64 port_unicast_xmit_packets; | ||
348 | __be64 port_unicast_rcv_packets; | ||
349 | __be64 port_multicast_xmit_packets; | ||
350 | __be64 port_multicast_rcv_packets; | ||
351 | } __attribute__ ((packed)); | ||
352 | |||
353 | #define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001) | ||
354 | #define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002) | ||
355 | #define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004) | ||
356 | #define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008) | ||
357 | #define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010) | ||
358 | #define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020) | ||
359 | #define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040) | ||
360 | #define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080) | ||
361 | |||
362 | /* | ||
363 |  * The PortSamplesControl.CounterMasks field is an array of 3-bit fields	| ||
364 |  * that specify the N'th counter's capabilities. See ch. 16.1.3.2.	| ||
365 |  * We support 5 counters, which count only the mandatory quantities.	| ||
366 | */ | ||
367 | #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) | ||
368 | #define COUNTER_MASK0_9 \ | ||
369 | cpu_to_be32(COUNTER_MASK(1, 0) | \ | ||
370 | COUNTER_MASK(1, 1) | \ | ||
371 | COUNTER_MASK(1, 2) | \ | ||
372 | COUNTER_MASK(1, 3) | \ | ||
373 | COUNTER_MASK(1, 4)) | ||
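COUNTER_MASK(q, n) places the 3-bit capability value q into slot n of the big-endian counter_mask0_9 field, with slot 0 in the most significant of the ten 3-bit slots (the top two bits of the word are separate, per the "2, 10 * 3, bits" comment on that field). Expanding the five mandatory counters: COUNTER_MASK(1, 0) = 1 << 27, down through COUNTER_MASK(1, 4) = 1 << 15, so COUNTER_MASK0_9 works out to cpu_to_be32(0x09248000).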
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c new file mode 100644 index 000000000000..8b73a11d571c --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_mmap.c | |||
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/module.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/vmalloc.h> | ||
36 | #include <linux/mm.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | |||
40 | #include "qib_verbs.h" | ||
41 | |||
42 | /** | ||
43 | * qib_release_mmap_info - free mmap info structure | ||
44 | * @ref: a pointer to the kref within struct qib_mmap_info | ||
45 | */ | ||
46 | void qib_release_mmap_info(struct kref *ref) | ||
47 | { | ||
48 | struct qib_mmap_info *ip = | ||
49 | container_of(ref, struct qib_mmap_info, ref); | ||
50 | struct qib_ibdev *dev = to_idev(ip->context->device); | ||
51 | |||
52 | spin_lock_irq(&dev->pending_lock); | ||
53 | list_del(&ip->pending_mmaps); | ||
54 | spin_unlock_irq(&dev->pending_lock); | ||
55 | |||
56 | vfree(ip->obj); | ||
57 | kfree(ip); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * open and close keep track of how many times the CQ is mapped, | ||
62 | * to avoid releasing it. | ||
63 | */ | ||
64 | static void qib_vma_open(struct vm_area_struct *vma) | ||
65 | { | ||
66 | struct qib_mmap_info *ip = vma->vm_private_data; | ||
67 | |||
68 | kref_get(&ip->ref); | ||
69 | } | ||
70 | |||
71 | static void qib_vma_close(struct vm_area_struct *vma) | ||
72 | { | ||
73 | struct qib_mmap_info *ip = vma->vm_private_data; | ||
74 | |||
75 | kref_put(&ip->ref, qib_release_mmap_info); | ||
76 | } | ||
77 | |||
78 | static struct vm_operations_struct qib_vm_ops = { | ||
79 | .open = qib_vma_open, | ||
80 | .close = qib_vma_close, | ||
81 | }; | ||
82 | |||
83 | /** | ||
84 | * qib_mmap - create a new mmap region | ||
85 | * @context: the IB user context of the process making the mmap() call | ||
86 | * @vma: the VMA to be initialized | ||
87 | * Return zero if the mmap is OK. Otherwise, return an errno. | ||
88 | */ | ||
89 | int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | ||
90 | { | ||
91 | struct qib_ibdev *dev = to_idev(context->device); | ||
92 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
93 | unsigned long size = vma->vm_end - vma->vm_start; | ||
94 | struct qib_mmap_info *ip, *pp; | ||
95 | int ret = -EINVAL; | ||
96 | |||
97 | /* | ||
98 | * Search the device's list of objects waiting for a mmap call. | ||
99 | * Normally, this list is very short since a call to create a | ||
100 | * CQ, QP, or SRQ is soon followed by a call to mmap(). | ||
101 | */ | ||
102 | spin_lock_irq(&dev->pending_lock); | ||
103 | list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, | ||
104 | pending_mmaps) { | ||
105 | /* Only the creator is allowed to mmap the object */ | ||
106 | if (context != ip->context || (__u64) offset != ip->offset) | ||
107 | continue; | ||
108 | /* Don't allow a mmap larger than the object. */ | ||
109 | if (size > ip->size) | ||
110 | break; | ||
111 | |||
112 | list_del_init(&ip->pending_mmaps); | ||
113 | spin_unlock_irq(&dev->pending_lock); | ||
114 | |||
115 | ret = remap_vmalloc_range(vma, ip->obj, 0); | ||
116 | if (ret) | ||
117 | goto done; | ||
118 | vma->vm_ops = &qib_vm_ops; | ||
119 | vma->vm_private_data = ip; | ||
120 | qib_vma_open(vma); | ||
121 | goto done; | ||
122 | } | ||
123 | spin_unlock_irq(&dev->pending_lock); | ||
124 | done: | ||
125 | return ret; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Allocate information for qib_mmap | ||
130 | */ | ||
131 | struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, | ||
132 | u32 size, | ||
133 | struct ib_ucontext *context, | ||
134 | void *obj) { | ||
135 | struct qib_mmap_info *ip; | ||
136 | |||
137 | ip = kmalloc(sizeof *ip, GFP_KERNEL); | ||
138 | if (!ip) | ||
139 | goto bail; | ||
140 | |||
141 | size = PAGE_ALIGN(size); | ||
142 | |||
143 | spin_lock_irq(&dev->mmap_offset_lock); | ||
144 | if (dev->mmap_offset == 0) | ||
145 | dev->mmap_offset = PAGE_SIZE; | ||
146 | ip->offset = dev->mmap_offset; | ||
147 | dev->mmap_offset += size; | ||
148 | spin_unlock_irq(&dev->mmap_offset_lock); | ||
149 | |||
150 | INIT_LIST_HEAD(&ip->pending_mmaps); | ||
151 | ip->size = size; | ||
152 | ip->context = context; | ||
153 | ip->obj = obj; | ||
154 | kref_init(&ip->ref); | ||
155 | |||
156 | bail: | ||
157 | return ip; | ||
158 | } | ||
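The allocator above hands out page-aligned pseudo-offsets starting at PAGE_SIZE; the chosen offset is returned to userspace (typically in the create-CQ/QP/SRQ response) and later matched against vma->vm_pgoff in qib_mmap(). A hedged sketch of the intended flow -- everything other than the qib_* symbols and the dev->pending_* fields is hypothetical:

/* Hedged sketch; obj, obj_size and resp are hypothetical placeholders. */
struct qib_mmap_info *ip;

ip = qib_create_mmap_info(dev, obj_size, context, obj);
if (ip) {
	resp.offset = ip->offset;	/* offset copied back to userspace */
	spin_lock_irq(&dev->pending_lock);
	list_add(&ip->pending_mmaps, &dev->pending_mmaps);
	spin_unlock_irq(&dev->pending_lock);
}
/*
 * Userspace then calls mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 * MAP_SHARED, fd, resp.offset), which lands in qib_mmap() above.
 */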
159 | |||
160 | void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip, | ||
161 | u32 size, void *obj) | ||
162 | { | ||
163 | size = PAGE_ALIGN(size); | ||
164 | |||
165 | spin_lock_irq(&dev->mmap_offset_lock); | ||
166 | if (dev->mmap_offset == 0) | ||
167 | dev->mmap_offset = PAGE_SIZE; | ||
168 | ip->offset = dev->mmap_offset; | ||
169 | dev->mmap_offset += size; | ||
170 | spin_unlock_irq(&dev->mmap_offset_lock); | ||
171 | |||
172 | ip->size = size; | ||
173 | ip->obj = obj; | ||
174 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c new file mode 100644 index 000000000000..5f95f0f6385d --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_mr.c | |||
@@ -0,0 +1,503 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <rdma/ib_umem.h> | ||
35 | #include <rdma/ib_smi.h> | ||
36 | |||
37 | #include "qib.h" | ||
38 | |||
39 | /* Fast memory region */ | ||
40 | struct qib_fmr { | ||
41 | struct ib_fmr ibfmr; | ||
42 | u8 page_shift; | ||
43 | struct qib_mregion mr; /* must be last */ | ||
44 | }; | ||
45 | |||
46 | static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) | ||
47 | { | ||
48 | return container_of(ibfmr, struct qib_fmr, ibfmr); | ||
49 | } | ||
50 | |||
51 | /** | ||
52 | * qib_get_dma_mr - get a DMA memory region | ||
53 | * @pd: protection domain for this memory region | ||
54 | * @acc: access flags | ||
55 | * | ||
56 | * Returns the memory region on success, otherwise returns an errno. | ||
57 | * Note that all DMA addresses should be created via the | ||
58 | * struct ib_dma_mapping_ops functions (see qib_dma.c). | ||
59 | */ | ||
60 | struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) | ||
61 | { | ||
62 | struct qib_ibdev *dev = to_idev(pd->device); | ||
63 | struct qib_mr *mr; | ||
64 | struct ib_mr *ret; | ||
65 | unsigned long flags; | ||
66 | |||
67 | if (to_ipd(pd)->user) { | ||
68 | ret = ERR_PTR(-EPERM); | ||
69 | goto bail; | ||
70 | } | ||
71 | |||
72 | mr = kzalloc(sizeof *mr, GFP_KERNEL); | ||
73 | if (!mr) { | ||
74 | ret = ERR_PTR(-ENOMEM); | ||
75 | goto bail; | ||
76 | } | ||
77 | |||
78 | mr->mr.access_flags = acc; | ||
79 | atomic_set(&mr->mr.refcount, 0); | ||
80 | |||
81 | spin_lock_irqsave(&dev->lk_table.lock, flags); | ||
82 | if (!dev->dma_mr) | ||
83 | dev->dma_mr = &mr->mr; | ||
84 | spin_unlock_irqrestore(&dev->lk_table.lock, flags); | ||
85 | |||
86 | ret = &mr->ibmr; | ||
87 | |||
88 | bail: | ||
89 | return ret; | ||
90 | } | ||
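The lkey of the DMA MR returned here is meant to be used with addresses produced by the ib_dma_* mapping helpers rather than raw physical addresses. A hedged usage sketch from a hypothetical consumer; buf, len and dma_mr are placeholders, the ib_dma_* calls are the standard verbs helpers:

/* Hedged sketch; buf, len and dma_mr are hypothetical. */
struct ib_sge sge;
u64 addr;

addr = ib_dma_map_single(pd->device, buf, len, DMA_TO_DEVICE);
if (ib_dma_mapping_error(pd->device, addr))
	return -ENOMEM;
sge.addr   = addr;
sge.length = len;
sge.lkey   = dma_mr->lkey;	/* MR obtained from the DMA MR allocation above */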
91 | |||
92 | static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) | ||
93 | { | ||
94 | struct qib_mr *mr; | ||
95 | int m, i = 0; | ||
96 | |||
97 | /* Allocate struct plus pointers to first level page tables. */ | ||
98 | m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; | ||
99 | mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); | ||
100 | if (!mr) | ||
101 | goto done; | ||
102 | |||
103 | /* Allocate first level page tables. */ | ||
104 | for (; i < m; i++) { | ||
105 | mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); | ||
106 | if (!mr->mr.map[i]) | ||
107 | goto bail; | ||
108 | } | ||
109 | mr->mr.mapsz = m; | ||
110 | mr->mr.max_segs = count; | ||
111 | |||
112 | /* | ||
113 | * ib_reg_phys_mr() will initialize mr->ibmr except for | ||
114 | * lkey and rkey. | ||
115 | */ | ||
116 | if (!qib_alloc_lkey(lk_table, &mr->mr)) | ||
117 | goto bail; | ||
118 | mr->ibmr.lkey = mr->mr.lkey; | ||
119 | mr->ibmr.rkey = mr->mr.lkey; | ||
120 | |||
121 | atomic_set(&mr->mr.refcount, 0); | ||
122 | goto done; | ||
123 | |||
124 | bail: | ||
125 | while (i) | ||
126 | kfree(mr->mr.map[--i]); | ||
127 | kfree(mr); | ||
128 | mr = NULL; | ||
129 | |||
130 | done: | ||
131 | return mr; | ||
132 | } | ||
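m here is the ceiling of count / QIB_SEGSZ, i.e. the number of first-level map tables needed to hold count segment descriptors: count = 1 gives m = 1, count = QIB_SEGSZ gives m = 1, and count = QIB_SEGSZ + 1 gives m = 2.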
133 | |||
134 | /** | ||
135 | * qib_reg_phys_mr - register a physical memory region | ||
136 | * @pd: protection domain for this memory region | ||
137 | * @buffer_list: pointer to the list of physical buffers to register | ||
138 | * @num_phys_buf: the number of physical buffers to register | ||
139 | * @iova_start: the starting address passed over IB which maps to this MR | ||
140 | * | ||
141 | * Returns the memory region on success, otherwise returns an errno. | ||
142 | */ | ||
143 | struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, | ||
144 | struct ib_phys_buf *buffer_list, | ||
145 | int num_phys_buf, int acc, u64 *iova_start) | ||
146 | { | ||
147 | struct qib_mr *mr; | ||
148 | int n, m, i; | ||
149 | struct ib_mr *ret; | ||
150 | |||
151 | mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); | ||
152 | if (mr == NULL) { | ||
153 | ret = ERR_PTR(-ENOMEM); | ||
154 | goto bail; | ||
155 | } | ||
156 | |||
157 | mr->mr.pd = pd; | ||
158 | mr->mr.user_base = *iova_start; | ||
159 | mr->mr.iova = *iova_start; | ||
160 | mr->mr.length = 0; | ||
161 | mr->mr.offset = 0; | ||
162 | mr->mr.access_flags = acc; | ||
163 | mr->umem = NULL; | ||
164 | |||
165 | m = 0; | ||
166 | n = 0; | ||
167 | for (i = 0; i < num_phys_buf; i++) { | ||
168 | mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; | ||
169 | mr->mr.map[m]->segs[n].length = buffer_list[i].size; | ||
170 | mr->mr.length += buffer_list[i].size; | ||
171 | n++; | ||
172 | if (n == QIB_SEGSZ) { | ||
173 | m++; | ||
174 | n = 0; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | ret = &mr->ibmr; | ||
179 | |||
180 | bail: | ||
181 | return ret; | ||
182 | } | ||
183 | |||
184 | /** | ||
185 | * qib_reg_user_mr - register a userspace memory region | ||
186 | * @pd: protection domain for this memory region | ||
187 | * @start: starting userspace address | ||
188 | * @length: length of region to register | ||
189 | * @virt_addr: virtual address to use (from HCA's point of view) | ||
190 | * @mr_access_flags: access flags for this memory region | ||
191 | * @udata: unused by the QLogic_IB driver | ||
192 | * | ||
193 | * Returns the memory region on success, otherwise returns an errno. | ||
194 | */ | ||
195 | struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | ||
196 | u64 virt_addr, int mr_access_flags, | ||
197 | struct ib_udata *udata) | ||
198 | { | ||
199 | struct qib_mr *mr; | ||
200 | struct ib_umem *umem; | ||
201 | struct ib_umem_chunk *chunk; | ||
202 | int n, m, i; | ||
203 | struct ib_mr *ret; | ||
204 | |||
205 | if (length == 0) { | ||
206 | ret = ERR_PTR(-EINVAL); | ||
207 | goto bail; | ||
208 | } | ||
209 | |||
210 | umem = ib_umem_get(pd->uobject->context, start, length, | ||
211 | mr_access_flags, 0); | ||
212 | if (IS_ERR(umem)) | ||
213 | return (void *) umem; | ||
214 | |||
215 | n = 0; | ||
216 | list_for_each_entry(chunk, &umem->chunk_list, list) | ||
217 | n += chunk->nents; | ||
218 | |||
219 | mr = alloc_mr(n, &to_idev(pd->device)->lk_table); | ||
220 | if (!mr) { | ||
221 | ret = ERR_PTR(-ENOMEM); | ||
222 | ib_umem_release(umem); | ||
223 | goto bail; | ||
224 | } | ||
225 | |||
226 | mr->mr.pd = pd; | ||
227 | mr->mr.user_base = start; | ||
228 | mr->mr.iova = virt_addr; | ||
229 | mr->mr.length = length; | ||
230 | mr->mr.offset = umem->offset; | ||
231 | mr->mr.access_flags = mr_access_flags; | ||
232 | mr->umem = umem; | ||
233 | |||
234 | m = 0; | ||
235 | n = 0; | ||
236 | list_for_each_entry(chunk, &umem->chunk_list, list) { | ||
237 | for (i = 0; i < chunk->nents; i++) { | ||
238 | void *vaddr; | ||
239 | |||
240 | vaddr = page_address(sg_page(&chunk->page_list[i])); | ||
241 | if (!vaddr) { | ||
242 | ret = ERR_PTR(-EINVAL); | ||
243 | goto bail; | ||
244 | } | ||
245 | mr->mr.map[m]->segs[n].vaddr = vaddr; | ||
246 | mr->mr.map[m]->segs[n].length = umem->page_size; | ||
247 | n++; | ||
248 | if (n == QIB_SEGSZ) { | ||
249 | m++; | ||
250 | n = 0; | ||
251 | } | ||
252 | } | ||
253 | } | ||
254 | ret = &mr->ibmr; | ||
255 | |||
256 | bail: | ||
257 | return ret; | ||
258 | } | ||
259 | |||
260 | /** | ||
261 | * qib_dereg_mr - unregister and free a memory region | ||
262 | * @ibmr: the memory region to free | ||
263 | * | ||
264 | * Returns 0 on success. | ||
265 | * | ||
266 | * Note that this is called to free MRs created by qib_get_dma_mr() | ||
267 | * or qib_reg_user_mr(). | ||
268 | */ | ||
269 | int qib_dereg_mr(struct ib_mr *ibmr) | ||
270 | { | ||
271 | struct qib_mr *mr = to_imr(ibmr); | ||
272 | struct qib_ibdev *dev = to_idev(ibmr->device); | ||
273 | int ret; | ||
274 | int i; | ||
275 | |||
276 | ret = qib_free_lkey(dev, &mr->mr); | ||
277 | if (ret) | ||
278 | return ret; | ||
279 | |||
280 | i = mr->mr.mapsz; | ||
281 | while (i) | ||
282 | kfree(mr->mr.map[--i]); | ||
283 | if (mr->umem) | ||
284 | ib_umem_release(mr->umem); | ||
285 | kfree(mr); | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * Allocate a memory region usable with the | ||
291 | * IB_WR_FAST_REG_MR send work request. | ||
292 | * | ||
293 | * Return the memory region on success, otherwise return an errno. | ||
294 | */ | ||
295 | struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) | ||
296 | { | ||
297 | struct qib_mr *mr; | ||
298 | |||
299 | mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table); | ||
300 | if (mr == NULL) | ||
301 | return ERR_PTR(-ENOMEM); | ||
302 | |||
303 | mr->mr.pd = pd; | ||
304 | mr->mr.user_base = 0; | ||
305 | mr->mr.iova = 0; | ||
306 | mr->mr.length = 0; | ||
307 | mr->mr.offset = 0; | ||
308 | mr->mr.access_flags = 0; | ||
309 | mr->umem = NULL; | ||
310 | |||
311 | return &mr->ibmr; | ||
312 | } | ||
313 | |||
314 | struct ib_fast_reg_page_list * | ||
315 | qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len) | ||
316 | { | ||
317 | unsigned size = page_list_len * sizeof(u64); | ||
318 | struct ib_fast_reg_page_list *pl; | ||
319 | |||
320 | if (size > PAGE_SIZE) | ||
321 | return ERR_PTR(-EINVAL); | ||
322 | |||
323 | pl = kmalloc(sizeof *pl, GFP_KERNEL); | ||
324 | if (!pl) | ||
325 | return ERR_PTR(-ENOMEM); | ||
326 | |||
327 | pl->page_list = kmalloc(size, GFP_KERNEL); | ||
328 | if (!pl->page_list) | ||
329 | goto err_free; | ||
330 | |||
331 | return pl; | ||
332 | |||
333 | err_free: | ||
334 | kfree(pl); | ||
335 | return ERR_PTR(-ENOMEM); | ||
336 | } | ||
337 | |||
338 | void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl) | ||
339 | { | ||
340 | kfree(pl->page_list); | ||
341 | kfree(pl); | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * qib_alloc_fmr - allocate a fast memory region | ||
346 | * @pd: the protection domain for this memory region | ||
347 | * @mr_access_flags: access flags for this memory region | ||
348 | * @fmr_attr: fast memory region attributes | ||
349 | * | ||
350 | * Returns the memory region on success, otherwise returns an errno. | ||
351 | */ | ||
352 | struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | ||
353 | struct ib_fmr_attr *fmr_attr) | ||
354 | { | ||
355 | struct qib_fmr *fmr; | ||
356 | int m, i = 0; | ||
357 | struct ib_fmr *ret; | ||
358 | |||
359 | /* Allocate struct plus pointers to first level page tables. */ | ||
360 | m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; | ||
361 | fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); | ||
362 | if (!fmr) | ||
363 | goto bail; | ||
364 | |||
365 | /* Allocate first level page tables. */ | ||
366 | for (; i < m; i++) { | ||
367 | fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], | ||
368 | GFP_KERNEL); | ||
369 | if (!fmr->mr.map[i]) | ||
370 | goto bail; | ||
371 | } | ||
372 | fmr->mr.mapsz = m; | ||
373 | |||
374 | /* | ||
375 | * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & | ||
376 | * rkey. | ||
377 | */ | ||
378 | if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) | ||
379 | goto bail; | ||
380 | fmr->ibfmr.rkey = fmr->mr.lkey; | ||
381 | fmr->ibfmr.lkey = fmr->mr.lkey; | ||
382 | /* | ||
383 | * Resources are allocated but no valid mapping (RKEY can't be | ||
384 | * used). | ||
385 | */ | ||
386 | fmr->mr.pd = pd; | ||
387 | fmr->mr.user_base = 0; | ||
388 | fmr->mr.iova = 0; | ||
389 | fmr->mr.length = 0; | ||
390 | fmr->mr.offset = 0; | ||
391 | fmr->mr.access_flags = mr_access_flags; | ||
392 | fmr->mr.max_segs = fmr_attr->max_pages; | ||
393 | fmr->page_shift = fmr_attr->page_shift; | ||
394 | |||
395 | atomic_set(&fmr->mr.refcount, 0); | ||
396 | ret = &fmr->ibfmr; | ||
397 | goto done; | ||
398 | |||
399 | bail: | ||
400 | while (i) | ||
401 | kfree(fmr->mr.map[--i]); | ||
402 | kfree(fmr); | ||
403 | ret = ERR_PTR(-ENOMEM); | ||
404 | |||
405 | done: | ||
406 | return ret; | ||
407 | } | ||
408 | |||
409 | /** | ||
410 | * qib_map_phys_fmr - set up a fast memory region | ||
411 |  * @ibfmr: the fast memory region to set up	| ||
412 | * @page_list: the list of pages to associate with the fast memory region | ||
413 | * @list_len: the number of pages to associate with the fast memory region | ||
414 | * @iova: the virtual address of the start of the fast memory region | ||
415 | * | ||
416 | * This may be called from interrupt context. | ||
417 | */ | ||
418 | |||
419 | int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | ||
420 | int list_len, u64 iova) | ||
421 | { | ||
422 | struct qib_fmr *fmr = to_ifmr(ibfmr); | ||
423 | struct qib_lkey_table *rkt; | ||
424 | unsigned long flags; | ||
425 | int m, n, i; | ||
426 | u32 ps; | ||
427 | int ret; | ||
428 | |||
429 | if (atomic_read(&fmr->mr.refcount)) | ||
430 | return -EBUSY; | ||
431 | |||
432 | if (list_len > fmr->mr.max_segs) { | ||
433 | ret = -EINVAL; | ||
434 | goto bail; | ||
435 | } | ||
436 | rkt = &to_idev(ibfmr->device)->lk_table; | ||
437 | spin_lock_irqsave(&rkt->lock, flags); | ||
438 | fmr->mr.user_base = iova; | ||
439 | fmr->mr.iova = iova; | ||
440 | ps = 1 << fmr->page_shift; | ||
441 | fmr->mr.length = list_len * ps; | ||
442 | m = 0; | ||
443 | n = 0; | ||
444 | for (i = 0; i < list_len; i++) { | ||
445 | fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; | ||
446 | fmr->mr.map[m]->segs[n].length = ps; | ||
447 | if (++n == QIB_SEGSZ) { | ||
448 | m++; | ||
449 | n = 0; | ||
450 | } | ||
451 | } | ||
452 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
453 | ret = 0; | ||
454 | |||
455 | bail: | ||
456 | return ret; | ||
457 | } | ||
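Each page_list[] entry is treated as the address of one page of (1 << page_shift) bytes, so the mapped region spans list_len * ps bytes starting at iova; for example, with page_shift = 12 and list_len = 4, ps = 4096 and the FMR covers 16384 bytes.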
458 | |||
459 | /** | ||
460 | * qib_unmap_fmr - unmap fast memory regions | ||
461 | * @fmr_list: the list of fast memory regions to unmap | ||
462 | * | ||
463 | * Returns 0 on success. | ||
464 | */ | ||
465 | int qib_unmap_fmr(struct list_head *fmr_list) | ||
466 | { | ||
467 | struct qib_fmr *fmr; | ||
468 | struct qib_lkey_table *rkt; | ||
469 | unsigned long flags; | ||
470 | |||
471 | list_for_each_entry(fmr, fmr_list, ibfmr.list) { | ||
472 | rkt = &to_idev(fmr->ibfmr.device)->lk_table; | ||
473 | spin_lock_irqsave(&rkt->lock, flags); | ||
474 | fmr->mr.user_base = 0; | ||
475 | fmr->mr.iova = 0; | ||
476 | fmr->mr.length = 0; | ||
477 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
478 | } | ||
479 | return 0; | ||
480 | } | ||
481 | |||
482 | /** | ||
483 | * qib_dealloc_fmr - deallocate a fast memory region | ||
484 | * @ibfmr: the fast memory region to deallocate | ||
485 | * | ||
486 | * Returns 0 on success. | ||
487 | */ | ||
488 | int qib_dealloc_fmr(struct ib_fmr *ibfmr) | ||
489 | { | ||
490 | struct qib_fmr *fmr = to_ifmr(ibfmr); | ||
491 | int ret; | ||
492 | int i; | ||
493 | |||
494 | ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr); | ||
495 | if (ret) | ||
496 | return ret; | ||
497 | |||
498 | i = fmr->mr.mapsz; | ||
499 | while (i) | ||
500 | kfree(fmr->mr.map[--i]); | ||
501 | kfree(fmr); | ||
502 | return 0; | ||
503 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c new file mode 100644 index 000000000000..c926bf4541df --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_pcie.c | |||
@@ -0,0 +1,738 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/pci.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | #include <linux/aer.h> | ||
38 | |||
39 | #include "qib.h" | ||
40 | |||
41 | /* | ||
42 | * This file contains PCIe utility routines that are common to the | ||
43 | * various QLogic InfiniPath adapters | ||
44 | */ | ||
45 | |||
46 | /* | ||
47 | * Code to adjust PCIe capabilities. | ||
48 | * To minimize the change footprint, we call it | ||
49 | * from qib_pcie_params, which every chip-specific | ||
50 | * file calls, even though this violates some | ||
51 | * expectations of harmlessness. | ||
52 | */ | ||
53 | static int qib_tune_pcie_caps(struct qib_devdata *); | ||
54 | static int qib_tune_pcie_coalesce(struct qib_devdata *); | ||
55 | |||
56 | /* | ||
57 | * Do all the common PCIe setup and initialization. | ||
58 | * devdata is not yet allocated, and is not allocated until after this | ||
59 | * routine returns success. Therefore qib_dev_err() can't be used for error | ||
60 | * printing. | ||
61 | */ | ||
62 | int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
63 | { | ||
64 | int ret; | ||
65 | |||
66 | ret = pci_enable_device(pdev); | ||
67 | if (ret) { | ||
68 | /* | ||
69 | * This can happen (in theory) iff: | ||
70 | * We did a chip reset, and then failed to reprogram the | ||
71 | * BAR, or the chip reset due to an internal error. We then | ||
72 | * unloaded the driver and reloaded it. | ||
73 | * | ||
74 | * Both reset cases set the BAR back to initial state. For | ||
75 | * the latter case, the AER sticky error bit at offset 0x718 | ||
76 | * should be set, but the Linux kernel doesn't yet know | ||
77 | * about that, it appears. If the original BAR was retained | ||
78 | * in the kernel data structures, this may be OK. | ||
79 | */ | ||
80 | qib_early_err(&pdev->dev, "pci enable failed: error %d\n", | ||
81 | -ret); | ||
82 | goto done; | ||
83 | } | ||
84 | |||
85 | ret = pci_request_regions(pdev, QIB_DRV_NAME); | ||
86 | if (ret) { | ||
87 | qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret); | ||
88 | goto bail; | ||
89 | } | ||
90 | |||
91 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
92 | if (ret) { | ||
93 | /* | ||
94 | * If the 64 bit setup fails, try 32 bit. Some systems | ||
95 | * do not set up 64 bit maps when 2GB or less memory is | ||
96 | * installed. | ||
97 | */ | ||
98 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
99 | if (ret) { | ||
100 | qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret); | ||
101 | goto bail; | ||
102 | } | ||
103 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
104 | } else | ||
105 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
106 | if (ret) | ||
107 | qib_early_err(&pdev->dev, | ||
108 | "Unable to set DMA consistent mask: %d\n", ret); | ||
109 | |||
110 | pci_set_master(pdev); | ||
111 | ret = pci_enable_pcie_error_reporting(pdev); | ||
112 | if (ret) | ||
113 | qib_early_err(&pdev->dev, | ||
114 | "Unable to enable pcie error reporting: %d\n", | ||
115 | ret); | ||
116 | goto done; | ||
117 | |||
118 | bail: | ||
119 | pci_disable_device(pdev); | ||
120 | pci_release_regions(pdev); | ||
121 | done: | ||
122 | return ret; | ||
123 | } | ||
124 | |||
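The DMA-mask setup above follows the usual 64-bit-then-32-bit fallback. Isolated as a sketch (helper name hypothetical), the pattern is:

    /* Prefer 64-bit DMA; fall back to 32-bit masks if the platform refuses. */
    static int example_set_dma_masks(struct pci_dev *pdev)
    {
            if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
                    return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
            if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
                    return -ENODEV;        /* neither mask was accepted */
            return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
    }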
125 | /* | ||
126 | * Do remaining PCIe setup, once dd is allocated, and save away | ||
127 | * fields required to re-initialize after a chip reset, or for | ||
128 | * various other purposes | ||
129 | */ | ||
130 | int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev, | ||
131 | const struct pci_device_id *ent) | ||
132 | { | ||
133 | unsigned long len; | ||
134 | resource_size_t addr; | ||
135 | |||
136 | dd->pcidev = pdev; | ||
137 | pci_set_drvdata(pdev, dd); | ||
138 | |||
139 | addr = pci_resource_start(pdev, 0); | ||
140 | len = pci_resource_len(pdev, 0); | ||
141 | |||
142 | #if defined(__powerpc__) | ||
143 | /* There isn't a generic way to specify writethrough mappings */ | ||
144 | dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU); | ||
145 | #else | ||
146 | dd->kregbase = ioremap_nocache(addr, len); | ||
147 | #endif | ||
148 | |||
149 | if (!dd->kregbase) | ||
150 | return -ENOMEM; | ||
151 | |||
152 | dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len); | ||
153 | dd->physaddr = addr; /* used for io_remap, etc. */ | ||
154 | |||
155 | /* | ||
156 | * Save BARs to rewrite after device reset. Save all 64 bits of | ||
157 | * BAR, just in case. | ||
158 | */ | ||
159 | dd->pcibar0 = addr; | ||
160 | dd->pcibar1 = addr >> 32; | ||
161 | dd->deviceid = ent->device; /* save for later use */ | ||
162 | dd->vendorid = ent->vendor; | ||
163 | |||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior | ||
169 | * to releasing the dd memory. | ||
170 | * void because the core pcie cleanup calls all return void | ||
171 | */ | ||
172 | void qib_pcie_ddcleanup(struct qib_devdata *dd) | ||
173 | { | ||
174 | u64 __iomem *base = (void __iomem *) dd->kregbase; | ||
175 | |||
176 | dd->kregbase = NULL; | ||
177 | iounmap(base); | ||
178 | if (dd->piobase) | ||
179 | iounmap(dd->piobase); | ||
180 | if (dd->userbase) | ||
181 | iounmap(dd->userbase); | ||
182 | |||
183 | pci_disable_device(dd->pcidev); | ||
184 | pci_release_regions(dd->pcidev); | ||
185 | |||
186 | pci_set_drvdata(dd->pcidev, NULL); | ||
187 | } | ||
188 | |||
189 | static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, | ||
190 | struct msix_entry *msix_entry) | ||
191 | { | ||
192 | int ret; | ||
193 | u32 tabsize = 0; | ||
194 | u16 msix_flags; | ||
195 | |||
196 | pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags); | ||
197 | tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE); | ||
198 | if (tabsize > *msixcnt) | ||
199 | tabsize = *msixcnt; | ||
200 | ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); | ||
201 | if (ret > 0) { | ||
202 | tabsize = ret; | ||
203 | ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); | ||
204 | } | ||
205 | if (ret) { | ||
206 | qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, " | ||
207 | "falling back to INTx\n", tabsize, ret); | ||
208 | tabsize = 0; | ||
209 | } | ||
210 | *msixcnt = tabsize; | ||
211 | |||
212 | if (ret) | ||
213 | qib_enable_intx(dd->pcidev); | ||
214 | |||
215 | } | ||
216 | |||
217 | /* | ||
218 | * We save the msi lo and hi values, so we can restore them after | ||
219 | * chip reset (the kernel PCI infrastructure doesn't yet handle that | ||
220 | * correctly). | ||
221 | */ | ||
222 | static int qib_msi_setup(struct qib_devdata *dd, int pos) | ||
223 | { | ||
224 | struct pci_dev *pdev = dd->pcidev; | ||
225 | u16 control; | ||
226 | int ret; | ||
227 | |||
228 | ret = pci_enable_msi(pdev); | ||
229 | if (ret) | ||
230 | qib_dev_err(dd, "pci_enable_msi failed: %d, " | ||
231 | "interrupts may not work\n", ret); | ||
232 | /* continue even if it fails, we may still be OK... */ | ||
233 | |||
234 | pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, | ||
235 | &dd->msi_lo); | ||
236 | pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, | ||
237 | &dd->msi_hi); | ||
238 | pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); | ||
239 | /* now save the data (vector) info */ | ||
240 | pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT) | ||
241 | ? 12 : 8), | ||
242 | &dd->msi_data); | ||
243 | return ret; | ||
244 | } | ||
245 | |||
246 | int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, | ||
247 | struct msix_entry *entry) | ||
248 | { | ||
249 | u16 linkstat, speed; | ||
250 | int pos = 0, pose, ret = 1; | ||
251 | |||
252 | pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP); | ||
253 | if (!pose) { | ||
254 | qib_dev_err(dd, "Can't find PCI Express capability!\n"); | ||
255 | /* set up something... */ | ||
256 | dd->lbus_width = 1; | ||
257 | dd->lbus_speed = 2500; /* Gen1, 2.5GHz */ | ||
258 | goto bail; | ||
259 | } | ||
260 | |||
261 | pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX); | ||
262 | if (nent && *nent && pos) { | ||
263 | qib_msix_setup(dd, pos, nent, entry); | ||
264 | ret = 0; /* did it, either MSIx or INTx */ | ||
265 | } else { | ||
266 | pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); | ||
267 | if (pos) | ||
268 | ret = qib_msi_setup(dd, pos); | ||
269 | else | ||
270 | qib_dev_err(dd, "No PCI MSI or MSIx capability!\n"); | ||
271 | } | ||
272 | if (!pos) | ||
273 | qib_enable_intx(dd->pcidev); | ||
274 | |||
275 | pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat); | ||
276 | /* | ||
277 | * speed is bits 0-3, linkwidth is bits 4-8 | ||
278 | * no defines for them in headers | ||
279 | */ | ||
280 | speed = linkstat & 0xf; | ||
281 | linkstat >>= 4; | ||
282 | linkstat &= 0x1f; | ||
283 | dd->lbus_width = linkstat; | ||
284 | |||
285 | switch (speed) { | ||
286 | case 1: | ||
287 | dd->lbus_speed = 2500; /* Gen1, 2.5GHz */ | ||
288 | break; | ||
289 | case 2: | ||
290 | dd->lbus_speed = 5000; /* Gen2, 5GHz */ | ||
291 | break; | ||
292 | default: /* not defined, assume gen1 */ | ||
293 | dd->lbus_speed = 2500; | ||
294 | break; | ||
295 | } | ||
296 | |||
297 | /* | ||
298 | * Check against expected pcie width and complain if "wrong" | ||
299 | * on first initialization, not afterwards (i.e., reset). | ||
300 | */ | ||
301 | if (minw && linkstat < minw) | ||
302 | qib_dev_err(dd, | ||
303 | "PCIe width %u (x%u HCA), performance reduced\n", | ||
304 | linkstat, minw); | ||
305 | |||
306 | qib_tune_pcie_caps(dd); | ||
307 | |||
308 | qib_tune_pcie_coalesce(dd); | ||
309 | |||
310 | bail: | ||
311 | /* fill in string, even on errors */ | ||
312 | snprintf(dd->lbus_info, sizeof(dd->lbus_info), | ||
313 | "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width); | ||
314 | return ret; | ||
315 | } | ||
316 | |||
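For reference, the link-status decoding in qib_pcie_params() can be read as the following small sketch (hypothetical helper); bits 0-3 of PCI_EXP_LNKSTA carry the speed code and bits 4-8 the negotiated lane count:

    static void example_decode_lnksta(u16 linkstat, u32 *width, u32 *mhz)
    {
            u16 speed = linkstat & 0xf;             /* bits 0-3: speed code */

            *width = (linkstat >> 4) & 0x1f;        /* bits 4-8: lane count */
            *mhz = (speed == 2) ? 5000 : 2500;      /* Gen2, else assume Gen1 */
    }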
317 | /* | ||
318 | * Setup pcie interrupt stuff again after a reset. I'd like to just call | ||
319 | * pci_enable_msi() again for msi, but when I do that, | ||
320 | * the MSI enable bit doesn't get set in the command word, and | ||
321 | * we switch to a different interrupt vector, which is confusing, | ||
322 | * so I instead just do it all inline. Perhaps somehow can tie this | ||
323 | * into the PCIe hotplug support at some point | ||
324 | */ | ||
325 | int qib_reinit_intr(struct qib_devdata *dd) | ||
326 | { | ||
327 | int pos; | ||
328 | u16 control; | ||
329 | int ret = 0; | ||
330 | |||
331 | /* If we aren't using MSI, don't restore it */ | ||
332 | if (!dd->msi_lo) | ||
333 | goto bail; | ||
334 | |||
335 | pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); | ||
336 | if (!pos) { | ||
337 | qib_dev_err(dd, "Can't find MSI capability, " | ||
338 | "can't restore MSI settings\n"); | ||
339 | ret = 0; | ||
340 | /* nothing special for MSIx, just MSI */ | ||
341 | goto bail; | ||
342 | } | ||
343 | pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO, | ||
344 | dd->msi_lo); | ||
345 | pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI, | ||
346 | dd->msi_hi); | ||
347 | pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control); | ||
348 | if (!(control & PCI_MSI_FLAGS_ENABLE)) { | ||
349 | control |= PCI_MSI_FLAGS_ENABLE; | ||
350 | pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, | ||
351 | control); | ||
352 | } | ||
353 | /* now rewrite the data (vector) info */ | ||
354 | pci_write_config_word(dd->pcidev, pos + | ||
355 | ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8), | ||
356 | dd->msi_data); | ||
357 | ret = 1; | ||
358 | bail: | ||
359 | if (!ret && (dd->flags & QIB_HAS_INTX)) { | ||
360 | qib_enable_intx(dd->pcidev); | ||
361 | ret = 1; | ||
362 | } | ||
363 | |||
364 | /* and now set the pci master bit again */ | ||
365 | pci_set_master(dd->pcidev); | ||
366 | |||
367 | return ret; | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * Disable msi interrupt if enabled, and clear msi_lo. | ||
372 | * This is used primarily for the fallback to INTx, but | ||
373 | * is also used in reinit after reset, and during cleanup. | ||
374 | */ | ||
375 | void qib_nomsi(struct qib_devdata *dd) | ||
376 | { | ||
377 | dd->msi_lo = 0; | ||
378 | pci_disable_msi(dd->pcidev); | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * Same as qib_nomsi, but for MSIx. | ||
383 | */ | ||
384 | void qib_nomsix(struct qib_devdata *dd) | ||
385 | { | ||
386 | pci_disable_msix(dd->pcidev); | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Similar to pci_intx(pdev, 1), except that we make sure | ||
391 | * msi(x) is off. | ||
392 | */ | ||
393 | void qib_enable_intx(struct pci_dev *pdev) | ||
394 | { | ||
395 | u16 cw, new; | ||
396 | int pos; | ||
397 | |||
398 | /* first, turn on INTx */ | ||
399 | pci_read_config_word(pdev, PCI_COMMAND, &cw); | ||
400 | new = cw & ~PCI_COMMAND_INTX_DISABLE; | ||
401 | if (new != cw) | ||
402 | pci_write_config_word(pdev, PCI_COMMAND, new); | ||
403 | |||
404 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
405 | if (pos) { | ||
406 | /* then turn off MSI */ | ||
407 | pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw); | ||
408 | new = cw & ~PCI_MSI_FLAGS_ENABLE; | ||
409 | if (new != cw) | ||
410 | pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new); | ||
411 | } | ||
412 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | ||
413 | if (pos) { | ||
414 | /* then turn off MSIx */ | ||
415 | pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw); | ||
416 | new = cw & ~PCI_MSIX_FLAGS_ENABLE; | ||
417 | if (new != cw) | ||
418 | pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new); | ||
419 | } | ||
420 | } | ||
421 | |||
422 | /* | ||
423 | * These two routines are helper routines for the device reset code | ||
424 | * to move all the pcie code out of the chip-specific driver code. | ||
425 | */ | ||
426 | void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline) | ||
427 | { | ||
428 | pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd); | ||
429 | pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline); | ||
430 | pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline); | ||
431 | } | ||
432 | |||
433 | void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) | ||
434 | { | ||
435 | int r; | ||
436 | r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, | ||
437 | dd->pcibar0); | ||
438 | if (r) | ||
439 | qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r); | ||
440 | r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, | ||
441 | dd->pcibar1); | ||
442 | if (r) | ||
443 | qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r); | ||
444 | /* now re-enable memory access, and restore cosmetic settings */ | ||
445 | pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd); | ||
446 | pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline); | ||
447 | pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline); | ||
448 | r = pci_enable_device(dd->pcidev); | ||
449 | if (r) | ||
450 | qib_dev_err(dd, "pci_enable_device failed after " | ||
451 | "reset: %d\n", r); | ||
452 | } | ||
453 | |||
454 | /* code to adjust PCIe capabilities. */ | ||
455 | |||
456 | static int fld2val(int wd, int mask) | ||
457 | { | ||
458 | int lsbmask; | ||
459 | |||
460 | if (!mask) | ||
461 | return 0; | ||
462 | wd &= mask; | ||
463 | lsbmask = mask ^ (mask & (mask - 1)); | ||
464 | wd /= lsbmask; | ||
465 | return wd; | ||
466 | } | ||
467 | |||
468 | static int val2fld(int wd, int mask) | ||
469 | { | ||
470 | int lsbmask; | ||
471 | |||
472 | if (!mask) | ||
473 | return 0; | ||
474 | lsbmask = mask ^ (mask & (mask - 1)); | ||
475 | wd *= lsbmask; | ||
476 | return wd; | ||
477 | } | ||
478 | |||
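fld2val() extracts a bit-field as a plain value and val2fld() shifts a value back into field position. A hypothetical use with the Device Control Max_Payload_Size field (encoding 1 means 256 bytes):

    static u16 example_payload_at_least_256(u16 devctl)
    {
            if (fld2val(devctl, PCI_EXP_DEVCTL_PAYLOAD) < 1) {
                    devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
                    devctl |= val2fld(1, PCI_EXP_DEVCTL_PAYLOAD);
            }
            return devctl;
    }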
479 | static int qib_pcie_coalesce; | ||
480 | module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO); | ||
481 | MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets"); | ||
482 | |||
483 | /* | ||
484 | * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300 | ||
485 | * chipsets. This is known to be unsafe for some revisions of some | ||
486 | * of these chipsets, with some BIOS settings, and enabling it on those | ||
487 | * systems may result in the system crashing, and/or data corruption. | ||
488 | */ | ||
489 | static int qib_tune_pcie_coalesce(struct qib_devdata *dd) | ||
490 | { | ||
491 | int r; | ||
492 | struct pci_dev *parent; | ||
493 | int ppos; | ||
494 | u16 devid; | ||
495 | u32 mask, bits, val; | ||
496 | |||
497 | if (!qib_pcie_coalesce) | ||
498 | return 0; | ||
499 | |||
500 | /* Find out supported and configured values for parent (root) */ | ||
501 | parent = dd->pcidev->bus->self; | ||
502 | if (parent->bus->parent) { | ||
503 | qib_devinfo(dd->pcidev, "Parent not root\n"); | ||
504 | return 1; | ||
505 | } | ||
506 | ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); | ||
507 | if (!ppos) | ||
508 | return 1; | ||
509 | if (parent->vendor != 0x8086) | ||
510 | return 1; | ||
511 | |||
512 | /* | ||
513 | * - bit 12: Max_rdcmp_Imt_EN: need to set to 1 | ||
514 | * - bit 11: COALESCE_FORCE: need to set to 0 | ||
515 | * - bit 10: COALESCE_EN: need to set to 1 | ||
516 | * (but with limitations on some chipsets) | ||
517 | * | ||
518 | * On the Intel 5000, 5100, and 7300 chipsets, there are | ||
519 | * also bits 25:24 (COALESCE_MODE), which need to be set to 0 | ||
520 | */ | ||
521 | devid = parent->device; | ||
522 | if (devid >= 0x25e2 && devid <= 0x25fa) { | ||
523 | u8 rev; | ||
524 | |||
525 | /* 5000 P/V/X/Z */ | ||
526 | pci_read_config_byte(parent, PCI_REVISION_ID, &rev); | ||
527 | if (rev <= 0xb2) | ||
528 | bits = 1U << 10; | ||
529 | else | ||
530 | bits = 7U << 10; | ||
531 | mask = (3U << 24) | (7U << 10); | ||
532 | } else if (devid >= 0x65e2 && devid <= 0x65fa) { | ||
533 | /* 5100 */ | ||
534 | bits = 1U << 10; | ||
535 | mask = (3U << 24) | (7U << 10); | ||
536 | } else if (devid >= 0x4021 && devid <= 0x402e) { | ||
537 | /* 5400 */ | ||
538 | bits = 7U << 10; | ||
539 | mask = 7U << 10; | ||
540 | } else if (devid >= 0x3604 && devid <= 0x360a) { | ||
541 | /* 7300 */ | ||
542 | bits = 7U << 10; | ||
543 | mask = (3U << 24) | (7U << 10); | ||
544 | } else { | ||
545 | /* not one of the chipsets that we know about */ | ||
546 | return 1; | ||
547 | } | ||
548 | pci_read_config_dword(parent, 0x48, &val); | ||
549 | val &= ~mask; | ||
550 | val |= bits; | ||
551 | r = pci_write_config_dword(parent, 0x48, val); | ||
552 | return 0; | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * BIOS may not set PCIe bus-utilization parameters for best performance. | ||
557 | * Check and optionally adjust them to maximize our throughput. | ||
558 | */ | ||
559 | static int qib_pcie_caps; | ||
560 | module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO); | ||
561 | MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)"); | ||
562 | |||
563 | static int qib_tune_pcie_caps(struct qib_devdata *dd) | ||
564 | { | ||
565 | int ret = 1; /* Assume the worst */ | ||
566 | struct pci_dev *parent; | ||
567 | int ppos, epos; | ||
568 | u16 pcaps, pctl, ecaps, ectl; | ||
569 | int rc_sup, ep_sup; | ||
570 | int rc_cur, ep_cur; | ||
571 | |||
572 | /* Find out supported and configured values for parent (root) */ | ||
573 | parent = dd->pcidev->bus->self; | ||
574 | if (parent->bus->parent) { | ||
575 | qib_devinfo(dd->pcidev, "Parent not root\n"); | ||
576 | goto bail; | ||
577 | } | ||
578 | ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); | ||
579 | if (ppos) { | ||
580 | pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps); | ||
581 | pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl); | ||
582 | } else | ||
583 | goto bail; | ||
584 | /* Find out supported and configured values for endpoint (us) */ | ||
585 | epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP); | ||
586 | if (epos) { | ||
587 | pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps); | ||
588 | pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl); | ||
589 | } else | ||
590 | goto bail; | ||
591 | ret = 0; | ||
592 | /* Find max payload supported by root, endpoint */ | ||
593 | rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD); | ||
594 | ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD); | ||
595 | if (rc_sup > ep_sup) | ||
596 | rc_sup = ep_sup; | ||
597 | |||
598 | rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD); | ||
599 | ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD); | ||
600 | |||
601 | /* If Supported greater than limit in module param, limit it */ | ||
602 | if (rc_sup > (qib_pcie_caps & 7)) | ||
603 | rc_sup = qib_pcie_caps & 7; | ||
604 | /* If less than (allowed, supported), bump root payload */ | ||
605 | if (rc_sup > rc_cur) { | ||
606 | rc_cur = rc_sup; | ||
607 | pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) | | ||
608 | val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD); | ||
609 | pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); | ||
610 | } | ||
611 | /* If less than (allowed, supported), bump endpoint payload */ | ||
612 | if (rc_sup > ep_cur) { | ||
613 | ep_cur = rc_sup; | ||
614 | ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) | | ||
615 | val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD); | ||
616 | pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); | ||
617 | } | ||
618 | |||
619 | /* | ||
620 | * Now the Read Request size. | ||
621 | * No field for max supported, but PCIe spec limits it to 4096, | ||
622 | * which is code '5' (log2(4096) - 7) | ||
623 | */ | ||
624 | rc_sup = 5; | ||
625 | if (rc_sup > ((qib_pcie_caps >> 4) & 7)) | ||
626 | rc_sup = (qib_pcie_caps >> 4) & 7; | ||
627 | rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ); | ||
628 | ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ); | ||
629 | |||
630 | if (rc_sup > rc_cur) { | ||
631 | rc_cur = rc_sup; | ||
632 | pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) | | ||
633 | val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ); | ||
634 | pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl); | ||
635 | } | ||
636 | if (rc_sup > ep_cur) { | ||
637 | ep_cur = rc_sup; | ||
638 | ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) | | ||
639 | val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ); | ||
640 | pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl); | ||
641 | } | ||
642 | bail: | ||
643 | return ret; | ||
644 | } | ||
645 | /* End of PCIe capability tuning */ | ||
646 | |||
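Worked example of the qib_pcie_caps encoding used above, on the assumption that each 3-bit code c stands for 2^(c+7) bytes: allowing a 256-byte payload (code 1) and a 4096-byte read request (code 5) packs to (5 << 4) | 1 = 0x51 = 81, so something like "modprobe ib_qib pcie_caps=81" would request those limits (the ib_qib module name is an assumption of this example).

    #define EXAMPLE_QIB_PCIE_CAPS   ((5 << 4) | 1)  /* ReadReq 4096B, Payload 256B */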
647 | /* | ||
648 | * From here through qib_pci_err_handler definition is invoked via | ||
649 | * PCI error infrastructure, registered via pci | ||
650 | */ | ||
651 | static pci_ers_result_t | ||
652 | qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | ||
653 | { | ||
654 | struct qib_devdata *dd = pci_get_drvdata(pdev); | ||
655 | pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED; | ||
656 | |||
657 | switch (state) { | ||
658 | case pci_channel_io_normal: | ||
659 | qib_devinfo(pdev, "State Normal, ignoring\n"); | ||
660 | break; | ||
661 | |||
662 | case pci_channel_io_frozen: | ||
663 | qib_devinfo(pdev, "State Frozen, requesting reset\n"); | ||
664 | pci_disable_device(pdev); | ||
665 | ret = PCI_ERS_RESULT_NEED_RESET; | ||
666 | break; | ||
667 | |||
668 | case pci_channel_io_perm_failure: | ||
669 | qib_devinfo(pdev, "State Permanent Failure, disabling\n"); | ||
670 | if (dd) { | ||
671 | /* no more register accesses! */ | ||
672 | dd->flags &= ~QIB_PRESENT; | ||
673 | qib_disable_after_error(dd); | ||
674 | } | ||
675 | /* else early, or other problem */ | ||
676 | ret = PCI_ERS_RESULT_DISCONNECT; | ||
677 | break; | ||
678 | |||
679 | default: /* shouldn't happen */ | ||
680 | qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n", | ||
681 | state); | ||
682 | break; | ||
683 | } | ||
684 | return ret; | ||
685 | } | ||
686 | |||
687 | static pci_ers_result_t | ||
688 | qib_pci_mmio_enabled(struct pci_dev *pdev) | ||
689 | { | ||
690 | u64 words = 0U; | ||
691 | struct qib_devdata *dd = pci_get_drvdata(pdev); | ||
692 | pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED; | ||
693 | |||
694 | if (dd && dd->pport) { | ||
695 | words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV); | ||
696 | if (words == ~0ULL) | ||
697 | ret = PCI_ERS_RESULT_NEED_RESET; | ||
698 | } | ||
699 | qib_devinfo(pdev, "QIB mmio_enabled function called, " | ||
700 | "read wordscntr %Lx, returning %d\n", words, ret); | ||
701 | return ret; | ||
702 | } | ||
703 | |||
704 | static pci_ers_result_t | ||
705 | qib_pci_slot_reset(struct pci_dev *pdev) | ||
706 | { | ||
707 | qib_devinfo(pdev, "QIB link_reset function called, ignored\n"); | ||
708 | return PCI_ERS_RESULT_CAN_RECOVER; | ||
709 | } | ||
710 | |||
711 | static pci_ers_result_t | ||
712 | qib_pci_link_reset(struct pci_dev *pdev) | ||
713 | { | ||
714 | qib_devinfo(pdev, "QIB link_reset function called, ignored\n"); | ||
715 | return PCI_ERS_RESULT_CAN_RECOVER; | ||
716 | } | ||
717 | |||
718 | static void | ||
719 | qib_pci_resume(struct pci_dev *pdev) | ||
720 | { | ||
721 | struct qib_devdata *dd = pci_get_drvdata(pdev); | ||
722 | qib_devinfo(pdev, "QIB resume function called\n"); | ||
723 | pci_cleanup_aer_uncorrect_error_status(pdev); | ||
724 | /* | ||
725 | * Running jobs will fail, since it's asynchronous | ||
726 | * unlike sysfs-requested reset. Better than | ||
727 | * doing nothing. | ||
728 | */ | ||
729 | qib_init(dd, 1); /* same as re-init after reset */ | ||
730 | } | ||
731 | |||
732 | struct pci_error_handlers qib_pci_err_handler = { | ||
733 | .error_detected = qib_pci_error_detected, | ||
734 | .mmio_enabled = qib_pci_mmio_enabled, | ||
735 | .link_reset = qib_pci_link_reset, | ||
736 | .slot_reset = qib_pci_slot_reset, | ||
737 | .resume = qib_pci_resume, | ||
738 | }; | ||
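The table above only takes effect once it is referenced from the driver's struct pci_driver; the real registration lives elsewhere in this patch, so the sketch below is hypothetical and omits probe/remove/id_table:

    static struct pci_driver example_qib_driver = {
            .name           = QIB_DRV_NAME,
            /* .probe, .remove and .id_table omitted from this sketch */
            .err_handler    = &qib_pci_err_handler,
    };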
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c new file mode 100644 index 000000000000..10b8c444dd31 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_pio_copy.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2009 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include "qib.h" | ||
34 | |||
35 | /** | ||
36 | * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits | ||
37 | * @to: destination, in MMIO space (must be 64-bit aligned) | ||
38 | * @from: source (must be 64-bit aligned) | ||
39 | * @count: number of 32-bit quantities to copy | ||
40 | * | ||
41 | * Copy data from kernel space to MMIO space, in multiples of 32 bits at a | ||
42 | * time. Order of access is not guaranteed, nor is a memory barrier | ||
43 | * performed afterwards. | ||
44 | */ | ||
45 | void qib_pio_copy(void __iomem *to, const void *from, size_t count) | ||
46 | { | ||
47 | #ifdef CONFIG_64BIT | ||
48 | u64 __iomem *dst = to; | ||
49 | const u64 *src = from; | ||
50 | const u64 *end = src + (count >> 1); | ||
51 | |||
52 | while (src < end) | ||
53 | __raw_writeq(*src++, dst++); | ||
54 | if (count & 1) | ||
55 | __raw_writel(*(const u32 *)src, dst); | ||
56 | #else | ||
57 | u32 __iomem *dst = to; | ||
58 | const u32 *src = from; | ||
59 | const u32 *end = src + count; | ||
60 | |||
61 | while (src < end) | ||
62 | __raw_writel(*src++, dst++); | ||
63 | #endif | ||
64 | } | ||
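A hypothetical caller, to show the intended use: copy a 10-dword header and then the payload into a PIO send buffer. Since qib_pio_copy() neither orders the stores nor issues a barrier, the caller flushes explicitly; the 40-byte offset keeps the destination 64-bit aligned as the kernel-doc requires:

    static void example_fill_piobuf(void __iomem *piobuf, const u32 *hdr,
                                    const u32 *payload, size_t ndwords)
    {
            qib_pio_copy(piobuf, hdr, 10);
            qib_pio_copy((char __iomem *)piobuf + 10 * sizeof(u32),
                         payload, ndwords);
            wmb();          /* order the MMIO stores before any doorbell */
    }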
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c new file mode 100644 index 000000000000..e0f65e39076b --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -0,0 +1,1255 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <linux/err.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | |||
38 | #include "qib.h" | ||
39 | |||
40 | #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) | ||
41 | #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) | ||
42 | |||
43 | static inline unsigned mk_qpn(struct qib_qpn_table *qpt, | ||
44 | struct qpn_map *map, unsigned off) | ||
45 | { | ||
46 | return (map - qpt->map) * BITS_PER_PAGE + off; | ||
47 | } | ||
48 | |||
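mk_qpn() turns a bitmap page and bit offset back into a QPN; the inverse mapping, sketched below as a hypothetical helper, is what free_qpn() does further down. With 4 KiB pages BITS_PER_PAGE is 32768, so e.g. QPN 100000 lands in map[3] at bit 1696:

    static void example_qpn_to_bit(struct qib_qpn_table *qpt, u32 qpn,
                                   struct qpn_map **map, unsigned *off)
    {
            *map = &qpt->map[qpn / BITS_PER_PAGE];
            *off = qpn & BITS_PER_PAGE_MASK;
    }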
49 | static inline unsigned find_next_offset(struct qib_qpn_table *qpt, | ||
50 | struct qpn_map *map, unsigned off, | ||
51 | unsigned r) | ||
52 | { | ||
53 | if (qpt->mask) { | ||
54 | off++; | ||
55 | if ((off & qpt->mask) >> 1 != r) | ||
56 | off = ((off & qpt->mask) ? | ||
57 | (off | qpt->mask) + 1 : off) | (r << 1); | ||
58 | } else | ||
59 | off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); | ||
60 | return off; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Convert the AETH credit code into the number of credits. | ||
65 | */ | ||
66 | static u32 credit_table[31] = { | ||
67 | 0, /* 0 */ | ||
68 | 1, /* 1 */ | ||
69 | 2, /* 2 */ | ||
70 | 3, /* 3 */ | ||
71 | 4, /* 4 */ | ||
72 | 6, /* 5 */ | ||
73 | 8, /* 6 */ | ||
74 | 12, /* 7 */ | ||
75 | 16, /* 8 */ | ||
76 | 24, /* 9 */ | ||
77 | 32, /* A */ | ||
78 | 48, /* B */ | ||
79 | 64, /* C */ | ||
80 | 96, /* D */ | ||
81 | 128, /* E */ | ||
82 | 192, /* F */ | ||
83 | 256, /* 10 */ | ||
84 | 384, /* 11 */ | ||
85 | 512, /* 12 */ | ||
86 | 768, /* 13 */ | ||
87 | 1024, /* 14 */ | ||
88 | 1536, /* 15 */ | ||
89 | 2048, /* 16 */ | ||
90 | 3072, /* 17 */ | ||
91 | 4096, /* 18 */ | ||
92 | 6144, /* 19 */ | ||
93 | 8192, /* 1A */ | ||
94 | 12288, /* 1B */ | ||
95 | 16384, /* 1C */ | ||
96 | 24576, /* 1D */ | ||
97 | 32768 /* 1E */ | ||
98 | }; | ||
99 | |||
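The table is indexed by the 5-bit credit code carried in the AETH syndrome byte. A sketch of the decode, assuming the usual IBA layout (code in bits 28:24 of the AETH word); the shift and mask here are assumptions of this example, not names taken from the driver:

    static u32 example_aeth_to_credits(u32 aeth)
    {
            u32 code = (aeth >> 24) & 0x1f;

            return code < ARRAY_SIZE(credit_table) ? credit_table[code] : 0;
    }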
100 | static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map) | ||
101 | { | ||
102 | unsigned long page = get_zeroed_page(GFP_KERNEL); | ||
103 | |||
104 | /* | ||
105 | * Free the page if someone raced with us installing it. | ||
106 | */ | ||
107 | |||
108 | spin_lock(&qpt->lock); | ||
109 | if (map->page) | ||
110 | free_page(page); | ||
111 | else | ||
112 | map->page = (void *)page; | ||
113 | spin_unlock(&qpt->lock); | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Allocate the next available QPN or | ||
118 | * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. | ||
119 | */ | ||
120 | static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | ||
121 | enum ib_qp_type type, u8 port) | ||
122 | { | ||
123 | u32 i, offset, max_scan, qpn; | ||
124 | struct qpn_map *map; | ||
125 | u32 ret; | ||
126 | int r; | ||
127 | |||
128 | if (type == IB_QPT_SMI || type == IB_QPT_GSI) { | ||
129 | unsigned n; | ||
130 | |||
131 | ret = type == IB_QPT_GSI; | ||
132 | n = 1 << (ret + 2 * (port - 1)); | ||
133 | spin_lock(&qpt->lock); | ||
134 | if (qpt->flags & n) | ||
135 | ret = -EINVAL; | ||
136 | else | ||
137 | qpt->flags |= n; | ||
138 | spin_unlock(&qpt->lock); | ||
139 | goto bail; | ||
140 | } | ||
141 | |||
142 | r = smp_processor_id(); | ||
143 | if (r >= dd->n_krcv_queues) | ||
144 | r %= dd->n_krcv_queues; | ||
145 | qpn = qpt->last + 1; | ||
146 | if (qpn >= QPN_MAX) | ||
147 | qpn = 2; | ||
148 | if (qpt->mask && ((qpn & qpt->mask) >> 1) != r) | ||
149 | qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) | | ||
150 | (r << 1); | ||
151 | offset = qpn & BITS_PER_PAGE_MASK; | ||
152 | map = &qpt->map[qpn / BITS_PER_PAGE]; | ||
153 | max_scan = qpt->nmaps - !offset; | ||
154 | for (i = 0;;) { | ||
155 | if (unlikely(!map->page)) { | ||
156 | get_map_page(qpt, map); | ||
157 | if (unlikely(!map->page)) | ||
158 | break; | ||
159 | } | ||
160 | do { | ||
161 | if (!test_and_set_bit(offset, map->page)) { | ||
162 | qpt->last = qpn; | ||
163 | ret = qpn; | ||
164 | goto bail; | ||
165 | } | ||
166 | offset = find_next_offset(qpt, map, offset, r); | ||
167 | qpn = mk_qpn(qpt, map, offset); | ||
168 | /* | ||
169 | * This test differs from alloc_pidmap(). | ||
170 | * If find_next_offset() does find a zero | ||
171 | * bit, we don't need to check for QPN | ||
172 | * wrapping around past our starting QPN. | ||
173 | * We just need to be sure we don't loop | ||
174 | * forever. | ||
175 | */ | ||
176 | } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); | ||
177 | /* | ||
178 | * In order to keep the number of pages allocated to a | ||
179 | * minimum, we scan all the existing pages before increasing | ||
180 | * the size of the bitmap table. | ||
181 | */ | ||
182 | if (++i > max_scan) { | ||
183 | if (qpt->nmaps == QPNMAP_ENTRIES) | ||
184 | break; | ||
185 | map = &qpt->map[qpt->nmaps++]; | ||
186 | offset = qpt->mask ? (r << 1) : 0; | ||
187 | } else if (map < &qpt->map[qpt->nmaps]) { | ||
188 | ++map; | ||
189 | offset = qpt->mask ? (r << 1) : 0; | ||
190 | } else { | ||
191 | map = &qpt->map[0]; | ||
192 | offset = qpt->mask ? (r << 1) : 2; | ||
193 | } | ||
194 | qpn = mk_qpn(qpt, map, offset); | ||
195 | } | ||
196 | |||
197 | ret = -ENOMEM; | ||
198 | |||
199 | bail: | ||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) | ||
204 | { | ||
205 | struct qpn_map *map; | ||
206 | |||
207 | map = qpt->map + qpn / BITS_PER_PAGE; | ||
208 | if (map->page) | ||
209 | clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * Put the QP into the hash table. | ||
214 | * The hash table holds a reference to the QP. | ||
215 | */ | ||
216 | static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) | ||
217 | { | ||
218 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
219 | unsigned n = qp->ibqp.qp_num % dev->qp_table_size; | ||
220 | unsigned long flags; | ||
221 | |||
222 | spin_lock_irqsave(&dev->qpt_lock, flags); | ||
223 | |||
224 | if (qp->ibqp.qp_num == 0) | ||
225 | ibp->qp0 = qp; | ||
226 | else if (qp->ibqp.qp_num == 1) | ||
227 | ibp->qp1 = qp; | ||
228 | else { | ||
229 | qp->next = dev->qp_table[n]; | ||
230 | dev->qp_table[n] = qp; | ||
231 | } | ||
232 | atomic_inc(&qp->refcount); | ||
233 | |||
234 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Remove the QP from the table so it can't be found asynchronously by | ||
239 | * the receive interrupt routine. | ||
240 | */ | ||
241 | static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) | ||
242 | { | ||
243 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
244 | struct qib_qp *q, **qpp; | ||
245 | unsigned long flags; | ||
246 | |||
247 | qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size]; | ||
248 | |||
249 | spin_lock_irqsave(&dev->qpt_lock, flags); | ||
250 | |||
251 | if (ibp->qp0 == qp) { | ||
252 | ibp->qp0 = NULL; | ||
253 | atomic_dec(&qp->refcount); | ||
254 | } else if (ibp->qp1 == qp) { | ||
255 | ibp->qp1 = NULL; | ||
256 | atomic_dec(&qp->refcount); | ||
257 | } else | ||
258 | for (; (q = *qpp) != NULL; qpp = &q->next) | ||
259 | if (q == qp) { | ||
260 | *qpp = qp->next; | ||
261 | qp->next = NULL; | ||
262 | atomic_dec(&qp->refcount); | ||
263 | break; | ||
264 | } | ||
265 | |||
266 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * qib_free_all_qps - check for QPs still in use | ||
271 | * @dd: the qlogic_ib device whose QP tables to check and empty | ||
272 | * | ||
273 | * There should not be any QPs still in use. | ||
274 | * Free memory for table. | ||
275 | */ | ||
276 | unsigned qib_free_all_qps(struct qib_devdata *dd) | ||
277 | { | ||
278 | struct qib_ibdev *dev = &dd->verbs_dev; | ||
279 | unsigned long flags; | ||
280 | struct qib_qp *qp; | ||
281 | unsigned n, qp_inuse = 0; | ||
282 | |||
283 | for (n = 0; n < dd->num_pports; n++) { | ||
284 | struct qib_ibport *ibp = &dd->pport[n].ibport_data; | ||
285 | |||
286 | if (!qib_mcast_tree_empty(ibp)) | ||
287 | qp_inuse++; | ||
288 | if (ibp->qp0) | ||
289 | qp_inuse++; | ||
290 | if (ibp->qp1) | ||
291 | qp_inuse++; | ||
292 | } | ||
293 | |||
294 | spin_lock_irqsave(&dev->qpt_lock, flags); | ||
295 | for (n = 0; n < dev->qp_table_size; n++) { | ||
296 | qp = dev->qp_table[n]; | ||
297 | dev->qp_table[n] = NULL; | ||
298 | |||
299 | for (; qp; qp = qp->next) | ||
300 | qp_inuse++; | ||
301 | } | ||
302 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | ||
303 | |||
304 | return qp_inuse; | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * qib_lookup_qpn - return the QP with the given QPN | ||
309 | * @ibp: the IB port the QP belongs to | ||
310 | * @qpn: the QP number to look up | ||
311 | * | ||
312 | * The caller is responsible for decrementing the QP reference count | ||
313 | * when done. | ||
314 | */ | ||
315 | struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) | ||
316 | { | ||
317 | struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; | ||
318 | unsigned long flags; | ||
319 | struct qib_qp *qp; | ||
320 | |||
321 | spin_lock_irqsave(&dev->qpt_lock, flags); | ||
322 | |||
323 | if (qpn == 0) | ||
324 | qp = ibp->qp0; | ||
325 | else if (qpn == 1) | ||
326 | qp = ibp->qp1; | ||
327 | else | ||
328 | for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp; | ||
329 | qp = qp->next) | ||
330 | if (qp->ibqp.qp_num == qpn) | ||
331 | break; | ||
332 | if (qp) | ||
333 | atomic_inc(&qp->refcount); | ||
334 | |||
335 | spin_unlock_irqrestore(&dev->qpt_lock, flags); | ||
336 | return qp; | ||
337 | } | ||
338 | |||
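Per the kernel-doc above, the lookup takes a reference that the caller must drop. A hypothetical caller, using the same release idiom the driver uses elsewhere (wake any waiter once the count reaches zero):

    static int example_qp_is_rc(struct qib_ibport *ibp, u32 qpn)
    {
            struct qib_qp *qp = qib_lookup_qpn(ibp, qpn);
            int rc;

            if (!qp)
                    return 0;
            rc = qp->ibqp.qp_type == IB_QPT_RC;
            if (atomic_dec_and_test(&qp->refcount))
                    wake_up(&qp->wait);
            return rc;
    }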
339 | /** | ||
340 | * qib_reset_qp - initialize the QP state to the reset state | ||
341 | * @qp: the QP to reset | ||
342 | * @type: the QP type | ||
343 | */ | ||
344 | static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type) | ||
345 | { | ||
346 | qp->remote_qpn = 0; | ||
347 | qp->qkey = 0; | ||
348 | qp->qp_access_flags = 0; | ||
349 | atomic_set(&qp->s_dma_busy, 0); | ||
350 | qp->s_flags &= QIB_S_SIGNAL_REQ_WR; | ||
351 | qp->s_hdrwords = 0; | ||
352 | qp->s_wqe = NULL; | ||
353 | qp->s_draining = 0; | ||
354 | qp->s_next_psn = 0; | ||
355 | qp->s_last_psn = 0; | ||
356 | qp->s_sending_psn = 0; | ||
357 | qp->s_sending_hpsn = 0; | ||
358 | qp->s_psn = 0; | ||
359 | qp->r_psn = 0; | ||
360 | qp->r_msn = 0; | ||
361 | if (type == IB_QPT_RC) { | ||
362 | qp->s_state = IB_OPCODE_RC_SEND_LAST; | ||
363 | qp->r_state = IB_OPCODE_RC_SEND_LAST; | ||
364 | } else { | ||
365 | qp->s_state = IB_OPCODE_UC_SEND_LAST; | ||
366 | qp->r_state = IB_OPCODE_UC_SEND_LAST; | ||
367 | } | ||
368 | qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; | ||
369 | qp->r_nak_state = 0; | ||
370 | qp->r_aflags = 0; | ||
371 | qp->r_flags = 0; | ||
372 | qp->s_head = 0; | ||
373 | qp->s_tail = 0; | ||
374 | qp->s_cur = 0; | ||
375 | qp->s_acked = 0; | ||
376 | qp->s_last = 0; | ||
377 | qp->s_ssn = 1; | ||
378 | qp->s_lsn = 0; | ||
379 | qp->s_mig_state = IB_MIG_MIGRATED; | ||
380 | memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); | ||
381 | qp->r_head_ack_queue = 0; | ||
382 | qp->s_tail_ack_queue = 0; | ||
383 | qp->s_num_rd_atomic = 0; | ||
384 | if (qp->r_rq.wq) { | ||
385 | qp->r_rq.wq->head = 0; | ||
386 | qp->r_rq.wq->tail = 0; | ||
387 | } | ||
388 | qp->r_sge.num_sge = 0; | ||
389 | } | ||
390 | |||
391 | static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | ||
392 | { | ||
393 | unsigned n; | ||
394 | |||
395 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) | ||
396 | while (qp->s_rdma_read_sge.num_sge) { | ||
397 | atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); | ||
398 | if (--qp->s_rdma_read_sge.num_sge) | ||
399 | qp->s_rdma_read_sge.sge = | ||
400 | *qp->s_rdma_read_sge.sg_list++; | ||
401 | } | ||
402 | |||
403 | while (qp->r_sge.num_sge) { | ||
404 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
405 | if (--qp->r_sge.num_sge) | ||
406 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
407 | } | ||
408 | |||
409 | if (clr_sends) { | ||
410 | while (qp->s_last != qp->s_head) { | ||
411 | struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last); | ||
412 | unsigned i; | ||
413 | |||
414 | for (i = 0; i < wqe->wr.num_sge; i++) { | ||
415 | struct qib_sge *sge = &wqe->sg_list[i]; | ||
416 | |||
417 | atomic_dec(&sge->mr->refcount); | ||
418 | } | ||
419 | if (qp->ibqp.qp_type == IB_QPT_UD || | ||
420 | qp->ibqp.qp_type == IB_QPT_SMI || | ||
421 | qp->ibqp.qp_type == IB_QPT_GSI) | ||
422 | atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); | ||
423 | if (++qp->s_last >= qp->s_size) | ||
424 | qp->s_last = 0; | ||
425 | } | ||
426 | if (qp->s_rdma_mr) { | ||
427 | atomic_dec(&qp->s_rdma_mr->refcount); | ||
428 | qp->s_rdma_mr = NULL; | ||
429 | } | ||
430 | } | ||
431 | |||
432 | if (qp->ibqp.qp_type != IB_QPT_RC) | ||
433 | return; | ||
434 | |||
435 | for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { | ||
436 | struct qib_ack_entry *e = &qp->s_ack_queue[n]; | ||
437 | |||
438 | if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && | ||
439 | e->rdma_sge.mr) { | ||
440 | atomic_dec(&e->rdma_sge.mr->refcount); | ||
441 | e->rdma_sge.mr = NULL; | ||
442 | } | ||
443 | } | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * qib_error_qp - put a QP into the error state | ||
448 | * @qp: the QP to put into the error state | ||
449 | * @err: the receive completion error to signal if a RWQE is active | ||
450 | * | ||
451 | * Flushes both send and receive work queues. | ||
452 | * Returns true if last WQE event should be generated. | ||
453 | * The QP s_lock should be held and interrupts disabled. | ||
454 | * If we are already in error state, just return. | ||
455 | */ | ||
456 | int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) | ||
457 | { | ||
458 | struct qib_ibdev *dev = to_idev(qp->ibqp.device); | ||
459 | struct ib_wc wc; | ||
460 | int ret = 0; | ||
461 | |||
462 | if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) | ||
463 | goto bail; | ||
464 | |||
465 | qp->state = IB_QPS_ERR; | ||
466 | |||
467 | if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { | ||
468 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); | ||
469 | del_timer(&qp->s_timer); | ||
470 | } | ||
471 | spin_lock(&dev->pending_lock); | ||
472 | if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { | ||
473 | qp->s_flags &= ~QIB_S_ANY_WAIT_IO; | ||
474 | list_del_init(&qp->iowait); | ||
475 | } | ||
476 | spin_unlock(&dev->pending_lock); | ||
477 | |||
478 | if (!(qp->s_flags & QIB_S_BUSY)) { | ||
479 | qp->s_hdrwords = 0; | ||
480 | if (qp->s_rdma_mr) { | ||
481 | atomic_dec(&qp->s_rdma_mr->refcount); | ||
482 | qp->s_rdma_mr = NULL; | ||
483 | } | ||
484 | if (qp->s_tx) { | ||
485 | qib_put_txreq(qp->s_tx); | ||
486 | qp->s_tx = NULL; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | /* Schedule the sending tasklet to drain the send work queue. */ | ||
491 | if (qp->s_last != qp->s_head) | ||
492 | qib_schedule_send(qp); | ||
493 | |||
494 | clear_mr_refs(qp, 0); | ||
495 | |||
496 | memset(&wc, 0, sizeof(wc)); | ||
497 | wc.qp = &qp->ibqp; | ||
498 | wc.opcode = IB_WC_RECV; | ||
499 | |||
500 | if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) { | ||
501 | wc.wr_id = qp->r_wr_id; | ||
502 | wc.status = err; | ||
503 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
504 | } | ||
505 | wc.status = IB_WC_WR_FLUSH_ERR; | ||
506 | |||
507 | if (qp->r_rq.wq) { | ||
508 | struct qib_rwq *wq; | ||
509 | u32 head; | ||
510 | u32 tail; | ||
511 | |||
512 | spin_lock(&qp->r_rq.lock); | ||
513 | |||
514 | /* sanity check pointers before trusting them */ | ||
515 | wq = qp->r_rq.wq; | ||
516 | head = wq->head; | ||
517 | if (head >= qp->r_rq.size) | ||
518 | head = 0; | ||
519 | tail = wq->tail; | ||
520 | if (tail >= qp->r_rq.size) | ||
521 | tail = 0; | ||
522 | while (tail != head) { | ||
523 | wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; | ||
524 | if (++tail >= qp->r_rq.size) | ||
525 | tail = 0; | ||
526 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
527 | } | ||
528 | wq->tail = tail; | ||
529 | |||
530 | spin_unlock(&qp->r_rq.lock); | ||
531 | } else if (qp->ibqp.event_handler) | ||
532 | ret = 1; | ||
533 | |||
534 | bail: | ||
535 | return ret; | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * qib_modify_qp - modify the attributes of a queue pair | ||
540 | * @ibqp: the queue pair whose attributes we're modifying | ||
541 | * @attr: the new attributes | ||
542 | * @attr_mask: the mask of attributes to modify | ||
543 | * @udata: user data for libibverbs.so | ||
544 | * | ||
545 | * Returns 0 on success, otherwise returns an errno. | ||
546 | */ | ||
547 | int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
548 | int attr_mask, struct ib_udata *udata) | ||
549 | { | ||
550 | struct qib_ibdev *dev = to_idev(ibqp->device); | ||
551 | struct qib_qp *qp = to_iqp(ibqp); | ||
552 | enum ib_qp_state cur_state, new_state; | ||
553 | struct ib_event ev; | ||
554 | int lastwqe = 0; | ||
555 | int mig = 0; | ||
556 | int ret; | ||
557 | u32 pmtu = 0; /* for gcc warning only */ | ||
558 | |||
559 | spin_lock_irq(&qp->r_lock); | ||
560 | spin_lock(&qp->s_lock); | ||
561 | |||
562 | cur_state = attr_mask & IB_QP_CUR_STATE ? | ||
563 | attr->cur_qp_state : qp->state; | ||
564 | new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; | ||
565 | |||
566 | if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, | ||
567 | attr_mask)) | ||
568 | goto inval; | ||
569 | |||
570 | if (attr_mask & IB_QP_AV) { | ||
571 | if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE) | ||
572 | goto inval; | ||
573 | if (qib_check_ah(qp->ibqp.device, &attr->ah_attr)) | ||
574 | goto inval; | ||
575 | } | ||
576 | |||
577 | if (attr_mask & IB_QP_ALT_PATH) { | ||
578 | if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE) | ||
579 | goto inval; | ||
580 | if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) | ||
581 | goto inval; | ||
582 | if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev))) | ||
583 | goto inval; | ||
584 | } | ||
585 | |||
586 | if (attr_mask & IB_QP_PKEY_INDEX) | ||
587 | if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev))) | ||
588 | goto inval; | ||
589 | |||
590 | if (attr_mask & IB_QP_MIN_RNR_TIMER) | ||
591 | if (attr->min_rnr_timer > 31) | ||
592 | goto inval; | ||
593 | |||
594 | if (attr_mask & IB_QP_PORT) | ||
595 | if (qp->ibqp.qp_type == IB_QPT_SMI || | ||
596 | qp->ibqp.qp_type == IB_QPT_GSI || | ||
597 | attr->port_num == 0 || | ||
598 | attr->port_num > ibqp->device->phys_port_cnt) | ||
599 | goto inval; | ||
600 | |||
601 | if (attr_mask & IB_QP_DEST_QPN) | ||
602 | if (attr->dest_qp_num > QIB_QPN_MASK) | ||
603 | goto inval; | ||
604 | |||
605 | if (attr_mask & IB_QP_RETRY_CNT) | ||
606 | if (attr->retry_cnt > 7) | ||
607 | goto inval; | ||
608 | |||
609 | if (attr_mask & IB_QP_RNR_RETRY) | ||
610 | if (attr->rnr_retry > 7) | ||
611 | goto inval; | ||
612 | |||
613 | /* | ||
614 | * Don't allow invalid path_mtu values. OK to set greater | ||
615 | * than the active mtu (or even the max_cap, if we have tuned | ||
616 | * that to a small mtu). We'll set qp->path_mtu | ||
617 | * to the lesser of requested attribute mtu and active, | ||
618 | * for packetizing messages. | ||
619 | * Note that the QP port has to be set in INIT and MTU in RTR. | ||
620 | */ | ||
621 | if (attr_mask & IB_QP_PATH_MTU) { | ||
622 | struct qib_devdata *dd = dd_from_dev(dev); | ||
623 | int mtu, pidx = qp->port_num - 1; | ||
624 | |||
625 | mtu = ib_mtu_enum_to_int(attr->path_mtu); | ||
626 | if (mtu == -1) | ||
627 | goto inval; | ||
628 | if (mtu > dd->pport[pidx].ibmtu) { | ||
629 | switch (dd->pport[pidx].ibmtu) { | ||
630 | case 4096: | ||
631 | pmtu = IB_MTU_4096; | ||
632 | break; | ||
633 | case 2048: | ||
634 | pmtu = IB_MTU_2048; | ||
635 | break; | ||
636 | case 1024: | ||
637 | pmtu = IB_MTU_1024; | ||
638 | break; | ||
639 | case 512: | ||
640 | pmtu = IB_MTU_512; | ||
641 | break; | ||
642 | case 256: | ||
643 | pmtu = IB_MTU_256; | ||
644 | break; | ||
645 | default: | ||
646 | pmtu = IB_MTU_2048; | ||
647 | } | ||
648 | } else | ||
649 | pmtu = attr->path_mtu; | ||
650 | } | ||
651 | |||
652 | if (attr_mask & IB_QP_PATH_MIG_STATE) { | ||
653 | if (attr->path_mig_state == IB_MIG_REARM) { | ||
654 | if (qp->s_mig_state == IB_MIG_ARMED) | ||
655 | goto inval; | ||
656 | if (new_state != IB_QPS_RTS) | ||
657 | goto inval; | ||
658 | } else if (attr->path_mig_state == IB_MIG_MIGRATED) { | ||
659 | if (qp->s_mig_state == IB_MIG_REARM) | ||
660 | goto inval; | ||
661 | if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD) | ||
662 | goto inval; | ||
663 | if (qp->s_mig_state == IB_MIG_ARMED) | ||
664 | mig = 1; | ||
665 | } else | ||
666 | goto inval; | ||
667 | } | ||
668 | |||
669 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
670 | if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC) | ||
671 | goto inval; | ||
672 | |||
673 | switch (new_state) { | ||
674 | case IB_QPS_RESET: | ||
675 | if (qp->state != IB_QPS_RESET) { | ||
676 | qp->state = IB_QPS_RESET; | ||
677 | spin_lock(&dev->pending_lock); | ||
678 | if (!list_empty(&qp->iowait)) | ||
679 | list_del_init(&qp->iowait); | ||
680 | spin_unlock(&dev->pending_lock); | ||
681 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); | ||
682 | spin_unlock(&qp->s_lock); | ||
683 | spin_unlock_irq(&qp->r_lock); | ||
684 | /* Stop the sending work queue and retry timer */ | ||
685 | cancel_work_sync(&qp->s_work); | ||
686 | del_timer_sync(&qp->s_timer); | ||
687 | wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); | ||
688 | if (qp->s_tx) { | ||
689 | qib_put_txreq(qp->s_tx); | ||
690 | qp->s_tx = NULL; | ||
691 | } | ||
692 | remove_qp(dev, qp); | ||
693 | wait_event(qp->wait, !atomic_read(&qp->refcount)); | ||
694 | spin_lock_irq(&qp->r_lock); | ||
695 | spin_lock(&qp->s_lock); | ||
696 | clear_mr_refs(qp, 1); | ||
697 | qib_reset_qp(qp, ibqp->qp_type); | ||
698 | } | ||
699 | break; | ||
700 | |||
701 | case IB_QPS_RTR: | ||
702 | /* Allow event to retrigger if QP set to RTR more than once */ | ||
703 | qp->r_flags &= ~QIB_R_COMM_EST; | ||
704 | qp->state = new_state; | ||
705 | break; | ||
706 | |||
707 | case IB_QPS_SQD: | ||
708 | qp->s_draining = qp->s_last != qp->s_cur; | ||
709 | qp->state = new_state; | ||
710 | break; | ||
711 | |||
712 | case IB_QPS_SQE: | ||
713 | if (qp->ibqp.qp_type == IB_QPT_RC) | ||
714 | goto inval; | ||
715 | qp->state = new_state; | ||
716 | break; | ||
717 | |||
718 | case IB_QPS_ERR: | ||
719 | lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); | ||
720 | break; | ||
721 | |||
722 | default: | ||
723 | qp->state = new_state; | ||
724 | break; | ||
725 | } | ||
726 | |||
727 | if (attr_mask & IB_QP_PKEY_INDEX) | ||
728 | qp->s_pkey_index = attr->pkey_index; | ||
729 | |||
730 | if (attr_mask & IB_QP_PORT) | ||
731 | qp->port_num = attr->port_num; | ||
732 | |||
733 | if (attr_mask & IB_QP_DEST_QPN) | ||
734 | qp->remote_qpn = attr->dest_qp_num; | ||
735 | |||
736 | if (attr_mask & IB_QP_SQ_PSN) { | ||
737 | qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK; | ||
738 | qp->s_psn = qp->s_next_psn; | ||
739 | qp->s_sending_psn = qp->s_next_psn; | ||
740 | qp->s_last_psn = qp->s_next_psn - 1; | ||
741 | qp->s_sending_hpsn = qp->s_last_psn; | ||
742 | } | ||
743 | |||
744 | if (attr_mask & IB_QP_RQ_PSN) | ||
745 | qp->r_psn = attr->rq_psn & QIB_PSN_MASK; | ||
746 | |||
747 | if (attr_mask & IB_QP_ACCESS_FLAGS) | ||
748 | qp->qp_access_flags = attr->qp_access_flags; | ||
749 | |||
750 | if (attr_mask & IB_QP_AV) { | ||
751 | qp->remote_ah_attr = attr->ah_attr; | ||
752 | qp->s_srate = attr->ah_attr.static_rate; | ||
753 | } | ||
754 | |||
755 | if (attr_mask & IB_QP_ALT_PATH) { | ||
756 | qp->alt_ah_attr = attr->alt_ah_attr; | ||
757 | qp->s_alt_pkey_index = attr->alt_pkey_index; | ||
758 | } | ||
759 | |||
760 | if (attr_mask & IB_QP_PATH_MIG_STATE) { | ||
761 | qp->s_mig_state = attr->path_mig_state; | ||
762 | if (mig) { | ||
763 | qp->remote_ah_attr = qp->alt_ah_attr; | ||
764 | qp->port_num = qp->alt_ah_attr.port_num; | ||
765 | qp->s_pkey_index = qp->s_alt_pkey_index; | ||
766 | } | ||
767 | } | ||
768 | |||
769 | if (attr_mask & IB_QP_PATH_MTU) | ||
770 | qp->path_mtu = pmtu; | ||
771 | |||
772 | if (attr_mask & IB_QP_RETRY_CNT) { | ||
773 | qp->s_retry_cnt = attr->retry_cnt; | ||
774 | qp->s_retry = attr->retry_cnt; | ||
775 | } | ||
776 | |||
777 | if (attr_mask & IB_QP_RNR_RETRY) { | ||
778 | qp->s_rnr_retry_cnt = attr->rnr_retry; | ||
779 | qp->s_rnr_retry = attr->rnr_retry; | ||
780 | } | ||
781 | |||
782 | if (attr_mask & IB_QP_MIN_RNR_TIMER) | ||
783 | qp->r_min_rnr_timer = attr->min_rnr_timer; | ||
784 | |||
785 | if (attr_mask & IB_QP_TIMEOUT) | ||
786 | qp->timeout = attr->timeout; | ||
787 | |||
788 | if (attr_mask & IB_QP_QKEY) | ||
789 | qp->qkey = attr->qkey; | ||
790 | |||
791 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
792 | qp->r_max_rd_atomic = attr->max_dest_rd_atomic; | ||
793 | |||
794 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) | ||
795 | qp->s_max_rd_atomic = attr->max_rd_atomic; | ||
796 | |||
797 | spin_unlock(&qp->s_lock); | ||
798 | spin_unlock_irq(&qp->r_lock); | ||
799 | |||
800 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) | ||
801 | insert_qp(dev, qp); | ||
802 | |||
803 | if (lastwqe) { | ||
804 | ev.device = qp->ibqp.device; | ||
805 | ev.element.qp = &qp->ibqp; | ||
806 | ev.event = IB_EVENT_QP_LAST_WQE_REACHED; | ||
807 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); | ||
808 | } | ||
809 | if (mig) { | ||
810 | ev.device = qp->ibqp.device; | ||
811 | ev.element.qp = &qp->ibqp; | ||
812 | ev.event = IB_EVENT_PATH_MIG; | ||
813 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); | ||
814 | } | ||
815 | ret = 0; | ||
816 | goto bail; | ||
817 | |||
818 | inval: | ||
819 | spin_unlock(&qp->s_lock); | ||
820 | spin_unlock_irq(&qp->r_lock); | ||
821 | ret = -EINVAL; | ||
822 | |||
823 | bail: | ||
824 | return ret; | ||
825 | } | ||
826 | |||
827 | int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
828 | int attr_mask, struct ib_qp_init_attr *init_attr) | ||
829 | { | ||
830 | struct qib_qp *qp = to_iqp(ibqp); | ||
831 | |||
832 | attr->qp_state = qp->state; | ||
833 | attr->cur_qp_state = attr->qp_state; | ||
834 | attr->path_mtu = qp->path_mtu; | ||
835 | attr->path_mig_state = qp->s_mig_state; | ||
836 | attr->qkey = qp->qkey; | ||
837 | attr->rq_psn = qp->r_psn & QIB_PSN_MASK; | ||
838 | attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK; | ||
839 | attr->dest_qp_num = qp->remote_qpn; | ||
840 | attr->qp_access_flags = qp->qp_access_flags; | ||
841 | attr->cap.max_send_wr = qp->s_size - 1; | ||
842 | attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; | ||
843 | attr->cap.max_send_sge = qp->s_max_sge; | ||
844 | attr->cap.max_recv_sge = qp->r_rq.max_sge; | ||
845 | attr->cap.max_inline_data = 0; | ||
846 | attr->ah_attr = qp->remote_ah_attr; | ||
847 | attr->alt_ah_attr = qp->alt_ah_attr; | ||
848 | attr->pkey_index = qp->s_pkey_index; | ||
849 | attr->alt_pkey_index = qp->s_alt_pkey_index; | ||
850 | attr->en_sqd_async_notify = 0; | ||
851 | attr->sq_draining = qp->s_draining; | ||
852 | attr->max_rd_atomic = qp->s_max_rd_atomic; | ||
853 | attr->max_dest_rd_atomic = qp->r_max_rd_atomic; | ||
854 | attr->min_rnr_timer = qp->r_min_rnr_timer; | ||
855 | attr->port_num = qp->port_num; | ||
856 | attr->timeout = qp->timeout; | ||
857 | attr->retry_cnt = qp->s_retry_cnt; | ||
858 | attr->rnr_retry = qp->s_rnr_retry_cnt; | ||
859 | attr->alt_port_num = qp->alt_ah_attr.port_num; | ||
860 | attr->alt_timeout = qp->alt_timeout; | ||
861 | |||
862 | init_attr->event_handler = qp->ibqp.event_handler; | ||
863 | init_attr->qp_context = qp->ibqp.qp_context; | ||
864 | init_attr->send_cq = qp->ibqp.send_cq; | ||
865 | init_attr->recv_cq = qp->ibqp.recv_cq; | ||
866 | init_attr->srq = qp->ibqp.srq; | ||
867 | init_attr->cap = attr->cap; | ||
868 | if (qp->s_flags & QIB_S_SIGNAL_REQ_WR) | ||
869 | init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; | ||
870 | else | ||
871 | init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; | ||
872 | init_attr->qp_type = qp->ibqp.qp_type; | ||
873 | init_attr->port_num = qp->port_num; | ||
874 | return 0; | ||
875 | } | ||
876 | |||
877 | /** | ||
878 | * qib_compute_aeth - compute the AETH (syndrome + MSN) | ||
879 | * @qp: the queue pair to compute the AETH for | ||
880 | * | ||
881 | * Returns the AETH. | ||
882 | */ | ||
883 | __be32 qib_compute_aeth(struct qib_qp *qp) | ||
884 | { | ||
885 | u32 aeth = qp->r_msn & QIB_MSN_MASK; | ||
886 | |||
887 | if (qp->ibqp.srq) { | ||
888 | /* | ||
889 | * Shared receive queues don't generate credits. | ||
890 | * Set the credit field to the invalid value. | ||
891 | */ | ||
892 | aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT; | ||
893 | } else { | ||
894 | u32 min, max, x; | ||
895 | u32 credits; | ||
896 | struct qib_rwq *wq = qp->r_rq.wq; | ||
897 | u32 head; | ||
898 | u32 tail; | ||
899 | |||
900 | /* sanity check pointers before trusting them */ | ||
901 | head = wq->head; | ||
902 | if (head >= qp->r_rq.size) | ||
903 | head = 0; | ||
904 | tail = wq->tail; | ||
905 | if (tail >= qp->r_rq.size) | ||
906 | tail = 0; | ||
907 | /* | ||
908 | * Compute the number of credits available (RWQEs). | ||
909 | * XXX Not holding the r_rq.lock here so there is a small | ||
910 | * chance that the pair of reads are not atomic. | ||
911 | */ | ||
912 | credits = head - tail; | ||
913 | if ((int)credits < 0) | ||
914 | credits += qp->r_rq.size; | ||
915 | /* | ||
916 | * Binary search the credit table to find the code to | ||
917 | * use. | ||
918 | */ | ||
919 | min = 0; | ||
920 | max = 31; | ||
921 | for (;;) { | ||
922 | x = (min + max) / 2; | ||
923 | if (credit_table[x] == credits) | ||
924 | break; | ||
925 | if (credit_table[x] > credits) | ||
926 | max = x; | ||
927 | else if (min == x) | ||
928 | break; | ||
929 | else | ||
930 | min = x; | ||
931 | } | ||
932 | aeth |= x << QIB_AETH_CREDIT_SHIFT; | ||
933 | } | ||
934 | return cpu_to_be32(aeth); | ||
935 | } | ||
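The loop above maps an RWQE count onto a 5-bit AETH credit code by binary-searching a monotonically increasing credit table and packing the code next to the MSN. A minimal user-space sketch of the same lookup is shown here for illustration; the table contents, shift and mask are stand-ins, not the driver's actual values.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins; the driver keeps its own table and masks. */
	#define AETH_CREDIT_SHIFT_DEMO 24
	#define MSN_MASK_DEMO          0xFFFFFF

	/* Monotonically increasing: entry x means "at least table[x] credits". */
	static const uint32_t credit_table_demo[32] = {
		0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
		256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144,
		8192, 12288, 16384, 24576, 32768, 49152
	};

	static uint32_t make_aeth_demo(uint32_t msn, uint32_t credits)
	{
		uint32_t min = 0, max = 31, x;

		/* Same search as above: the code stops at an entry that does
		 * not overstate the number of available credits. */
		for (;;) {
			x = (min + max) / 2;
			if (credit_table_demo[x] == credits)
				break;
			if (credit_table_demo[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		return (x << AETH_CREDIT_SHIFT_DEMO) | (msn & MSN_MASK_DEMO);
	}

	int main(void)
	{
		printf("AETH for msn=5, 100 credits: 0x%08x\n",
		       make_aeth_demo(5, 100));
		return 0;
	}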
936 | |||
937 | /** | ||
938 | * qib_create_qp - create a queue pair for a device | ||
939 | * @ibpd: the protection domain whose device we create the queue pair for | ||
940 | * @init_attr: the attributes of the queue pair | ||
941 | * @udata: user data for libibverbs.so | ||
942 | * | ||
943 | * Returns the queue pair on success, otherwise returns an errno. | ||
944 | * | ||
945 | * Called by the ib_create_qp() core verbs function. | ||
946 | */ | ||
947 | struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | ||
948 | struct ib_qp_init_attr *init_attr, | ||
949 | struct ib_udata *udata) | ||
950 | { | ||
951 | struct qib_qp *qp; | ||
952 | int err; | ||
953 | struct qib_swqe *swq = NULL; | ||
954 | struct qib_ibdev *dev; | ||
955 | struct qib_devdata *dd; | ||
956 | size_t sz; | ||
957 | size_t sg_list_sz; | ||
958 | struct ib_qp *ret; | ||
959 | |||
960 | if (init_attr->cap.max_send_sge > ib_qib_max_sges || | ||
961 | init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) { | ||
962 | ret = ERR_PTR(-EINVAL); | ||
963 | goto bail; | ||
964 | } | ||
965 | |||
966 | /* Check receive queue parameters if no SRQ is specified. */ | ||
967 | if (!init_attr->srq) { | ||
968 | if (init_attr->cap.max_recv_sge > ib_qib_max_sges || | ||
969 | init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) { | ||
970 | ret = ERR_PTR(-EINVAL); | ||
971 | goto bail; | ||
972 | } | ||
973 | if (init_attr->cap.max_send_sge + | ||
974 | init_attr->cap.max_send_wr + | ||
975 | init_attr->cap.max_recv_sge + | ||
976 | init_attr->cap.max_recv_wr == 0) { | ||
977 | ret = ERR_PTR(-EINVAL); | ||
978 | goto bail; | ||
979 | } | ||
980 | } | ||
981 | |||
982 | switch (init_attr->qp_type) { | ||
983 | case IB_QPT_SMI: | ||
984 | case IB_QPT_GSI: | ||
985 | if (init_attr->port_num == 0 || | ||
986 | init_attr->port_num > ibpd->device->phys_port_cnt) { | ||
987 | ret = ERR_PTR(-EINVAL); | ||
988 | goto bail; | ||
989 | } | ||
990 | case IB_QPT_UC: | ||
991 | case IB_QPT_RC: | ||
992 | case IB_QPT_UD: | ||
993 | sz = sizeof(struct qib_sge) * | ||
994 | init_attr->cap.max_send_sge + | ||
995 | sizeof(struct qib_swqe); | ||
996 | swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz); | ||
997 | if (swq == NULL) { | ||
998 | ret = ERR_PTR(-ENOMEM); | ||
999 | goto bail; | ||
1000 | } | ||
1001 | sz = sizeof(*qp); | ||
1002 | sg_list_sz = 0; | ||
1003 | if (init_attr->srq) { | ||
1004 | struct qib_srq *srq = to_isrq(init_attr->srq); | ||
1005 | |||
1006 | if (srq->rq.max_sge > 1) | ||
1007 | sg_list_sz = sizeof(*qp->r_sg_list) * | ||
1008 | (srq->rq.max_sge - 1); | ||
1009 | } else if (init_attr->cap.max_recv_sge > 1) | ||
1010 | sg_list_sz = sizeof(*qp->r_sg_list) * | ||
1011 | (init_attr->cap.max_recv_sge - 1); | ||
1012 | qp = kzalloc(sz + sg_list_sz, GFP_KERNEL); | ||
1013 | if (!qp) { | ||
1014 | ret = ERR_PTR(-ENOMEM); | ||
1015 | goto bail_swq; | ||
1016 | } | ||
1017 | if (init_attr->srq) | ||
1018 | sz = 0; | ||
1019 | else { | ||
1020 | qp->r_rq.size = init_attr->cap.max_recv_wr + 1; | ||
1021 | qp->r_rq.max_sge = init_attr->cap.max_recv_sge; | ||
1022 | sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + | ||
1023 | sizeof(struct qib_rwqe); | ||
1024 | qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) + | ||
1025 | qp->r_rq.size * sz); | ||
1026 | if (!qp->r_rq.wq) { | ||
1027 | ret = ERR_PTR(-ENOMEM); | ||
1028 | goto bail_qp; | ||
1029 | } | ||
1030 | } | ||
1031 | |||
1032 | /* | ||
1033 | * ib_create_qp() will initialize qp->ibqp | ||
1034 | * except for qp->ibqp.qp_num. | ||
1035 | */ | ||
1036 | spin_lock_init(&qp->r_lock); | ||
1037 | spin_lock_init(&qp->s_lock); | ||
1038 | spin_lock_init(&qp->r_rq.lock); | ||
1039 | atomic_set(&qp->refcount, 0); | ||
1040 | init_waitqueue_head(&qp->wait); | ||
1041 | init_waitqueue_head(&qp->wait_dma); | ||
1042 | init_timer(&qp->s_timer); | ||
1043 | qp->s_timer.data = (unsigned long)qp; | ||
1044 | INIT_WORK(&qp->s_work, qib_do_send); | ||
1045 | INIT_LIST_HEAD(&qp->iowait); | ||
1046 | INIT_LIST_HEAD(&qp->rspwait); | ||
1047 | qp->state = IB_QPS_RESET; | ||
1048 | qp->s_wq = swq; | ||
1049 | qp->s_size = init_attr->cap.max_send_wr + 1; | ||
1050 | qp->s_max_sge = init_attr->cap.max_send_sge; | ||
1051 | if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) | ||
1052 | qp->s_flags = QIB_S_SIGNAL_REQ_WR; | ||
1053 | dev = to_idev(ibpd->device); | ||
1054 | dd = dd_from_dev(dev); | ||
1055 | err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type, | ||
1056 | init_attr->port_num); | ||
1057 | if (err < 0) { | ||
1058 | ret = ERR_PTR(err); | ||
1059 | vfree(qp->r_rq.wq); | ||
1060 | goto bail_qp; | ||
1061 | } | ||
1062 | qp->ibqp.qp_num = err; | ||
1063 | qp->port_num = init_attr->port_num; | ||
1064 | qp->processor_id = smp_processor_id(); | ||
1065 | qib_reset_qp(qp, init_attr->qp_type); | ||
1066 | break; | ||
1067 | |||
1068 | default: | ||
1069 | /* Don't support raw QPs */ | ||
1070 | ret = ERR_PTR(-ENOSYS); | ||
1071 | goto bail; | ||
1072 | } | ||
1073 | |||
1074 | init_attr->cap.max_inline_data = 0; | ||
1075 | |||
1076 | /* | ||
1077 | * Return the address of the RWQ as the offset to mmap. | ||
1078 | * See qib_mmap() for details. | ||
1079 | */ | ||
1080 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
1081 | if (!qp->r_rq.wq) { | ||
1082 | __u64 offset = 0; | ||
1083 | |||
1084 | err = ib_copy_to_udata(udata, &offset, | ||
1085 | sizeof(offset)); | ||
1086 | if (err) { | ||
1087 | ret = ERR_PTR(err); | ||
1088 | goto bail_ip; | ||
1089 | } | ||
1090 | } else { | ||
1091 | u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz; | ||
1092 | |||
1093 | qp->ip = qib_create_mmap_info(dev, s, | ||
1094 | ibpd->uobject->context, | ||
1095 | qp->r_rq.wq); | ||
1096 | if (!qp->ip) { | ||
1097 | ret = ERR_PTR(-ENOMEM); | ||
1098 | goto bail_ip; | ||
1099 | } | ||
1100 | |||
1101 | err = ib_copy_to_udata(udata, &(qp->ip->offset), | ||
1102 | sizeof(qp->ip->offset)); | ||
1103 | if (err) { | ||
1104 | ret = ERR_PTR(err); | ||
1105 | goto bail_ip; | ||
1106 | } | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1110 | spin_lock(&dev->n_qps_lock); | ||
1111 | if (dev->n_qps_allocated == ib_qib_max_qps) { | ||
1112 | spin_unlock(&dev->n_qps_lock); | ||
1113 | ret = ERR_PTR(-ENOMEM); | ||
1114 | goto bail_ip; | ||
1115 | } | ||
1116 | |||
1117 | dev->n_qps_allocated++; | ||
1118 | spin_unlock(&dev->n_qps_lock); | ||
1119 | |||
1120 | if (qp->ip) { | ||
1121 | spin_lock_irq(&dev->pending_lock); | ||
1122 | list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); | ||
1123 | spin_unlock_irq(&dev->pending_lock); | ||
1124 | } | ||
1125 | |||
1126 | ret = &qp->ibqp; | ||
1127 | goto bail; | ||
1128 | |||
1129 | bail_ip: | ||
1130 | if (qp->ip) | ||
1131 | kref_put(&qp->ip->ref, qib_release_mmap_info); | ||
1132 | else | ||
1133 | vfree(qp->r_rq.wq); | ||
1134 | free_qpn(&dev->qpn_table, qp->ibqp.qp_num); | ||
1135 | bail_qp: | ||
1136 | kfree(qp); | ||
1137 | bail_swq: | ||
1138 | vfree(swq); | ||
1139 | bail: | ||
1140 | return ret; | ||
1141 | } | ||
1142 | |||
1143 | /** | ||
1144 | * qib_destroy_qp - destroy a queue pair | ||
1145 | * @ibqp: the queue pair to destroy | ||
1146 | * | ||
1147 | * Returns 0 on success. | ||
1148 | * | ||
1149 | * Note that this can be called while the QP is actively sending or | ||
1150 | * receiving! | ||
1151 | */ | ||
1152 | int qib_destroy_qp(struct ib_qp *ibqp) | ||
1153 | { | ||
1154 | struct qib_qp *qp = to_iqp(ibqp); | ||
1155 | struct qib_ibdev *dev = to_idev(ibqp->device); | ||
1156 | |||
1157 | /* Make sure HW and driver activity is stopped. */ | ||
1158 | spin_lock_irq(&qp->s_lock); | ||
1159 | if (qp->state != IB_QPS_RESET) { | ||
1160 | qp->state = IB_QPS_RESET; | ||
1161 | spin_lock(&dev->pending_lock); | ||
1162 | if (!list_empty(&qp->iowait)) | ||
1163 | list_del_init(&qp->iowait); | ||
1164 | spin_unlock(&dev->pending_lock); | ||
1165 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); | ||
1166 | spin_unlock_irq(&qp->s_lock); | ||
1167 | cancel_work_sync(&qp->s_work); | ||
1168 | del_timer_sync(&qp->s_timer); | ||
1169 | wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); | ||
1170 | if (qp->s_tx) { | ||
1171 | qib_put_txreq(qp->s_tx); | ||
1172 | qp->s_tx = NULL; | ||
1173 | } | ||
1174 | remove_qp(dev, qp); | ||
1175 | wait_event(qp->wait, !atomic_read(&qp->refcount)); | ||
1176 | clear_mr_refs(qp, 1); | ||
1177 | } else | ||
1178 | spin_unlock_irq(&qp->s_lock); | ||
1179 | |||
1180 | /* all user references cleaned up, mark the QPN available */ | ||
1181 | free_qpn(&dev->qpn_table, qp->ibqp.qp_num); | ||
1182 | spin_lock(&dev->n_qps_lock); | ||
1183 | dev->n_qps_allocated--; | ||
1184 | spin_unlock(&dev->n_qps_lock); | ||
1185 | |||
1186 | if (qp->ip) | ||
1187 | kref_put(&qp->ip->ref, qib_release_mmap_info); | ||
1188 | else | ||
1189 | vfree(qp->r_rq.wq); | ||
1190 | vfree(qp->s_wq); | ||
1191 | kfree(qp); | ||
1192 | return 0; | ||
1193 | } | ||
1194 | |||
1195 | /** | ||
1196 | * qib_init_qpn_table - initialize the QP number table for a device | ||
1197 | * @qpt: the QPN table | ||
1198 | */ | ||
1199 | void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt) | ||
1200 | { | ||
1201 | spin_lock_init(&qpt->lock); | ||
1202 | qpt->last = 1; /* start with QPN 2 */ | ||
1203 | qpt->nmaps = 1; | ||
1204 | qpt->mask = dd->qpn_mask; | ||
1205 | } | ||
1206 | |||
1207 | /** | ||
1208 | * qib_free_qpn_table - free the QP number table for a device | ||
1209 | * @qpt: the QPN table | ||
1210 | */ | ||
1211 | void qib_free_qpn_table(struct qib_qpn_table *qpt) | ||
1212 | { | ||
1213 | int i; | ||
1214 | |||
1215 | for (i = 0; i < ARRAY_SIZE(qpt->map); i++) | ||
1216 | if (qpt->map[i].page) | ||
1217 | free_page((unsigned long) qpt->map[i].page); | ||
1218 | } | ||
1219 | |||
1220 | /** | ||
1221 | * qib_get_credit - handle a credit update for a QP | ||
1222 | * @qp: the qp whose send queue may need to be rescheduled | ||
1223 | * @aeth: the Acknowledge Extended Transport Header | ||
1224 | * | ||
1225 | * The QP s_lock should be held. | ||
1226 | */ | ||
1227 | void qib_get_credit(struct qib_qp *qp, u32 aeth) | ||
1228 | { | ||
1229 | u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK; | ||
1230 | |||
1231 | /* | ||
1232 | * If the credit is invalid, we can send | ||
1233 | * as many packets as we like. Otherwise, we have to | ||
1234 | * honor the credit field. | ||
1235 | */ | ||
1236 | if (credit == QIB_AETH_CREDIT_INVAL) { | ||
1237 | if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { | ||
1238 | qp->s_flags |= QIB_S_UNLIMITED_CREDIT; | ||
1239 | if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { | ||
1240 | qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; | ||
1241 | qib_schedule_send(qp); | ||
1242 | } | ||
1243 | } | ||
1244 | } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { | ||
1245 | /* Compute new LSN (i.e., MSN + credit) */ | ||
1246 | credit = (aeth + credit_table[credit]) & QIB_MSN_MASK; | ||
1247 | if (qib_cmp24(credit, qp->s_lsn) > 0) { | ||
1248 | qp->s_lsn = credit; | ||
1249 | if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { | ||
1250 | qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; | ||
1251 | qib_schedule_send(qp); | ||
1252 | } | ||
1253 | } | ||
1254 | } | ||
1255 | } | ||
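qib_get_credit() above decides whether the newly computed limit is ahead of qp->s_lsn with qib_cmp24(), a wrap-aware comparison of 24-bit sequence numbers. A plausible self-contained version of that arithmetic, shown only to illustrate the idea rather than the driver's exact helper, is:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Compare two 24-bit serial numbers modulo 2^24.
	 * Returns <0, 0 or >0 depending on whether 'a' is behind, equal to,
	 * or ahead of 'b', treating differences below 2^23 as "in order".
	 * Relies on arithmetic right shift of a negative int, which holds on
	 * the platforms the kernel targets.
	 */
	static int cmp24_demo(uint32_t a, uint32_t b)
	{
		return (int32_t)((a - b) << 8) >> 8;
	}

	int main(void)
	{
		/* 0x000001 is two steps ahead of 0xFFFFFF across the wrap. */
		printf("%d %d %d\n",
		       cmp24_demo(0x000001, 0xFFFFFF),   /*  2 */
		       cmp24_demo(5, 5),                 /*  0 */
		       cmp24_demo(3, 7));                /* -4 */
		return 0;
	}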
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c new file mode 100644 index 000000000000..35b3604b691d --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_qsfp.c | |||
@@ -0,0 +1,564 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/delay.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | |||
38 | #include "qib.h" | ||
39 | #include "qib_qsfp.h" | ||
40 | |||
41 | /* | ||
42 | * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver | ||
43 | * in qib_twsi.c | ||
44 | */ | ||
45 | #define QSFP_MAX_RETRY 4 | ||
46 | |||
47 | static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len) | ||
48 | { | ||
49 | struct qib_devdata *dd = ppd->dd; | ||
50 | u32 out, mask; | ||
51 | int ret, cnt, pass = 0; | ||
52 | int stuck = 0; | ||
53 | u8 *buff = bp; | ||
54 | |||
55 | ret = mutex_lock_interruptible(&dd->eep_lock); | ||
56 | if (ret) | ||
57 | goto no_unlock; | ||
58 | |||
59 | if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) { | ||
60 | ret = -ENXIO; | ||
61 | goto bail; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * We presume, if we are called at all, that this board has | ||
66 | * QSFP. This is on the same i2c chain as the legacy parts, | ||
67 | * but only responds if the module is selected via GPIO pins. | ||
68 | * Further, there are very long setup and hold requirements | ||
69 | * on MODSEL. | ||
70 | */ | ||
71 | mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; | ||
72 | out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; | ||
73 | if (ppd->hw_pidx) { | ||
74 | mask <<= QSFP_GPIO_PORT2_SHIFT; | ||
75 | out <<= QSFP_GPIO_PORT2_SHIFT; | ||
76 | } | ||
77 | |||
78 | dd->f_gpio_mod(dd, out, mask, mask); | ||
79 | |||
80 | /* | ||
81 | * Module could take up to 2 msec to respond to MOD_SEL, and there | ||
82 | * is no way to tell if it is ready, so we must wait. | ||
83 | */ | ||
84 | msleep(2); | ||
85 | |||
86 | /* Make sure TWSI bus is in sane state. */ | ||
87 | ret = qib_twsi_reset(dd); | ||
88 | if (ret) { | ||
89 | qib_dev_porterr(dd, ppd->port, | ||
90 | "QSFP interface Reset for read failed\n"); | ||
91 | ret = -EIO; | ||
92 | stuck = 1; | ||
93 | goto deselect; | ||
94 | } | ||
95 | |||
96 | /* All QSFP modules are at A0 */ | ||
97 | |||
98 | cnt = 0; | ||
99 | while (cnt < len) { | ||
100 | unsigned in_page; | ||
101 | int wlen = len - cnt; | ||
102 | in_page = addr % QSFP_PAGESIZE; | ||
103 | if ((in_page + wlen) > QSFP_PAGESIZE) | ||
104 | wlen = QSFP_PAGESIZE - in_page; | ||
105 | ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen); | ||
106 | /* Some QSFPs fail on the first try. Retry as a workaround */ | ||
107 | if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY) | ||
108 | continue; | ||
109 | if (ret) { | ||
110 | /* qib_twsi_blk_rd() returns 1 on error, else 0 */ | ||
111 | ret = -EIO; | ||
112 | goto deselect; | ||
113 | } | ||
114 | addr += wlen; | ||
115 | cnt += wlen; | ||
116 | } | ||
117 | ret = cnt; | ||
118 | |||
119 | deselect: | ||
120 | /* | ||
121 | * Module could take up to 10 usec after transfer before | ||
122 | * ready to respond to MOD_SEL negation, and there is no way | ||
123 | * to tell if it is ready, so we must wait. | ||
124 | */ | ||
125 | udelay(10); | ||
126 | /* set QSFP MODSEL, RST, LP all high */ | ||
127 | dd->f_gpio_mod(dd, mask, mask, mask); | ||
128 | |||
129 | /* | ||
130 | * Module could take up to 2 msec to respond to MOD_SEL | ||
131 | * going away, and there is no way to tell if it is ready, | ||
132 | * so we must wait. | ||
133 | */ | ||
134 | if (stuck) | ||
135 | qib_dev_err(dd, "QSFP interface bus stuck non-idle\n"); | ||
136 | |||
137 | if (pass >= QSFP_MAX_RETRY && ret) | ||
138 | qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n"); | ||
139 | else if (pass) | ||
140 | qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); | ||
141 | |||
142 | msleep(2); | ||
143 | |||
144 | bail: | ||
145 | mutex_unlock(&dd->eep_lock); | ||
146 | |||
147 | no_unlock: | ||
148 | return ret; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * qib_qsfp_write | ||
153 | * We do not ordinarily write the QSFP, but this is needed to select | ||
154 | * the page on non-flat QSFPs, and possibly later unusual cases | ||
155 | */ | ||
156 | static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp, | ||
157 | int len) | ||
158 | { | ||
159 | struct qib_devdata *dd = ppd->dd; | ||
160 | u32 out, mask; | ||
161 | int ret, cnt; | ||
162 | u8 *buff = bp; | ||
163 | |||
164 | ret = mutex_lock_interruptible(&dd->eep_lock); | ||
165 | if (ret) | ||
166 | goto no_unlock; | ||
167 | |||
168 | if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) { | ||
169 | ret = -ENXIO; | ||
170 | goto bail; | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * We presume, if we are called at all, that this board has | ||
175 | * QSFP. This is on the same i2c chain as the legacy parts, | ||
176 | * but only responds if the module is selected via GPIO pins. | ||
177 | * Further, there are very long setup and hold requirements | ||
178 | * on MODSEL. | ||
179 | */ | ||
180 | mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; | ||
181 | out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; | ||
182 | if (ppd->hw_pidx) { | ||
183 | mask <<= QSFP_GPIO_PORT2_SHIFT; | ||
184 | out <<= QSFP_GPIO_PORT2_SHIFT; | ||
185 | } | ||
186 | dd->f_gpio_mod(dd, out, mask, mask); | ||
187 | |||
188 | /* | ||
189 | * Module could take up to 2 msec to respond to MOD_SEL, | ||
190 | * and there is no way to tell if it is ready, so we must wait. | ||
191 | */ | ||
192 | msleep(2); | ||
193 | |||
194 | /* Make sure TWSI bus is in sane state. */ | ||
195 | ret = qib_twsi_reset(dd); | ||
196 | if (ret) { | ||
197 | qib_dev_porterr(dd, ppd->port, | ||
198 | "QSFP interface Reset for write failed\n"); | ||
199 | ret = -EIO; | ||
200 | goto deselect; | ||
201 | } | ||
202 | |||
203 | /* All QSFP modules are at A0 */ | ||
204 | |||
205 | cnt = 0; | ||
206 | while (cnt < len) { | ||
207 | unsigned in_page; | ||
208 | int wlen = len - cnt; | ||
209 | in_page = addr % QSFP_PAGESIZE; | ||
210 | if ((in_page + wlen) > QSFP_PAGESIZE) | ||
211 | wlen = QSFP_PAGESIZE - in_page; | ||
212 | ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen); | ||
213 | if (ret) { | ||
214 | /* qib_twsi_blk_wr() returns 1 on error, else 0 */ | ||
215 | ret = -EIO; | ||
216 | goto deselect; | ||
217 | } | ||
218 | addr += wlen; | ||
219 | cnt += wlen; | ||
220 | } | ||
221 | ret = cnt; | ||
222 | |||
223 | deselect: | ||
224 | /* | ||
225 | * Module could take up to 10 usec after transfer before | ||
226 | * ready to respond to MOD_SEL negation, and there is no way | ||
227 | * to tell if it is ready, so we must wait. | ||
228 | */ | ||
229 | udelay(10); | ||
230 | /* set QSFP MODSEL, RST, LP high */ | ||
231 | dd->f_gpio_mod(dd, mask, mask, mask); | ||
232 | /* | ||
233 | * Module could take up to 2 msec to respond to MOD_SEL | ||
234 | * going away, and there is no way to tell if it is ready, | ||
235 | * so we must wait. | ||
236 | */ | ||
237 | msleep(2); | ||
238 | |||
239 | bail: | ||
240 | mutex_unlock(&dd->eep_lock); | ||
241 | |||
242 | no_unlock: | ||
243 | return ret; | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * For validation, we want to check the checksums, even of the | ||
248 | * fields we do not otherwise use. This function reads the bytes from | ||
249 | * <first> to <next-1> and returns the 8 LSBs of the sum, or < 0 on error | ||
250 | */ | ||
251 | static int qsfp_cks(struct qib_pportdata *ppd, int first, int next) | ||
252 | { | ||
253 | int ret; | ||
254 | u16 cks; | ||
255 | u8 bval; | ||
256 | |||
257 | cks = 0; | ||
258 | while (first < next) { | ||
259 | ret = qsfp_read(ppd, first, &bval, 1); | ||
260 | if (ret < 0) | ||
261 | goto bail; | ||
262 | cks += bval; | ||
263 | ++first; | ||
264 | } | ||
265 | ret = cks & 0xFF; | ||
266 | bail: | ||
267 | return ret; | ||
268 | |||
269 | } | ||
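The validation scheme used by qsfp_cks() and the cache refresh below is simply the low byte of a byte-wise sum over a range of the module EEPROM, compared against the checksum byte the module stores. A tiny sketch over an in-memory copy (standing in for the TWSI reads) shows the idea:

	#include <stdint.h>
	#include <stddef.h>

	/*
	 * Low 8 bits of the sum of bytes [first, next) of an in-memory copy
	 * of the upper EEPROM page; the driver reads the bytes over TWSI
	 * instead of from a buffer.
	 */
	static uint8_t qsfp_cks_demo(const uint8_t *eeprom, size_t first,
				     size_t next)
	{
		uint16_t cks = 0;

		while (first < next)
			cks += eeprom[first++];
		return cks & 0xFF;
	}

Verification then amounts to comparing qsfp_cks_demo(eeprom, 128, 191) with byte 191 and qsfp_cks_demo(eeprom, 192, 223) with byte 223, mirroring the cks1/cks2 checks in qib_refresh_qsfp_cache() below.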
270 | |||
271 | int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp) | ||
272 | { | ||
273 | int ret; | ||
274 | int idx; | ||
275 | u16 cks; | ||
276 | u32 mask; | ||
277 | u8 peek[4]; | ||
278 | |||
279 | /* ensure sane contents on invalid reads, for cable swaps */ | ||
280 | memset(cp, 0, sizeof(*cp)); | ||
281 | |||
282 | mask = QSFP_GPIO_MOD_PRS_N; | ||
283 | if (ppd->hw_pidx) | ||
284 | mask <<= QSFP_GPIO_PORT2_SHIFT; | ||
285 | |||
286 | ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0); | ||
287 | if (ret & mask) { | ||
288 | ret = -ENODEV; | ||
289 | goto bail; | ||
290 | } | ||
291 | |||
292 | ret = qsfp_read(ppd, 0, peek, 3); | ||
293 | if (ret < 0) | ||
294 | goto bail; | ||
295 | if ((peek[0] & 0xFE) != 0x0C) | ||
296 | qib_dev_porterr(ppd->dd, ppd->port, | ||
297 | "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]); | ||
298 | |||
299 | if ((peek[2] & 2) == 0) { | ||
300 | /* | ||
301 | * If cable is paged, rather than "flat memory", we need to | ||
302 | * set the page to zero, even if it already appears to be zero. | ||
303 | */ | ||
304 | u8 poke = 0; | ||
305 | ret = qib_qsfp_write(ppd, 127, &poke, 1); | ||
306 | udelay(50); | ||
307 | if (ret != 1) { | ||
308 | qib_dev_porterr(ppd->dd, ppd->port, | ||
309 | "Failed QSFP Page set\n"); | ||
310 | goto bail; | ||
311 | } | ||
312 | } | ||
313 | |||
314 | ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1); | ||
315 | if (ret < 0) | ||
316 | goto bail; | ||
317 | if ((cp->id & 0xFE) != 0x0C) | ||
318 | qib_dev_porterr(ppd->dd, ppd->port, | ||
319 | "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id); | ||
320 | cks = cp->id; | ||
321 | |||
322 | ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1); | ||
323 | if (ret < 0) | ||
324 | goto bail; | ||
325 | cks += cp->pwr; | ||
326 | |||
327 | ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS); | ||
328 | if (ret < 0) | ||
329 | goto bail; | ||
330 | cks += ret; | ||
331 | |||
332 | ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1); | ||
333 | if (ret < 0) | ||
334 | goto bail; | ||
335 | cks += cp->len; | ||
336 | |||
337 | ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1); | ||
338 | if (ret < 0) | ||
339 | goto bail; | ||
340 | cks += cp->tech; | ||
341 | |||
342 | ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN); | ||
343 | if (ret < 0) | ||
344 | goto bail; | ||
345 | for (idx = 0; idx < QSFP_VEND_LEN; ++idx) | ||
346 | cks += cp->vendor[idx]; | ||
347 | |||
348 | ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1); | ||
349 | if (ret < 0) | ||
350 | goto bail; | ||
351 | cks += cp->xt_xcv; | ||
352 | |||
353 | ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN); | ||
354 | if (ret < 0) | ||
355 | goto bail; | ||
356 | for (idx = 0; idx < QSFP_VOUI_LEN; ++idx) | ||
357 | cks += cp->oui[idx]; | ||
358 | |||
359 | ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN); | ||
360 | if (ret < 0) | ||
361 | goto bail; | ||
362 | for (idx = 0; idx < QSFP_PN_LEN; ++idx) | ||
363 | cks += cp->partnum[idx]; | ||
364 | |||
365 | ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN); | ||
366 | if (ret < 0) | ||
367 | goto bail; | ||
368 | for (idx = 0; idx < QSFP_REV_LEN; ++idx) | ||
369 | cks += cp->rev[idx]; | ||
370 | |||
371 | ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN); | ||
372 | if (ret < 0) | ||
373 | goto bail; | ||
374 | for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx) | ||
375 | cks += cp->atten[idx]; | ||
376 | |||
377 | ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS); | ||
378 | if (ret < 0) | ||
379 | goto bail; | ||
380 | cks += ret; | ||
381 | |||
382 | cks &= 0xFF; | ||
383 | ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1); | ||
384 | if (ret < 0) | ||
385 | goto bail; | ||
386 | if (cks != cp->cks1) | ||
387 | qib_dev_porterr(ppd->dd, ppd->port, | ||
388 | "QSFP cks1 is %02X, computed %02X\n", cp->cks1, | ||
389 | cks); | ||
390 | |||
391 | /* Second checksum covers bytes 192..222 (serial, date, lot, etc.) */ | ||
392 | ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS); | ||
393 | if (ret < 0) | ||
394 | goto bail; | ||
395 | cks = ret; | ||
396 | |||
397 | ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN); | ||
398 | if (ret < 0) | ||
399 | goto bail; | ||
400 | for (idx = 0; idx < QSFP_SN_LEN; ++idx) | ||
401 | cks += cp->serial[idx]; | ||
402 | |||
403 | ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN); | ||
404 | if (ret < 0) | ||
405 | goto bail; | ||
406 | for (idx = 0; idx < QSFP_DATE_LEN; ++idx) | ||
407 | cks += cp->date[idx]; | ||
408 | |||
409 | ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN); | ||
410 | if (ret < 0) | ||
411 | goto bail; | ||
412 | for (idx = 0; idx < QSFP_LOT_LEN; ++idx) | ||
413 | cks += cp->lot[idx]; | ||
414 | |||
415 | ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS); | ||
416 | if (ret < 0) | ||
417 | goto bail; | ||
418 | cks += ret; | ||
419 | |||
420 | ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1); | ||
421 | if (ret < 0) | ||
422 | goto bail; | ||
423 | cks &= 0xFF; | ||
424 | if (cks != cp->cks2) | ||
425 | qib_dev_porterr(ppd->dd, ppd->port, | ||
426 | "QSFP cks2 is %02X, computed %02X\n", cp->cks2, | ||
427 | cks); | ||
428 | return 0; | ||
429 | |||
430 | bail: | ||
431 | cp->id = 0; | ||
432 | return ret; | ||
433 | } | ||
434 | |||
435 | const char * const qib_qsfp_devtech[16] = { | ||
436 | "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP", | ||
437 | "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML", | ||
438 | "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq", | ||
439 | "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq" | ||
440 | }; | ||
441 | |||
442 | #define QSFP_DUMP_CHUNK 16 /* Holds longest string */ | ||
443 | #define QSFP_DEFAULT_HDR_CNT 224 | ||
444 | |||
445 | static const char *pwr_codes = "1.5W2.0W2.5W3.5W"; | ||
446 | |||
447 | /* | ||
448 | * Initialize structures that control access to QSFP. Called once per port | ||
449 | * on cards that support QSFP. | ||
450 | */ | ||
451 | void qib_qsfp_init(struct qib_qsfp_data *qd, | ||
452 | void (*fevent)(struct work_struct *)) | ||
453 | { | ||
454 | u32 mask, highs; | ||
455 | int pins; | ||
456 | |||
457 | struct qib_devdata *dd = qd->ppd->dd; | ||
458 | |||
459 | /* Initialize work struct for later QSFP events */ | ||
460 | INIT_WORK(&qd->work, fevent); | ||
461 | |||
462 | /* | ||
463 | * Later, we may want more validation. For now, just set up pins and | ||
464 | * blip reset. If module is present, call qib_refresh_qsfp_cache() | ||
465 | * to do further init. | ||
466 | */ | ||
467 | mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE; | ||
468 | highs = mask - QSFP_GPIO_MOD_RST_N; | ||
469 | if (qd->ppd->hw_pidx) { | ||
470 | mask <<= QSFP_GPIO_PORT2_SHIFT; | ||
471 | highs <<= QSFP_GPIO_PORT2_SHIFT; | ||
472 | } | ||
473 | dd->f_gpio_mod(dd, highs, mask, mask); | ||
474 | udelay(20); /* Generous RST dwell */ | ||
475 | |||
476 | dd->f_gpio_mod(dd, mask, mask, mask); | ||
477 | /* Spec says module can take up to two seconds! */ | ||
478 | mask = QSFP_GPIO_MOD_PRS_N; | ||
479 | if (qd->ppd->hw_pidx) | ||
480 | mask <<= QSFP_GPIO_PORT2_SHIFT; | ||
481 | |||
482 | /* Do not try to wait here. Better to let event handle it */ | ||
483 | pins = dd->f_gpio_mod(dd, 0, 0, 0); | ||
484 | if (pins & mask) | ||
485 | goto bail; | ||
486 | /* We see a module, but it may be unwise to look yet. Just schedule */ | ||
487 | qd->t_insert = get_jiffies_64(); | ||
488 | schedule_work(&qd->work); | ||
489 | bail: | ||
490 | return; | ||
491 | } | ||
492 | |||
493 | void qib_qsfp_deinit(struct qib_qsfp_data *qd) | ||
494 | { | ||
495 | /* | ||
496 | * There is nothing to do here for now. Our | ||
497 | * work is scheduled with schedule_work(), and | ||
498 | * flush_scheduled_work() from remove_one will | ||
499 | * block until all work set up with schedule_work() | ||
500 | * completes. | ||
501 | */ | ||
502 | } | ||
503 | |||
504 | int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len) | ||
505 | { | ||
506 | struct qib_qsfp_cache cd; | ||
507 | u8 bin_buff[QSFP_DUMP_CHUNK]; | ||
508 | char lenstr[6]; | ||
509 | int sofar, ret; | ||
510 | int bidx = 0; | ||
511 | |||
512 | sofar = 0; | ||
513 | ret = qib_refresh_qsfp_cache(ppd, &cd); | ||
514 | if (ret < 0) | ||
515 | goto bail; | ||
516 | |||
517 | lenstr[0] = ' '; | ||
518 | lenstr[1] = '\0'; | ||
519 | if (QSFP_IS_CU(cd.tech)) | ||
520 | sprintf(lenstr, "%dM ", cd.len); | ||
521 | |||
522 | sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes + | ||
523 | (QSFP_PWR(cd.pwr) * 4)); | ||
524 | |||
525 | sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr, | ||
526 | qib_qsfp_devtech[cd.tech >> 4]); | ||
527 | |||
528 | sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n", | ||
529 | QSFP_VEND_LEN, cd.vendor); | ||
530 | |||
531 | sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n", | ||
532 | QSFP_OUI(cd.oui)); | ||
533 | |||
534 | sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n", | ||
535 | QSFP_PN_LEN, cd.partnum); | ||
536 | sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n", | ||
537 | QSFP_REV_LEN, cd.rev); | ||
538 | if (QSFP_IS_CU(cd.tech)) | ||
539 | sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n", | ||
540 | QSFP_ATTEN_SDR(cd.atten), | ||
541 | QSFP_ATTEN_DDR(cd.atten)); | ||
542 | sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n", | ||
543 | QSFP_SN_LEN, cd.serial); | ||
544 | sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n", | ||
545 | QSFP_DATE_LEN, cd.date); | ||
546 | sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n", | ||
547 | QSFP_LOT_LEN, cd.lot); | ||
548 | |||
549 | while (bidx < QSFP_DEFAULT_HDR_CNT) { | ||
550 | int iidx; | ||
551 | ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK); | ||
552 | if (ret < 0) | ||
553 | goto bail; | ||
554 | for (iidx = 0; iidx < ret; ++iidx) { | ||
555 | sofar += scnprintf(buf + sofar, len-sofar, " %02X", | ||
556 | bin_buff[iidx]); | ||
557 | } | ||
558 | sofar += scnprintf(buf + sofar, len - sofar, "\n"); | ||
559 | bidx += QSFP_DUMP_CHUNK; | ||
560 | } | ||
561 | ret = sofar; | ||
562 | bail: | ||
563 | return ret; | ||
564 | } | ||
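qib_qsfp_dump() above builds its text with the usual kernel idiom of accumulating `sofar += scnprintf(buf + sofar, len - sofar, ...)`, which can never run past the buffer because scnprintf returns only what was actually written. A rough user-space equivalent built on snprintf (which instead returns the would-be length) has to clamp explicitly; this helper is illustrative only:

	#include <stdio.h>

	/* Append 's' to buf, kernel-scnprintf style: 'sofar' never exceeds
	 * the space actually used, even when the output is truncated. */
	static size_t append_demo(char *buf, size_t len, size_t sofar,
				  const char *s)
	{
		size_t room, n;
		int r;

		if (sofar >= len)
			return sofar;           /* buffer already full */
		room = len - sofar;
		r = snprintf(buf + sofar, room, "%s", s);
		if (r < 0)
			return sofar;           /* output error, keep old length */
		n = (size_t)r;
		return sofar + (n < room ? n : room - 1);
	}

	int main(void)
	{
		char buf[64];
		size_t sofar = 0;

		sofar = append_demo(buf, sizeof(buf), sofar, "Vendor:ACME\n");
		sofar = append_demo(buf, sizeof(buf), sofar, "Serial:0001\n");
		printf("%zu bytes:\n%s", sofar, buf);
		return 0;
	}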
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h new file mode 100644 index 000000000000..19b527bafd57 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_qsfp.h | |||
@@ -0,0 +1,184 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | /* QSFP support common definitions, for ib_qib driver */ | ||
34 | |||
35 | #define QSFP_DEV 0xA0 | ||
36 | #define QSFP_PWR_LAG_MSEC 2000 | ||
37 | |||
38 | /* | ||
39 | * Below are masks for various QSFP signals, for Port 1. | ||
40 | * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT. | ||
41 | * _N means asserted low | ||
42 | */ | ||
43 | #define QSFP_GPIO_MOD_SEL_N (4) | ||
44 | #define QSFP_GPIO_MOD_PRS_N (8) | ||
45 | #define QSFP_GPIO_INT_N (0x10) | ||
46 | #define QSFP_GPIO_MOD_RST_N (0x20) | ||
47 | #define QSFP_GPIO_LP_MODE (0x40) | ||
48 | #define QSFP_GPIO_PORT2_SHIFT 5 | ||
49 | |||
50 | #define QSFP_PAGESIZE 128 | ||
51 | /* Defined fields that QLogic requires of qualified cables */ | ||
52 | /* Byte 0 is Identifier, not checked */ | ||
53 | /* Byte 1 is reserved "status MSB" */ | ||
54 | /* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */ | ||
55 | /* | ||
56 | * Rest of first 128 not used, although 127 is reserved for page select | ||
57 | * if module is not "Flat memory". | ||
58 | */ | ||
59 | /* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */ | ||
60 | #define QSFP_MOD_ID_OFFS 128 | ||
61 | /* | ||
62 | * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class | ||
63 | * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W | ||
64 | */ | ||
65 | #define QSFP_MOD_PWR_OFFS 129 | ||
66 | /* Byte 130 is Connector type. Not QLogic req'd */ | ||
67 | /* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */ | ||
68 | /* Byte 139 is encoding; code 0x01 is 8b10b. Not QLogic req'd */ | ||
69 | /* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not QLogic req'd */ | ||
70 | /* Byte 141 is Extended Rate Select. Not QLogic req'd */ | ||
71 | /* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */ | ||
72 | /* Byte 146 is length for Copper. Units of 1 meter */ | ||
73 | #define QSFP_MOD_LEN_OFFS 146 | ||
74 | /* | ||
75 | * Byte 147 is Device technology. D0..3 not QLogic req'd | ||
76 | * D4..7 select from 15 choices, translated by table: | ||
77 | */ | ||
78 | #define QSFP_MOD_TECH_OFFS 147 | ||
79 | extern const char *const qib_qsfp_devtech[16]; | ||
80 | /* Active Equalization includes fiber, copper full EQ, and copper near Eq */ | ||
81 | #define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1) | ||
82 | /* Attenuation should be valid for copper other than full/near Eq */ | ||
83 | #define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1) | ||
84 | /* Length is only valid if technology is "copper" */ | ||
85 | #define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1) | ||
86 | #define QSFP_TECH_1490 9 | ||
87 | |||
88 | #define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \ | ||
89 | oui[2]) | ||
90 | #define QSFP_OUI_AMPHENOL 0x415048 | ||
91 | #define QSFP_OUI_FINISAR 0x009065 | ||
92 | #define QSFP_OUI_GORE 0x002177 | ||
93 | |||
94 | /* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */ | ||
95 | #define QSFP_VEND_OFFS 148 | ||
96 | #define QSFP_VEND_LEN 16 | ||
97 | /* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */ | ||
98 | #define QSFP_IBXCV_OFFS 164 | ||
99 | /* Bytes 165..167 are Vendor OUI number */ | ||
100 | #define QSFP_VOUI_OFFS 165 | ||
101 | #define QSFP_VOUI_LEN 3 | ||
102 | /* Bytes 168..183 are Vendor Part Number, string */ | ||
103 | #define QSFP_PN_OFFS 168 | ||
104 | #define QSFP_PN_LEN 16 | ||
105 | /* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */ | ||
106 | #define QSFP_REV_OFFS 184 | ||
107 | #define QSFP_REV_LEN 2 | ||
108 | /* | ||
109 | * Bytes 186,187 are Wavelength, if Optical. Not QLogic req'd | ||
110 | * If copper, they are attenuation in dB: | ||
111 | * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR) | ||
112 | */ | ||
113 | #define QSFP_ATTEN_OFFS 186 | ||
114 | #define QSFP_ATTEN_LEN 2 | ||
115 | /* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */ | ||
116 | /* Byte 190 is Max Case Temp. Not QLogic req'd */ | ||
117 | /* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */ | ||
118 | #define QSFP_CC_OFFS 191 | ||
119 | /* Bytes 192..195 are Options implemented in qsfp. Not QLogic req'd */ | ||
120 | /* Bytes 196..211 are Serial Number, String */ | ||
121 | #define QSFP_SN_OFFS 196 | ||
122 | #define QSFP_SN_LEN 16 | ||
123 | /* Bytes 212..217 are date-code YYMMDD (MM==1 for Jan) */ | ||
124 | #define QSFP_DATE_OFFS 212 | ||
125 | #define QSFP_DATE_LEN 6 | ||
126 | /* Bytes 218,219 are optional lot-code, string */ | ||
127 | #define QSFP_LOT_OFFS 218 | ||
128 | #define QSFP_LOT_LEN 2 | ||
129 | /* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */ | ||
130 | /* Byte 223 is LSB of sum of bytes 192..222 */ | ||
131 | #define QSFP_CC_EXT_OFFS 223 | ||
132 | |||
133 | /* | ||
134 | * struct qib_qsfp_data encapsulates state of QSFP device for one port. | ||
135 | * It will be part of port-chip-specific data if a board supports QSFP. | ||
136 | * | ||
137 | * Since multiple board-types use QSFP, and their pport_data structs | ||
138 | * differ (in the chip-specific section), we need a pointer to its head. | ||
139 | * | ||
140 | * Avoiding premature optimization, we will have one work_struct per port, | ||
141 | * and let the (increasingly inaccurately named) eep_lock arbitrate | ||
142 | * access to common resources. | ||
143 | * | ||
144 | */ | ||
145 | |||
146 | /* | ||
147 | * Hold the parts of the onboard EEPROM that we care about, so we aren't | ||
148 | * constantly bit-banging the TWSI bus | ||
149 | */ | ||
150 | struct qib_qsfp_cache { | ||
151 | u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */ | ||
152 | u8 pwr; /* in D6,7 */ | ||
153 | u8 len; /* in meters, Cu only */ | ||
154 | u8 tech; | ||
155 | char vendor[QSFP_VEND_LEN]; | ||
156 | u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */ | ||
157 | u8 oui[QSFP_VOUI_LEN]; | ||
158 | u8 partnum[QSFP_PN_LEN]; | ||
159 | u8 rev[QSFP_REV_LEN]; | ||
160 | u8 atten[QSFP_ATTEN_LEN]; | ||
161 | u8 cks1; /* Checksum of bytes 128..190 */ | ||
162 | u8 serial[QSFP_SN_LEN]; | ||
163 | u8 date[QSFP_DATE_LEN]; | ||
164 | u8 lot[QSFP_LOT_LEN]; | ||
165 | u8 cks2; /* Checksum of bytes 192..222 */ | ||
166 | }; | ||
167 | |||
168 | #define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3) | ||
169 | #define QSFP_ATTEN_SDR(attenarray) (attenarray[0]) | ||
170 | #define QSFP_ATTEN_DDR(attenarray) (attenarray[1]) | ||
171 | |||
172 | struct qib_qsfp_data { | ||
173 | /* Helps to find our way */ | ||
174 | struct qib_pportdata *ppd; | ||
175 | struct work_struct work; | ||
176 | struct qib_qsfp_cache cache; | ||
177 | u64 t_insert; | ||
178 | }; | ||
179 | |||
180 | extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, | ||
181 | struct qib_qsfp_cache *cp); | ||
182 | extern void qib_qsfp_init(struct qib_qsfp_data *qd, | ||
183 | void (*fevent)(struct work_struct *)); | ||
184 | extern void qib_qsfp_deinit(struct qib_qsfp_data *qd); | ||
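To make the field-decoding macros above concrete, here is a small stand-alone example applying the same bit manipulations to made-up sample bytes; the macro copies below mirror the header rather than including it, purely for illustration.

	#include <stdio.h>

	/* Local mirrors of QSFP_OUI, QSFP_PWR and QSFP_IS_CU from above. */
	#define OUI_DEMO(o)   (((unsigned)(o)[0] << 16) | \
			       ((unsigned)(o)[1] << 8) | (o)[2])
	#define PWR_DEMO(b)   (((b) >> 6) & 3)          /* power class, D7..D6 */
	#define IS_CU_DEMO(t) ((0xED00 >> ((t) >> 4)) & 1)

	int main(void)
	{
		unsigned char oui[3] = { 0x00, 0x90, 0x65 }; /* QSFP_OUI_FINISAR */
		unsigned char pwr    = 0x40;                 /* class 1 => 2.0W  */
		unsigned char tech   = 0xA0;                 /* "Cu NoEq" entry  */

		printf("OUI 0x%06X, power class %u, copper? %s\n",
		       OUI_DEMO(oui), PWR_DEMO(pwr),
		       IS_CU_DEMO(tech) ? "yes" : "no");
		return 0;
	}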
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c new file mode 100644 index 000000000000..40c0a373719c --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -0,0 +1,2288 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/io.h> | ||
35 | |||
36 | #include "qib.h" | ||
37 | |||
38 | /* cut down ridiculously long IB macro names */ | ||
39 | #define OP(x) IB_OPCODE_RC_##x | ||
40 | |||
41 | static void rc_timeout(unsigned long arg); | ||
42 | |||
43 | static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, | ||
44 | u32 psn, u32 pmtu) | ||
45 | { | ||
46 | u32 len; | ||
47 | |||
48 | len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu; | ||
49 | ss->sge = wqe->sg_list[0]; | ||
50 | ss->sg_list = wqe->sg_list + 1; | ||
51 | ss->num_sge = wqe->wr.num_sge; | ||
52 | ss->total_len = wqe->length; | ||
53 | qib_skip_sge(ss, len, 0); | ||
54 | return wqe->length - len; | ||
55 | } | ||
56 | |||
57 | static void start_timer(struct qib_qp *qp) | ||
58 | { | ||
59 | qp->s_flags |= QIB_S_TIMER; | ||
60 | qp->s_timer.function = rc_timeout; | ||
61 | /* 4.096 usec. * (1 << qp->timeout) */ | ||
62 | qp->s_timer.expires = jiffies + | ||
63 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL); | ||
64 | add_timer(&qp->s_timer); | ||
65 | } | ||
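The expression in start_timer() above is the InfiniBand local ACK timeout encoding, 4.096 usec * 2^timeout, computed as nanoseconds (4096 << timeout) divided by 1000 before the jiffies conversion. A quick stand-alone check of the resulting magnitudes, showing only the arithmetic rather than driver behaviour:

	#include <stdio.h>

	/* 4.096 usec * 2^timeout, expressed in whole microseconds the same
	 * way as the (4096UL * (1UL << timeout)) / 1000UL term above. */
	static unsigned long rc_timeout_usec_demo(unsigned timeout)
	{
		return (4096UL * (1UL << timeout)) / 1000UL;
	}

	int main(void)
	{
		unsigned t;

		for (t = 12; t <= 18; t++)   /* ~16 ms up to ~1 s */
			printf("timeout=%2u -> ~%lu usec\n",
			       t, rc_timeout_usec_demo(t));
		return 0;
	}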
66 | |||
67 | /** | ||
68 | * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read) | ||
69 | * @dev: the device for this QP | ||
70 | * @qp: a pointer to the QP | ||
71 | * @ohdr: a pointer to the IB header being constructed | ||
72 | * @pmtu: the path MTU | ||
73 | * | ||
74 | * Return 1 if constructed; otherwise, return 0. | ||
75 | * Note that we are on the responder side of the QP context. | ||
76 | * Note the QP s_lock must be held. | ||
77 | */ | ||
78 | static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, | ||
79 | struct qib_other_headers *ohdr, u32 pmtu) | ||
80 | { | ||
81 | struct qib_ack_entry *e; | ||
82 | u32 hwords; | ||
83 | u32 len; | ||
84 | u32 bth0; | ||
85 | u32 bth2; | ||
86 | |||
87 | /* Don't send an ACK if we aren't supposed to. */ | ||
88 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
89 | goto bail; | ||
90 | |||
91 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ | ||
92 | hwords = 5; | ||
93 | |||
94 | switch (qp->s_ack_state) { | ||
95 | case OP(RDMA_READ_RESPONSE_LAST): | ||
96 | case OP(RDMA_READ_RESPONSE_ONLY): | ||
97 | e = &qp->s_ack_queue[qp->s_tail_ack_queue]; | ||
98 | if (e->rdma_sge.mr) { | ||
99 | atomic_dec(&e->rdma_sge.mr->refcount); | ||
100 | e->rdma_sge.mr = NULL; | ||
101 | } | ||
102 | /* FALLTHROUGH */ | ||
103 | case OP(ATOMIC_ACKNOWLEDGE): | ||
104 | /* | ||
105 | * We can increment the tail pointer now that the last | ||
106 | * response has been sent instead of only being | ||
107 | * constructed. | ||
108 | */ | ||
109 | if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) | ||
110 | qp->s_tail_ack_queue = 0; | ||
111 | /* FALLTHROUGH */ | ||
112 | case OP(SEND_ONLY): | ||
113 | case OP(ACKNOWLEDGE): | ||
114 | /* Check for no next entry in the queue. */ | ||
115 | if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { | ||
116 | if (qp->s_flags & QIB_S_ACK_PENDING) | ||
117 | goto normal; | ||
118 | goto bail; | ||
119 | } | ||
120 | |||
121 | e = &qp->s_ack_queue[qp->s_tail_ack_queue]; | ||
122 | if (e->opcode == OP(RDMA_READ_REQUEST)) { | ||
123 | /* | ||
124 | * If an RDMA read response is being resent and | ||
125 | * we haven't seen the duplicate request yet, | ||
126 | * then stop sending the remaining responses the | ||
127 | * responder has seen until the requester resends it. | ||
128 | */ | ||
129 | len = e->rdma_sge.sge_length; | ||
130 | if (len && !e->rdma_sge.mr) { | ||
131 | qp->s_tail_ack_queue = qp->r_head_ack_queue; | ||
132 | goto bail; | ||
133 | } | ||
134 | /* Copy SGE state in case we need to resend */ | ||
135 | qp->s_rdma_mr = e->rdma_sge.mr; | ||
136 | if (qp->s_rdma_mr) | ||
137 | atomic_inc(&qp->s_rdma_mr->refcount); | ||
138 | qp->s_ack_rdma_sge.sge = e->rdma_sge; | ||
139 | qp->s_ack_rdma_sge.num_sge = 1; | ||
140 | qp->s_cur_sge = &qp->s_ack_rdma_sge; | ||
141 | if (len > pmtu) { | ||
142 | len = pmtu; | ||
143 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); | ||
144 | } else { | ||
145 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); | ||
146 | e->sent = 1; | ||
147 | } | ||
148 | ohdr->u.aeth = qib_compute_aeth(qp); | ||
149 | hwords++; | ||
150 | qp->s_ack_rdma_psn = e->psn; | ||
151 | bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK; | ||
152 | } else { | ||
153 | /* COMPARE_SWAP or FETCH_ADD */ | ||
154 | qp->s_cur_sge = NULL; | ||
155 | len = 0; | ||
156 | qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); | ||
157 | ohdr->u.at.aeth = qib_compute_aeth(qp); | ||
158 | ohdr->u.at.atomic_ack_eth[0] = | ||
159 | cpu_to_be32(e->atomic_data >> 32); | ||
160 | ohdr->u.at.atomic_ack_eth[1] = | ||
161 | cpu_to_be32(e->atomic_data); | ||
162 | hwords += sizeof(ohdr->u.at) / sizeof(u32); | ||
163 | bth2 = e->psn & QIB_PSN_MASK; | ||
164 | e->sent = 1; | ||
165 | } | ||
166 | bth0 = qp->s_ack_state << 24; | ||
167 | break; | ||
168 | |||
169 | case OP(RDMA_READ_RESPONSE_FIRST): | ||
170 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); | ||
171 | /* FALLTHROUGH */ | ||
172 | case OP(RDMA_READ_RESPONSE_MIDDLE): | ||
173 | qp->s_cur_sge = &qp->s_ack_rdma_sge; | ||
174 | qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; | ||
175 | if (qp->s_rdma_mr) | ||
176 | atomic_inc(&qp->s_rdma_mr->refcount); | ||
177 | len = qp->s_ack_rdma_sge.sge.sge_length; | ||
178 | if (len > pmtu) | ||
179 | len = pmtu; | ||
180 | else { | ||
181 | ohdr->u.aeth = qib_compute_aeth(qp); | ||
182 | hwords++; | ||
183 | qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); | ||
184 | e = &qp->s_ack_queue[qp->s_tail_ack_queue]; | ||
185 | e->sent = 1; | ||
186 | } | ||
187 | bth0 = qp->s_ack_state << 24; | ||
188 | bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK; | ||
189 | break; | ||
190 | |||
191 | default: | ||
192 | normal: | ||
193 | /* | ||
194 | * Send a regular ACK. | ||
195 | * Set the s_ack_state so we wait until after sending | ||
196 | * the ACK before setting s_ack_state to ACKNOWLEDGE | ||
197 | * (see above). | ||
198 | */ | ||
199 | qp->s_ack_state = OP(SEND_ONLY); | ||
200 | qp->s_flags &= ~QIB_S_ACK_PENDING; | ||
201 | qp->s_cur_sge = NULL; | ||
202 | if (qp->s_nak_state) | ||
203 | ohdr->u.aeth = | ||
204 | cpu_to_be32((qp->r_msn & QIB_MSN_MASK) | | ||
205 | (qp->s_nak_state << | ||
206 | QIB_AETH_CREDIT_SHIFT)); | ||
207 | else | ||
208 | ohdr->u.aeth = qib_compute_aeth(qp); | ||
209 | hwords++; | ||
210 | len = 0; | ||
211 | bth0 = OP(ACKNOWLEDGE) << 24; | ||
212 | bth2 = qp->s_ack_psn & QIB_PSN_MASK; | ||
213 | } | ||
214 | qp->s_rdma_ack_cnt++; | ||
215 | qp->s_hdrwords = hwords; | ||
216 | qp->s_cur_size = len; | ||
217 | qib_make_ruc_header(qp, ohdr, bth0, bth2); | ||
218 | return 1; | ||
219 | |||
220 | bail: | ||
221 | qp->s_ack_state = OP(ACKNOWLEDGE); | ||
222 | qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING); | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | /** | ||
227 | * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) | ||
228 | * @qp: a pointer to the QP | ||
229 | * | ||
230 | * Return 1 if constructed; otherwise, return 0. | ||
231 | */ | ||
232 | int qib_make_rc_req(struct qib_qp *qp) | ||
233 | { | ||
234 | struct qib_ibdev *dev = to_idev(qp->ibqp.device); | ||
235 | struct qib_other_headers *ohdr; | ||
236 | struct qib_sge_state *ss; | ||
237 | struct qib_swqe *wqe; | ||
238 | u32 hwords; | ||
239 | u32 len; | ||
240 | u32 bth0; | ||
241 | u32 bth2; | ||
242 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | ||
243 | char newreq; | ||
244 | unsigned long flags; | ||
245 | int ret = 0; | ||
246 | int delta; | ||
247 | |||
248 | ohdr = &qp->s_hdr.u.oth; | ||
249 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) | ||
250 | ohdr = &qp->s_hdr.u.l.oth; | ||
251 | |||
252 | /* | ||
253 | * The lock is needed to synchronize between the sending tasklet, | ||
254 | * the receive interrupt handler, and timeout resends. | ||
255 | */ | ||
256 | spin_lock_irqsave(&qp->s_lock, flags); | ||
257 | |||
258 | /* Sending responses has higher priority than sending requests. */ | ||
259 | if ((qp->s_flags & QIB_S_RESP_PENDING) && | ||
260 | qib_make_rc_ack(dev, qp, ohdr, pmtu)) | ||
261 | goto done; | ||
262 | |||
263 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { | ||
264 | if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) | ||
265 | goto bail; | ||
266 | /* We are in the error state, flush the work request. */ | ||
267 | if (qp->s_last == qp->s_head) | ||
268 | goto bail; | ||
269 | /* If DMAs are in progress, we can't flush immediately. */ | ||
270 | if (atomic_read(&qp->s_dma_busy)) { | ||
271 | qp->s_flags |= QIB_S_WAIT_DMA; | ||
272 | goto bail; | ||
273 | } | ||
274 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
275 | while (qp->s_last != qp->s_acked) { | ||
276 | qib_send_complete(qp, wqe, IB_WC_SUCCESS); | ||
277 | if (++qp->s_last >= qp->s_size) | ||
278 | qp->s_last = 0; | ||
279 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
280 | } | ||
281 | qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); | ||
282 | goto done; | ||
283 | } | ||
284 | |||
285 | if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK)) | ||
286 | goto bail; | ||
287 | |||
288 | if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) { | ||
289 | if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { | ||
290 | qp->s_flags |= QIB_S_WAIT_PSN; | ||
291 | goto bail; | ||
292 | } | ||
293 | qp->s_sending_psn = qp->s_psn; | ||
294 | qp->s_sending_hpsn = qp->s_psn - 1; | ||
295 | } | ||
296 | |||
297 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ | ||
298 | hwords = 5; | ||
299 | bth0 = 0; | ||
300 | |||
301 | /* Send a request. */ | ||
302 | wqe = get_swqe_ptr(qp, qp->s_cur); | ||
303 | switch (qp->s_state) { | ||
304 | default: | ||
305 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) | ||
306 | goto bail; | ||
307 | /* | ||
308 | * Resend an old request or start a new one. | ||
309 | * | ||
310 | * We keep track of the current SWQE so that | ||
311 | * we don't reset the "furthest progress" state | ||
312 | * if we need to back up. | ||
313 | */ | ||
314 | newreq = 0; | ||
315 | if (qp->s_cur == qp->s_tail) { | ||
316 | /* Check if send work queue is empty. */ | ||
317 | if (qp->s_tail == qp->s_head) | ||
318 | goto bail; | ||
319 | /* | ||
320 | * If a fence is requested, wait for previous | ||
321 | * RDMA read and atomic operations to finish. | ||
322 | */ | ||
323 | if ((wqe->wr.send_flags & IB_SEND_FENCE) && | ||
324 | qp->s_num_rd_atomic) { | ||
325 | qp->s_flags |= QIB_S_WAIT_FENCE; | ||
326 | goto bail; | ||
327 | } | ||
328 | wqe->psn = qp->s_next_psn; | ||
329 | newreq = 1; | ||
330 | } | ||
331 | /* | ||
332 | * Note that we have to be careful not to modify the | ||
333 | * original work request since we may need to resend | ||
334 | * it. | ||
335 | */ | ||
336 | len = wqe->length; | ||
337 | ss = &qp->s_sge; | ||
338 | bth2 = qp->s_psn & QIB_PSN_MASK; | ||
339 | switch (wqe->wr.opcode) { | ||
340 | case IB_WR_SEND: | ||
341 | case IB_WR_SEND_WITH_IMM: | ||
342 | /* If no credit, return. */ | ||
343 | if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && | ||
344 | qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { | ||
345 | qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; | ||
346 | goto bail; | ||
347 | } | ||
348 | wqe->lpsn = wqe->psn; | ||
349 | if (len > pmtu) { | ||
350 | wqe->lpsn += (len - 1) / pmtu; | ||
351 | qp->s_state = OP(SEND_FIRST); | ||
352 | len = pmtu; | ||
353 | break; | ||
354 | } | ||
355 | if (wqe->wr.opcode == IB_WR_SEND) | ||
356 | qp->s_state = OP(SEND_ONLY); | ||
357 | else { | ||
358 | qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); | ||
359 | /* Immediate data comes after the BTH */ | ||
360 | ohdr->u.imm_data = wqe->wr.ex.imm_data; | ||
361 | hwords += 1; | ||
362 | } | ||
363 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
364 | bth0 |= IB_BTH_SOLICITED; | ||
365 | bth2 |= IB_BTH_REQ_ACK; | ||
366 | if (++qp->s_cur == qp->s_size) | ||
367 | qp->s_cur = 0; | ||
368 | break; | ||
369 | |||
370 | case IB_WR_RDMA_WRITE: | ||
371 | if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) | ||
372 | qp->s_lsn++; | ||
373 | /* FALLTHROUGH */ | ||
374 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
375 | /* If no credit, return. */ | ||
376 | if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && | ||
377 | qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { | ||
378 | qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; | ||
379 | goto bail; | ||
380 | } | ||
381 | ohdr->u.rc.reth.vaddr = | ||
382 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | ||
383 | ohdr->u.rc.reth.rkey = | ||
384 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | ||
385 | ohdr->u.rc.reth.length = cpu_to_be32(len); | ||
386 | hwords += sizeof(struct ib_reth) / sizeof(u32); | ||
387 | wqe->lpsn = wqe->psn; | ||
388 | if (len > pmtu) { | ||
389 | wqe->lpsn += (len - 1) / pmtu; | ||
390 | qp->s_state = OP(RDMA_WRITE_FIRST); | ||
391 | len = pmtu; | ||
392 | break; | ||
393 | } | ||
394 | if (wqe->wr.opcode == IB_WR_RDMA_WRITE) | ||
395 | qp->s_state = OP(RDMA_WRITE_ONLY); | ||
396 | else { | ||
397 | qp->s_state = | ||
398 | OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); | ||
399 | /* Immediate data comes after RETH */ | ||
400 | ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; | ||
401 | hwords += 1; | ||
402 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
403 | bth0 |= IB_BTH_SOLICITED; | ||
404 | } | ||
405 | bth2 |= IB_BTH_REQ_ACK; | ||
406 | if (++qp->s_cur == qp->s_size) | ||
407 | qp->s_cur = 0; | ||
408 | break; | ||
409 | |||
410 | case IB_WR_RDMA_READ: | ||
411 | /* | ||
412 | * Don't allow more operations to be started | ||
413 | * than the QP limits allow. | ||
414 | */ | ||
415 | if (newreq) { | ||
416 | if (qp->s_num_rd_atomic >= | ||
417 | qp->s_max_rd_atomic) { | ||
418 | qp->s_flags |= QIB_S_WAIT_RDMAR; | ||
419 | goto bail; | ||
420 | } | ||
421 | qp->s_num_rd_atomic++; | ||
422 | if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) | ||
423 | qp->s_lsn++; | ||
424 | /* | ||
425 | * Adjust s_next_psn to count the | ||
426 | * expected number of responses. | ||
427 | */ | ||
428 | if (len > pmtu) | ||
429 | qp->s_next_psn += (len - 1) / pmtu; | ||
430 | wqe->lpsn = qp->s_next_psn++; | ||
431 | } | ||
432 | ohdr->u.rc.reth.vaddr = | ||
433 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | ||
434 | ohdr->u.rc.reth.rkey = | ||
435 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | ||
436 | ohdr->u.rc.reth.length = cpu_to_be32(len); | ||
437 | qp->s_state = OP(RDMA_READ_REQUEST); | ||
438 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | ||
439 | ss = NULL; | ||
440 | len = 0; | ||
441 | bth2 |= IB_BTH_REQ_ACK; | ||
442 | if (++qp->s_cur == qp->s_size) | ||
443 | qp->s_cur = 0; | ||
444 | break; | ||
445 | |||
446 | case IB_WR_ATOMIC_CMP_AND_SWP: | ||
447 | case IB_WR_ATOMIC_FETCH_AND_ADD: | ||
448 | /* | ||
449 | * Don't allow more operations to be started | ||
450 | * than the QP limits allow. | ||
451 | */ | ||
452 | if (newreq) { | ||
453 | if (qp->s_num_rd_atomic >= | ||
454 | qp->s_max_rd_atomic) { | ||
455 | qp->s_flags |= QIB_S_WAIT_RDMAR; | ||
456 | goto bail; | ||
457 | } | ||
458 | qp->s_num_rd_atomic++; | ||
459 | if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) | ||
460 | qp->s_lsn++; | ||
461 | wqe->lpsn = wqe->psn; | ||
462 | } | ||
463 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | ||
464 | qp->s_state = OP(COMPARE_SWAP); | ||
465 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | ||
466 | wqe->wr.wr.atomic.swap); | ||
467 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( | ||
468 | wqe->wr.wr.atomic.compare_add); | ||
469 | } else { | ||
470 | qp->s_state = OP(FETCH_ADD); | ||
471 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | ||
472 | wqe->wr.wr.atomic.compare_add); | ||
473 | ohdr->u.atomic_eth.compare_data = 0; | ||
474 | } | ||
475 | ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( | ||
476 | wqe->wr.wr.atomic.remote_addr >> 32); | ||
477 | ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( | ||
478 | wqe->wr.wr.atomic.remote_addr); | ||
479 | ohdr->u.atomic_eth.rkey = cpu_to_be32( | ||
480 | wqe->wr.wr.atomic.rkey); | ||
481 | hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); | ||
482 | ss = NULL; | ||
483 | len = 0; | ||
484 | bth2 |= IB_BTH_REQ_ACK; | ||
485 | if (++qp->s_cur == qp->s_size) | ||
486 | qp->s_cur = 0; | ||
487 | break; | ||
488 | |||
489 | default: | ||
490 | goto bail; | ||
491 | } | ||
492 | qp->s_sge.sge = wqe->sg_list[0]; | ||
493 | qp->s_sge.sg_list = wqe->sg_list + 1; | ||
494 | qp->s_sge.num_sge = wqe->wr.num_sge; | ||
495 | qp->s_sge.total_len = wqe->length; | ||
496 | qp->s_len = wqe->length; | ||
497 | if (newreq) { | ||
498 | qp->s_tail++; | ||
499 | if (qp->s_tail >= qp->s_size) | ||
500 | qp->s_tail = 0; | ||
501 | } | ||
502 | if (wqe->wr.opcode == IB_WR_RDMA_READ) | ||
503 | qp->s_psn = wqe->lpsn + 1; | ||
504 | else { | ||
505 | qp->s_psn++; | ||
506 | if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) | ||
507 | qp->s_next_psn = qp->s_psn; | ||
508 | } | ||
509 | break; | ||
510 | |||
511 | case OP(RDMA_READ_RESPONSE_FIRST): | ||
512 | /* | ||
513 | * qp->s_state is normally set to the opcode of the | ||
514 | * last packet constructed for new requests and therefore | ||
515 | * is never set to RDMA read response. | ||
516 | * RDMA_READ_RESPONSE_FIRST is used by the ACK processing | ||
517 | * thread to indicate a SEND needs to be restarted from an | ||
518 | * earlier PSN without interfering with the sending thread. | ||
519 | * See qib_restart_rc(). | ||
520 | */ | ||
521 | qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); | ||
522 | /* FALLTHROUGH */ | ||
523 | case OP(SEND_FIRST): | ||
524 | qp->s_state = OP(SEND_MIDDLE); | ||
525 | /* FALLTHROUGH */ | ||
526 | case OP(SEND_MIDDLE): | ||
527 | bth2 = qp->s_psn++ & QIB_PSN_MASK; | ||
528 | if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) | ||
529 | qp->s_next_psn = qp->s_psn; | ||
530 | ss = &qp->s_sge; | ||
531 | len = qp->s_len; | ||
532 | if (len > pmtu) { | ||
533 | len = pmtu; | ||
534 | break; | ||
535 | } | ||
536 | if (wqe->wr.opcode == IB_WR_SEND) | ||
537 | qp->s_state = OP(SEND_LAST); | ||
538 | else { | ||
539 | qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); | ||
540 | /* Immediate data comes after the BTH */ | ||
541 | ohdr->u.imm_data = wqe->wr.ex.imm_data; | ||
542 | hwords += 1; | ||
543 | } | ||
544 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
545 | bth0 |= IB_BTH_SOLICITED; | ||
546 | bth2 |= IB_BTH_REQ_ACK; | ||
547 | qp->s_cur++; | ||
548 | if (qp->s_cur >= qp->s_size) | ||
549 | qp->s_cur = 0; | ||
550 | break; | ||
551 | |||
552 | case OP(RDMA_READ_RESPONSE_LAST): | ||
553 | /* | ||
554 | * qp->s_state is normally set to the opcode of the | ||
555 | * last packet constructed for new requests and therefore | ||
556 | * is never set to RDMA read response. | ||
557 | * RDMA_READ_RESPONSE_LAST is used by the ACK processing | ||
558 | * thread to indicate an RDMA write needs to be restarted from | ||
559 | * an earlier PSN without interfering with the sending thread. | ||
560 | * See qib_restart_rc(). | ||
561 | */ | ||
562 | qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); | ||
563 | /* FALLTHROUGH */ | ||
564 | case OP(RDMA_WRITE_FIRST): | ||
565 | qp->s_state = OP(RDMA_WRITE_MIDDLE); | ||
566 | /* FALLTHROUGH */ | ||
567 | case OP(RDMA_WRITE_MIDDLE): | ||
568 | bth2 = qp->s_psn++ & QIB_PSN_MASK; | ||
569 | if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) | ||
570 | qp->s_next_psn = qp->s_psn; | ||
571 | ss = &qp->s_sge; | ||
572 | len = qp->s_len; | ||
573 | if (len > pmtu) { | ||
574 | len = pmtu; | ||
575 | break; | ||
576 | } | ||
577 | if (wqe->wr.opcode == IB_WR_RDMA_WRITE) | ||
578 | qp->s_state = OP(RDMA_WRITE_LAST); | ||
579 | else { | ||
580 | qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); | ||
581 | /* Immediate data comes after the BTH */ | ||
582 | ohdr->u.imm_data = wqe->wr.ex.imm_data; | ||
583 | hwords += 1; | ||
584 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
585 | bth0 |= IB_BTH_SOLICITED; | ||
586 | } | ||
587 | bth2 |= IB_BTH_REQ_ACK; | ||
588 | qp->s_cur++; | ||
589 | if (qp->s_cur >= qp->s_size) | ||
590 | qp->s_cur = 0; | ||
591 | break; | ||
592 | |||
593 | case OP(RDMA_READ_RESPONSE_MIDDLE): | ||
594 | /* | ||
595 | * qp->s_state is normally set to the opcode of the | ||
596 | * last packet constructed for new requests and therefore | ||
597 | * is never set to RDMA read response. | ||
598 | * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing | ||
599 | * thread to indicate an RDMA read needs to be restarted from | ||
600 | * an earlier PSN without interfering with the sending thread. | ||
601 | * See qib_restart_rc(). | ||
602 | */ | ||
603 | len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu; | ||
604 | ohdr->u.rc.reth.vaddr = | ||
605 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len); | ||
606 | ohdr->u.rc.reth.rkey = | ||
607 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | ||
608 | ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); | ||
609 | qp->s_state = OP(RDMA_READ_REQUEST); | ||
610 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | ||
611 | bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK; | ||
612 | qp->s_psn = wqe->lpsn + 1; | ||
613 | ss = NULL; | ||
614 | len = 0; | ||
615 | qp->s_cur++; | ||
616 | if (qp->s_cur == qp->s_size) | ||
617 | qp->s_cur = 0; | ||
618 | break; | ||
619 | } | ||
620 | qp->s_sending_hpsn = bth2; | ||
621 | delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8; | ||
622 | if (delta && delta % QIB_PSN_CREDIT == 0) | ||
623 | bth2 |= IB_BTH_REQ_ACK; | ||
624 | if (qp->s_flags & QIB_S_SEND_ONE) { | ||
625 | qp->s_flags &= ~QIB_S_SEND_ONE; | ||
626 | qp->s_flags |= QIB_S_WAIT_ACK; | ||
627 | bth2 |= IB_BTH_REQ_ACK; | ||
628 | } | ||
629 | qp->s_len -= len; | ||
630 | qp->s_hdrwords = hwords; | ||
631 | qp->s_cur_sge = ss; | ||
632 | qp->s_cur_size = len; | ||
633 | qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2); | ||
634 | done: | ||
635 | ret = 1; | ||
636 | goto unlock; | ||
637 | |||
638 | bail: | ||
639 | qp->s_flags &= ~QIB_S_BUSY; | ||
640 | unlock: | ||
641 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
642 | return ret; | ||
643 | } | ||
644 | |||
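The request builder above leans on 24-bit serial-number arithmetic throughout: PSNs live in a 24-bit space, so qib_cmp24() and the delta computation near the end of qib_make_rc_req() have to sign-extend the 24-bit difference before testing its sign. A minimal user-space sketch of such a comparison, assuming the usual 24-bit PSN mask (cmp24() here is an illustration, not the driver's helper):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: compare two 24-bit PSNs the way the delta
 * computation in qib_make_rc_req() does, by taking the 24-bit modular
 * difference and sign-extending bit 23. */
static int cmp24(uint32_t a, uint32_t b)
{
	int32_t d = (int32_t)((a - b) & 0xffffff);

	if (d & 0x800000)	/* sign-extend the 24-bit difference */
		d -= 0x1000000;
	return d;		/* <0: a before b, 0: equal, >0: a after b */
}

int main(void)
{
	/* 0xfffffe precedes 0x000001 once the 24-bit PSN space wraps. */
	printf("%d\n", cmp24(0xfffffe, 0x000001));	/* prints -3 */
	return 0;
}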
645 | /** | ||
646 | * qib_send_rc_ack - Construct an ACK packet and send it | ||
647 | * @qp: a pointer to the QP | ||
648 | * | ||
649 | * This is called from qib_rc_rcv() and qib_kreceive(). | ||
650 | * Note that RDMA reads and atomics are handled in the | ||
651 | * send side QP state and tasklet. | ||
652 | */ | ||
653 | void qib_send_rc_ack(struct qib_qp *qp) | ||
654 | { | ||
655 | struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); | ||
656 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
657 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
658 | u64 pbc; | ||
659 | u16 lrh0; | ||
660 | u32 bth0; | ||
661 | u32 hwords; | ||
662 | u32 pbufn; | ||
663 | u32 __iomem *piobuf; | ||
664 | struct qib_ib_header hdr; | ||
665 | struct qib_other_headers *ohdr; | ||
666 | u32 control; | ||
667 | unsigned long flags; | ||
668 | |||
669 | spin_lock_irqsave(&qp->s_lock, flags); | ||
670 | |||
671 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
672 | goto unlock; | ||
673 | |||
674 | /* Don't send ACK or NAK if an RDMA read or atomic is pending. */ | ||
675 | if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt) | ||
676 | goto queue_ack; | ||
677 | |||
678 | /* Construct the header with s_lock held so APM doesn't change it. */ | ||
679 | ohdr = &hdr.u.oth; | ||
680 | lrh0 = QIB_LRH_BTH; | ||
681 | /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */ | ||
682 | hwords = 6; | ||
683 | if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { | ||
684 | hwords += qib_make_grh(ibp, &hdr.u.l.grh, | ||
685 | &qp->remote_ah_attr.grh, hwords, 0); | ||
686 | ohdr = &hdr.u.l.oth; | ||
687 | lrh0 = QIB_LRH_GRH; | ||
688 | } | ||
689 | /* read pkey_index w/o lock (it's atomic) */ | ||
690 | bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24); | ||
691 | if (qp->s_mig_state == IB_MIG_MIGRATED) | ||
692 | bth0 |= IB_BTH_MIG_REQ; | ||
693 | if (qp->r_nak_state) | ||
694 | ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) | | ||
695 | (qp->r_nak_state << | ||
696 | QIB_AETH_CREDIT_SHIFT)); | ||
697 | else | ||
698 | ohdr->u.aeth = qib_compute_aeth(qp); | ||
699 | lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | | ||
700 | qp->remote_ah_attr.sl << 4; | ||
701 | hdr.lrh[0] = cpu_to_be16(lrh0); | ||
702 | hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); | ||
703 | hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); | ||
704 | hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits); | ||
705 | ohdr->bth[0] = cpu_to_be32(bth0); | ||
706 | ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); | ||
707 | ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK); | ||
708 | |||
709 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
710 | |||
711 | /* Don't try to send ACKs if the link isn't ACTIVE */ | ||
712 | if (!(ppd->lflags & QIBL_LINKACTIVE)) | ||
713 | goto done; | ||
714 | |||
715 | control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC, | ||
716 | qp->s_srate, lrh0 >> 12); | ||
717 | /* length is + 1 for the control dword */ | ||
718 | pbc = ((u64) control << 32) | (hwords + 1); | ||
719 | |||
720 | piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn); | ||
721 | if (!piobuf) { | ||
722 | /* | ||
723 | * We are out of PIO buffers at the moment. | ||
724 | * Pass responsibility for sending the ACK to the | ||
725 | * send tasklet so that when a PIO buffer becomes | ||
726 | * available, the ACK is sent ahead of other outgoing | ||
727 | * packets. | ||
728 | */ | ||
729 | spin_lock_irqsave(&qp->s_lock, flags); | ||
730 | goto queue_ack; | ||
731 | } | ||
732 | |||
733 | /* | ||
734 | * Write the pbc. | ||
735 | * We have to flush after the PBC for correctness | ||
736 | * on some CPUs, or the WC buffer can be written out of order. | ||
737 | */ | ||
738 | writeq(pbc, piobuf); | ||
739 | |||
740 | if (dd->flags & QIB_PIO_FLUSH_WC) { | ||
741 | u32 *hdrp = (u32 *) &hdr; | ||
742 | |||
743 | qib_flush_wc(); | ||
744 | qib_pio_copy(piobuf + 2, hdrp, hwords - 1); | ||
745 | qib_flush_wc(); | ||
746 | __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1); | ||
747 | } else | ||
748 | qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords); | ||
749 | |||
750 | if (dd->flags & QIB_USE_SPCL_TRIG) { | ||
751 | u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; | ||
752 | |||
753 | qib_flush_wc(); | ||
754 | __raw_writel(0xaebecede, piobuf + spcl_off); | ||
755 | } | ||
756 | |||
757 | qib_flush_wc(); | ||
758 | qib_sendbuf_done(dd, pbufn); | ||
759 | |||
760 | ibp->n_unicast_xmit++; | ||
761 | goto done; | ||
762 | |||
763 | queue_ack: | ||
764 | if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { | ||
765 | ibp->n_rc_qacks++; | ||
766 | qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING; | ||
767 | qp->s_nak_state = qp->r_nak_state; | ||
768 | qp->s_ack_psn = qp->r_ack_psn; | ||
769 | |||
770 | /* Schedule the send tasklet. */ | ||
771 | qib_schedule_send(qp); | ||
772 | } | ||
773 | unlock: | ||
774 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
775 | done: | ||
776 | return; | ||
777 | } | ||
778 | |||
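qib_send_rc_ack() pushes the ACK through a PIO buffer by writing a two-dword PBC first: a chip-specific control word in the upper 32 bits (from dd->f_setpbc_control()) and the send length, header dwords plus one for the control dword, in the lower 32 bits. A small sketch of that packing; the control value below is a placeholder, only the layout mirrors the code above:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the pbc assembly in qib_send_rc_ack(). */
static uint64_t build_pbc(uint32_t control, uint32_t hwords)
{
	return ((uint64_t)control << 32) | (hwords + 1);
}

int main(void)
{
	/* 6 header dwords (LRH + BTH + AETH) plus one control dword;
	 * 0xdeadbeef stands in for the chip-specific control word. */
	printf("pbc = 0x%016llx\n",
	       (unsigned long long)build_pbc(0xdeadbeefu, 6));
	return 0;
}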
779 | /** | ||
780 | * reset_psn - reset the QP state to send starting from PSN | ||
781 | * @qp: the QP | ||
782 | * @psn: the packet sequence number to restart at | ||
783 | * | ||
784 | * This is called from qib_rc_rcv() to process an incoming RC ACK | ||
785 | * for the given QP. | ||
786 | * Called at interrupt level with the QP s_lock held. | ||
787 | */ | ||
788 | static void reset_psn(struct qib_qp *qp, u32 psn) | ||
789 | { | ||
790 | u32 n = qp->s_acked; | ||
791 | struct qib_swqe *wqe = get_swqe_ptr(qp, n); | ||
792 | u32 opcode; | ||
793 | |||
794 | qp->s_cur = n; | ||
795 | |||
796 | /* | ||
797 | * If we are starting the request from the beginning, | ||
798 | * let the normal send code handle initialization. | ||
799 | */ | ||
800 | if (qib_cmp24(psn, wqe->psn) <= 0) { | ||
801 | qp->s_state = OP(SEND_LAST); | ||
802 | goto done; | ||
803 | } | ||
804 | |||
805 | /* Find the work request opcode corresponding to the given PSN. */ | ||
806 | opcode = wqe->wr.opcode; | ||
807 | for (;;) { | ||
808 | int diff; | ||
809 | |||
810 | if (++n == qp->s_size) | ||
811 | n = 0; | ||
812 | if (n == qp->s_tail) | ||
813 | break; | ||
814 | wqe = get_swqe_ptr(qp, n); | ||
815 | diff = qib_cmp24(psn, wqe->psn); | ||
816 | if (diff < 0) | ||
817 | break; | ||
818 | qp->s_cur = n; | ||
819 | /* | ||
820 | * If we are starting the request from the beginning, | ||
821 | * let the normal send code handle initialization. | ||
822 | */ | ||
823 | if (diff == 0) { | ||
824 | qp->s_state = OP(SEND_LAST); | ||
825 | goto done; | ||
826 | } | ||
827 | opcode = wqe->wr.opcode; | ||
828 | } | ||
829 | |||
830 | /* | ||
831 | * Set the state to restart in the middle of a request. | ||
832 | * Don't change the s_sge, s_cur_sge, or s_cur_size. | ||
833 | * See qib_make_rc_req(). | ||
834 | */ | ||
835 | switch (opcode) { | ||
836 | case IB_WR_SEND: | ||
837 | case IB_WR_SEND_WITH_IMM: | ||
838 | qp->s_state = OP(RDMA_READ_RESPONSE_FIRST); | ||
839 | break; | ||
840 | |||
841 | case IB_WR_RDMA_WRITE: | ||
842 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
843 | qp->s_state = OP(RDMA_READ_RESPONSE_LAST); | ||
844 | break; | ||
845 | |||
846 | case IB_WR_RDMA_READ: | ||
847 | qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE); | ||
848 | break; | ||
849 | |||
850 | default: | ||
851 | /* | ||
852 | * This case shouldn't happen since there is only | ||
853 | * one PSN per req. | ||
854 | */ | ||
855 | qp->s_state = OP(SEND_LAST); | ||
856 | } | ||
857 | done: | ||
858 | qp->s_psn = psn; | ||
859 | /* | ||
860 | * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer | ||
861 | * asynchronously before the send tasklet can get scheduled. | ||
862 | * Doing it in qib_make_rc_req() is too late. | ||
863 | */ | ||
864 | if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) && | ||
865 | (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) | ||
866 | qp->s_flags |= QIB_S_WAIT_PSN; | ||
867 | } | ||
868 | |||
869 | /* | ||
870 | * Back up requester to resend the last un-ACKed request. | ||
871 | * The QP s_lock should be held and interrupts disabled. | ||
872 | */ | ||
873 | static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) | ||
874 | { | ||
875 | struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); | ||
876 | struct qib_ibport *ibp; | ||
877 | |||
878 | if (qp->s_retry == 0) { | ||
879 | if (qp->s_mig_state == IB_MIG_ARMED) { | ||
880 | qib_migrate_qp(qp); | ||
881 | qp->s_retry = qp->s_retry_cnt; | ||
882 | } else if (qp->s_last == qp->s_acked) { | ||
883 | qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); | ||
884 | qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); | ||
885 | return; | ||
886 | } else /* XXX need to handle delayed completion */ | ||
887 | return; | ||
888 | } else | ||
889 | qp->s_retry--; | ||
890 | |||
891 | ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
892 | if (wqe->wr.opcode == IB_WR_RDMA_READ) | ||
893 | ibp->n_rc_resends++; | ||
894 | else | ||
895 | ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; | ||
896 | |||
897 | qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | | ||
898 | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN | | ||
899 | QIB_S_WAIT_ACK); | ||
900 | if (wait) | ||
901 | qp->s_flags |= QIB_S_SEND_ONE; | ||
902 | reset_psn(qp, psn); | ||
903 | } | ||
904 | |||
905 | /* | ||
906 | * This is called from s_timer for missing responses. | ||
907 | */ | ||
908 | static void rc_timeout(unsigned long arg) | ||
909 | { | ||
910 | struct qib_qp *qp = (struct qib_qp *)arg; | ||
911 | struct qib_ibport *ibp; | ||
912 | unsigned long flags; | ||
913 | |||
914 | spin_lock_irqsave(&qp->s_lock, flags); | ||
915 | if (qp->s_flags & QIB_S_TIMER) { | ||
916 | ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
917 | ibp->n_rc_timeouts++; | ||
918 | qp->s_flags &= ~QIB_S_TIMER; | ||
919 | del_timer(&qp->s_timer); | ||
920 | qib_restart_rc(qp, qp->s_last_psn + 1, 1); | ||
921 | qib_schedule_send(qp); | ||
922 | } | ||
923 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
924 | } | ||
925 | |||
926 | /* | ||
927 | * This is called from s_timer for RNR timeouts. | ||
928 | */ | ||
929 | void qib_rc_rnr_retry(unsigned long arg) | ||
930 | { | ||
931 | struct qib_qp *qp = (struct qib_qp *)arg; | ||
932 | unsigned long flags; | ||
933 | |||
934 | spin_lock_irqsave(&qp->s_lock, flags); | ||
935 | if (qp->s_flags & QIB_S_WAIT_RNR) { | ||
936 | qp->s_flags &= ~QIB_S_WAIT_RNR; | ||
937 | del_timer(&qp->s_timer); | ||
938 | qib_schedule_send(qp); | ||
939 | } | ||
940 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
941 | } | ||
942 | |||
943 | /* | ||
944 | * Set qp->s_sending_psn to the next PSN after the given one. | ||
945 | * This would be psn+1 except when RDMA reads are present. | ||
946 | */ | ||
947 | static void reset_sending_psn(struct qib_qp *qp, u32 psn) | ||
948 | { | ||
949 | struct qib_swqe *wqe; | ||
950 | u32 n = qp->s_last; | ||
951 | |||
952 | /* Find the work request corresponding to the given PSN. */ | ||
953 | for (;;) { | ||
954 | wqe = get_swqe_ptr(qp, n); | ||
955 | if (qib_cmp24(psn, wqe->lpsn) <= 0) { | ||
956 | if (wqe->wr.opcode == IB_WR_RDMA_READ) | ||
957 | qp->s_sending_psn = wqe->lpsn + 1; | ||
958 | else | ||
959 | qp->s_sending_psn = psn + 1; | ||
960 | break; | ||
961 | } | ||
962 | if (++n == qp->s_size) | ||
963 | n = 0; | ||
964 | if (n == qp->s_tail) | ||
965 | break; | ||
966 | } | ||
967 | } | ||
968 | |||
969 | /* | ||
970 | * This should be called with the QP s_lock held and interrupts disabled. | ||
971 | */ | ||
972 | void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) | ||
973 | { | ||
974 | struct qib_other_headers *ohdr; | ||
975 | struct qib_swqe *wqe; | ||
976 | struct ib_wc wc; | ||
977 | unsigned i; | ||
978 | u32 opcode; | ||
979 | u32 psn; | ||
980 | |||
981 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) | ||
982 | return; | ||
983 | |||
984 | /* Find out where the BTH is */ | ||
985 | if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH) | ||
986 | ohdr = &hdr->u.oth; | ||
987 | else | ||
988 | ohdr = &hdr->u.l.oth; | ||
989 | |||
990 | opcode = be32_to_cpu(ohdr->bth[0]) >> 24; | ||
991 | if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && | ||
992 | opcode <= OP(ATOMIC_ACKNOWLEDGE)) { | ||
993 | WARN_ON(!qp->s_rdma_ack_cnt); | ||
994 | qp->s_rdma_ack_cnt--; | ||
995 | return; | ||
996 | } | ||
997 | |||
998 | psn = be32_to_cpu(ohdr->bth[2]); | ||
999 | reset_sending_psn(qp, psn); | ||
1000 | |||
1001 | /* | ||
1002 | * Start timer after a packet requesting an ACK has been sent and | ||
1003 | * there are still requests that haven't been acked. | ||
1004 | */ | ||
1005 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && | ||
1006 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) | ||
1007 | start_timer(qp); | ||
1008 | |||
1009 | while (qp->s_last != qp->s_acked) { | ||
1010 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
1011 | if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 && | ||
1012 | qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) | ||
1013 | break; | ||
1014 | for (i = 0; i < wqe->wr.num_sge; i++) { | ||
1015 | struct qib_sge *sge = &wqe->sg_list[i]; | ||
1016 | |||
1017 | atomic_dec(&sge->mr->refcount); | ||
1018 | } | ||
1019 | /* Post a send completion queue entry if requested. */ | ||
1020 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || | ||
1021 | (wqe->wr.send_flags & IB_SEND_SIGNALED)) { | ||
1022 | memset(&wc, 0, sizeof wc); | ||
1023 | wc.wr_id = wqe->wr.wr_id; | ||
1024 | wc.status = IB_WC_SUCCESS; | ||
1025 | wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; | ||
1026 | wc.byte_len = wqe->length; | ||
1027 | wc.qp = &qp->ibqp; | ||
1028 | qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); | ||
1029 | } | ||
1030 | if (++qp->s_last >= qp->s_size) | ||
1031 | qp->s_last = 0; | ||
1032 | } | ||
1033 | /* | ||
1034 | * If we were waiting for sends to complete before resending, | ||
1035 | * and they are now complete, restart sending. | ||
1036 | */ | ||
1037 | if (qp->s_flags & QIB_S_WAIT_PSN && | ||
1038 | qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { | ||
1039 | qp->s_flags &= ~QIB_S_WAIT_PSN; | ||
1040 | qp->s_sending_psn = qp->s_psn; | ||
1041 | qp->s_sending_hpsn = qp->s_psn - 1; | ||
1042 | qib_schedule_send(qp); | ||
1043 | } | ||
1044 | } | ||
1045 | |||
1046 | static inline void update_last_psn(struct qib_qp *qp, u32 psn) | ||
1047 | { | ||
1048 | qp->s_last_psn = psn; | ||
1049 | } | ||
1050 | |||
1051 | /* | ||
1052 | * Generate a SWQE completion. | ||
1053 | * This is similar to qib_send_complete but has to check to be sure | ||
1054 | * that the SGEs are not being referenced if the SWQE is being resent. | ||
1055 | */ | ||
1056 | static struct qib_swqe *do_rc_completion(struct qib_qp *qp, | ||
1057 | struct qib_swqe *wqe, | ||
1058 | struct qib_ibport *ibp) | ||
1059 | { | ||
1060 | struct ib_wc wc; | ||
1061 | unsigned i; | ||
1062 | |||
1063 | /* | ||
1064 | * Don't decrement refcount and don't generate a | ||
1065 | * completion if the SWQE is being resent until the send | ||
1066 | * is finished. | ||
1067 | */ | ||
1068 | if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 || | ||
1069 | qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { | ||
1070 | for (i = 0; i < wqe->wr.num_sge; i++) { | ||
1071 | struct qib_sge *sge = &wqe->sg_list[i]; | ||
1072 | |||
1073 | atomic_dec(&sge->mr->refcount); | ||
1074 | } | ||
1075 | /* Post a send completion queue entry if requested. */ | ||
1076 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || | ||
1077 | (wqe->wr.send_flags & IB_SEND_SIGNALED)) { | ||
1078 | memset(&wc, 0, sizeof wc); | ||
1079 | wc.wr_id = wqe->wr.wr_id; | ||
1080 | wc.status = IB_WC_SUCCESS; | ||
1081 | wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; | ||
1082 | wc.byte_len = wqe->length; | ||
1083 | wc.qp = &qp->ibqp; | ||
1084 | qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); | ||
1085 | } | ||
1086 | if (++qp->s_last >= qp->s_size) | ||
1087 | qp->s_last = 0; | ||
1088 | } else | ||
1089 | ibp->n_rc_delayed_comp++; | ||
1090 | |||
1091 | qp->s_retry = qp->s_retry_cnt; | ||
1092 | update_last_psn(qp, wqe->lpsn); | ||
1093 | |||
1094 | /* | ||
1095 | * If we are completing a request which is in the process of | ||
1096 | * being resent, we can stop resending it since we know the | ||
1097 | * responder has already seen it. | ||
1098 | */ | ||
1099 | if (qp->s_acked == qp->s_cur) { | ||
1100 | if (++qp->s_cur >= qp->s_size) | ||
1101 | qp->s_cur = 0; | ||
1102 | qp->s_acked = qp->s_cur; | ||
1103 | wqe = get_swqe_ptr(qp, qp->s_cur); | ||
1104 | if (qp->s_acked != qp->s_tail) { | ||
1105 | qp->s_state = OP(SEND_LAST); | ||
1106 | qp->s_psn = wqe->psn; | ||
1107 | } | ||
1108 | } else { | ||
1109 | if (++qp->s_acked >= qp->s_size) | ||
1110 | qp->s_acked = 0; | ||
1111 | if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) | ||
1112 | qp->s_draining = 0; | ||
1113 | wqe = get_swqe_ptr(qp, qp->s_acked); | ||
1114 | } | ||
1115 | return wqe; | ||
1116 | } | ||
1117 | |||
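do_rc_completion() only retires a SWQE, drops its SGE references, and posts the CQE once none of its packets can still be on the wire: the request's last PSN has to be behind the current send window, or nothing may be in the middle of being sent at all. The same test, pulled out as a stand-alone predicate for illustration (field names follow the code above; cmp24() is the 24-bit comparison sketched earlier):

#include <stdint.h>
#include <stdio.h>

static int cmp24(uint32_t a, uint32_t b)
{
	int32_t d = (int32_t)((a - b) & 0xffffff);

	if (d & 0x800000)
		d -= 0x1000000;
	return d;
}

/* Mirrors the gating test at the top of do_rc_completion(): complete the
 * SWQE only if its last PSN (lpsn) is already behind s_sending_psn, or the
 * send window is empty (s_sending_psn has passed s_sending_hpsn). */
static int swqe_send_finished(uint32_t lpsn, uint32_t sending_psn,
			      uint32_t sending_hpsn)
{
	return cmp24(lpsn, sending_psn) < 0 ||
	       cmp24(sending_psn, sending_hpsn) > 0;
}

int main(void)
{
	/* lpsn 0x10 is behind a send window starting at 0x11: OK to complete. */
	printf("%d\n", swqe_send_finished(0x10, 0x11, 0x20));
	return 0;
}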
1118 | /** | ||
1119 | * do_rc_ack - process an incoming RC ACK | ||
1120 | * @qp: the QP the ACK came in on | ||
1121 | * @psn: the packet sequence number of the ACK | ||
1122 | * @opcode: the opcode of the request that resulted in the ACK | ||
1123 | * | ||
1124 | * This is called from qib_rc_rcv_resp() to process an incoming RC ACK | ||
1125 | * for the given QP. | ||
1126 | * Called at interrupt level with the QP s_lock held. | ||
1127 | * Returns 1 if OK, 0 if current operation should be aborted (NAK). | ||
1128 | */ | ||
1129 | static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, | ||
1130 | u64 val, struct qib_ctxtdata *rcd) | ||
1131 | { | ||
1132 | struct qib_ibport *ibp; | ||
1133 | enum ib_wc_status status; | ||
1134 | struct qib_swqe *wqe; | ||
1135 | int ret = 0; | ||
1136 | u32 ack_psn; | ||
1137 | int diff; | ||
1138 | |||
1139 | /* Remove QP from retry timer */ | ||
1140 | if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { | ||
1141 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); | ||
1142 | del_timer(&qp->s_timer); | ||
1143 | } | ||
1144 | |||
1145 | /* | ||
1146 | * Note that NAKs implicitly ACK outstanding SEND and RDMA write | ||
1147 | * requests and implicitly NAK RDMA read and atomic requests issued | ||
1148 | * before the NAK'ed request. The MSN won't include the NAK'ed | ||
1149 | * request but will include any ACK'ed requests. | ||
1150 | */ | ||
1151 | ack_psn = psn; | ||
1152 | if (aeth >> 29) | ||
1153 | ack_psn--; | ||
1154 | wqe = get_swqe_ptr(qp, qp->s_acked); | ||
1155 | ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
1156 | |||
1157 | /* | ||
1158 | * The MSN might be for a later WQE than the PSN indicates so | ||
1159 | * only complete WQEs that the PSN finishes. | ||
1160 | */ | ||
1161 | while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) { | ||
1162 | /* | ||
1163 | * RDMA_READ_RESPONSE_ONLY is a special case since | ||
1164 | * we want to generate completion events for everything | ||
1165 | * before the RDMA read, copy the data, then generate | ||
1166 | * the completion for the read. | ||
1167 | */ | ||
1168 | if (wqe->wr.opcode == IB_WR_RDMA_READ && | ||
1169 | opcode == OP(RDMA_READ_RESPONSE_ONLY) && | ||
1170 | diff == 0) { | ||
1171 | ret = 1; | ||
1172 | goto bail; | ||
1173 | } | ||
1174 | /* | ||
1175 | * If this request is an RDMA read or atomic, and the ACK is | ||
1176 | * for a later operation, this ACK NAKs the RDMA read or | ||
1177 | * atomic. In other words, only an RDMA_READ_LAST or ONLY | ||
1178 | * can ACK an RDMA read and likewise for atomic ops. Note | ||
1179 | * that the NAK case can only happen if relaxed ordering is | ||
1180 | * used and requests are sent after an RDMA read or atomic | ||
1181 | * is sent but before the response is received. | ||
1182 | */ | ||
1183 | if ((wqe->wr.opcode == IB_WR_RDMA_READ && | ||
1184 | (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) || | ||
1185 | ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || | ||
1186 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && | ||
1187 | (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) { | ||
1188 | /* Retry this request. */ | ||
1189 | if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) { | ||
1190 | qp->r_flags |= QIB_R_RDMAR_SEQ; | ||
1191 | qib_restart_rc(qp, qp->s_last_psn + 1, 0); | ||
1192 | if (list_empty(&qp->rspwait)) { | ||
1193 | qp->r_flags |= QIB_R_RSP_SEND; | ||
1194 | atomic_inc(&qp->refcount); | ||
1195 | list_add_tail(&qp->rspwait, | ||
1196 | &rcd->qp_wait_list); | ||
1197 | } | ||
1198 | } | ||
1199 | /* | ||
1200 | * No need to process the ACK/NAK since we are | ||
1201 | * restarting an earlier request. | ||
1202 | */ | ||
1203 | goto bail; | ||
1204 | } | ||
1205 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || | ||
1206 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { | ||
1207 | u64 *vaddr = wqe->sg_list[0].vaddr; | ||
1208 | *vaddr = val; | ||
1209 | } | ||
1210 | if (qp->s_num_rd_atomic && | ||
1211 | (wqe->wr.opcode == IB_WR_RDMA_READ || | ||
1212 | wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || | ||
1213 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { | ||
1214 | qp->s_num_rd_atomic--; | ||
1215 | /* Restart sending task if fence is complete */ | ||
1216 | if ((qp->s_flags & QIB_S_WAIT_FENCE) && | ||
1217 | !qp->s_num_rd_atomic) { | ||
1218 | qp->s_flags &= ~(QIB_S_WAIT_FENCE | | ||
1219 | QIB_S_WAIT_ACK); | ||
1220 | qib_schedule_send(qp); | ||
1221 | } else if (qp->s_flags & QIB_S_WAIT_RDMAR) { | ||
1222 | qp->s_flags &= ~(QIB_S_WAIT_RDMAR | | ||
1223 | QIB_S_WAIT_ACK); | ||
1224 | qib_schedule_send(qp); | ||
1225 | } | ||
1226 | } | ||
1227 | wqe = do_rc_completion(qp, wqe, ibp); | ||
1228 | if (qp->s_acked == qp->s_tail) | ||
1229 | break; | ||
1230 | } | ||
1231 | |||
1232 | switch (aeth >> 29) { | ||
1233 | case 0: /* ACK */ | ||
1234 | ibp->n_rc_acks++; | ||
1235 | if (qp->s_acked != qp->s_tail) { | ||
1236 | /* | ||
1237 | * We are expecting more ACKs so | ||
1238 | * reset the retransmit timer. | ||
1239 | */ | ||
1240 | start_timer(qp); | ||
1241 | /* | ||
1242 | * We can stop resending the earlier packets and | ||
1243 | * continue with the next packet the receiver wants. | ||
1244 | */ | ||
1245 | if (qib_cmp24(qp->s_psn, psn) <= 0) | ||
1246 | reset_psn(qp, psn + 1); | ||
1247 | } else if (qib_cmp24(qp->s_psn, psn) <= 0) { | ||
1248 | qp->s_state = OP(SEND_LAST); | ||
1249 | qp->s_psn = psn + 1; | ||
1250 | } | ||
1251 | if (qp->s_flags & QIB_S_WAIT_ACK) { | ||
1252 | qp->s_flags &= ~QIB_S_WAIT_ACK; | ||
1253 | qib_schedule_send(qp); | ||
1254 | } | ||
1255 | qib_get_credit(qp, aeth); | ||
1256 | qp->s_rnr_retry = qp->s_rnr_retry_cnt; | ||
1257 | qp->s_retry = qp->s_retry_cnt; | ||
1258 | update_last_psn(qp, psn); | ||
1259 | ret = 1; | ||
1260 | goto bail; | ||
1261 | |||
1262 | case 1: /* RNR NAK */ | ||
1263 | ibp->n_rnr_naks++; | ||
1264 | if (qp->s_acked == qp->s_tail) | ||
1265 | goto bail; | ||
1266 | if (qp->s_flags & QIB_S_WAIT_RNR) | ||
1267 | goto bail; | ||
1268 | if (qp->s_rnr_retry == 0) { | ||
1269 | status = IB_WC_RNR_RETRY_EXC_ERR; | ||
1270 | goto class_b; | ||
1271 | } | ||
1272 | if (qp->s_rnr_retry_cnt < 7) | ||
1273 | qp->s_rnr_retry--; | ||
1274 | |||
1275 | /* The last valid PSN is the previous PSN. */ | ||
1276 | update_last_psn(qp, psn - 1); | ||
1277 | |||
1278 | ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; | ||
1279 | |||
1280 | reset_psn(qp, psn); | ||
1281 | |||
1282 | qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK); | ||
1283 | qp->s_flags |= QIB_S_WAIT_RNR; | ||
1284 | qp->s_timer.function = qib_rc_rnr_retry; | ||
1285 | qp->s_timer.expires = jiffies + usecs_to_jiffies( | ||
1286 | ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) & | ||
1287 | QIB_AETH_CREDIT_MASK]); | ||
1288 | add_timer(&qp->s_timer); | ||
1289 | goto bail; | ||
1290 | |||
1291 | case 3: /* NAK */ | ||
1292 | if (qp->s_acked == qp->s_tail) | ||
1293 | goto bail; | ||
1294 | /* The last valid PSN is the previous PSN. */ | ||
1295 | update_last_psn(qp, psn - 1); | ||
1296 | switch ((aeth >> QIB_AETH_CREDIT_SHIFT) & | ||
1297 | QIB_AETH_CREDIT_MASK) { | ||
1298 | case 0: /* PSN sequence error */ | ||
1299 | ibp->n_seq_naks++; | ||
1300 | /* | ||
1301 | * Back up to the responder's expected PSN. | ||
1302 | * Note that we might get a NAK in the middle of an | ||
1303 | * RDMA READ response which terminates the RDMA | ||
1304 | * READ. | ||
1305 | */ | ||
1306 | qib_restart_rc(qp, psn, 0); | ||
1307 | qib_schedule_send(qp); | ||
1308 | break; | ||
1309 | |||
1310 | case 1: /* Invalid Request */ | ||
1311 | status = IB_WC_REM_INV_REQ_ERR; | ||
1312 | ibp->n_other_naks++; | ||
1313 | goto class_b; | ||
1314 | |||
1315 | case 2: /* Remote Access Error */ | ||
1316 | status = IB_WC_REM_ACCESS_ERR; | ||
1317 | ibp->n_other_naks++; | ||
1318 | goto class_b; | ||
1319 | |||
1320 | case 3: /* Remote Operation Error */ | ||
1321 | status = IB_WC_REM_OP_ERR; | ||
1322 | ibp->n_other_naks++; | ||
1323 | class_b: | ||
1324 | if (qp->s_last == qp->s_acked) { | ||
1325 | qib_send_complete(qp, wqe, status); | ||
1326 | qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); | ||
1327 | } | ||
1328 | break; | ||
1329 | |||
1330 | default: | ||
1331 | /* Ignore other reserved NAK error codes */ | ||
1332 | goto reserved; | ||
1333 | } | ||
1334 | qp->s_retry = qp->s_retry_cnt; | ||
1335 | qp->s_rnr_retry = qp->s_rnr_retry_cnt; | ||
1336 | goto bail; | ||
1337 | |||
1338 | default: /* 2: reserved */ | ||
1339 | reserved: | ||
1340 | /* Ignore reserved NAK codes. */ | ||
1341 | goto bail; | ||
1342 | } | ||
1343 | |||
1344 | bail: | ||
1345 | return ret; | ||
1346 | } | ||
1347 | |||
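do_rc_ack() keys everything off the AETH dword: the top three bits select ACK (0), RNR NAK (1), or NAK (3), with 2 reserved; the next five bits carry the credit count, RNR timer code, or NAK reason; and the low 24 bits are the responder's MSN. A stand-alone decoder sketch along those lines; the shift and mask values are assumptions matching the usual IBTA layout, not the driver's QIB_AETH_*/QIB_MSN_MASK constants, which are defined elsewhere:

#include <stdint.h>
#include <stdio.h>

/* Assumed AETH layout: 3-bit class, 5-bit code, 24-bit MSN. */
#define AETH_MSN_MASK	0x00ffffffu
#define AETH_CODE_SHIFT	24
#define AETH_CODE_MASK	0x1fu

static void decode_aeth(uint32_t aeth)
{
	uint32_t msn  = aeth & AETH_MSN_MASK;
	uint32_t code = (aeth >> AETH_CODE_SHIFT) & AETH_CODE_MASK;

	switch (aeth >> 29) {
	case 0:
		printf("ACK, credit code %u, MSN %u\n", code, msn);
		break;
	case 1:
		printf("RNR NAK, timer code %u, MSN %u\n", code, msn);
		break;
	case 3:
		printf("NAK, error code %u, MSN %u\n", code, msn);
		break;
	default:
		printf("reserved class, MSN %u\n", msn);
		break;
	}
}

int main(void)
{
	decode_aeth((3u << 29) | 42);	/* NAK, code 0: PSN sequence error */
	return 0;
}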
1348 | /* | ||
1349 | * We have seen an out of sequence RDMA read middle or last packet. | ||
1350 | * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. | ||
1351 | */ | ||
1352 | static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, | ||
1353 | struct qib_ctxtdata *rcd) | ||
1354 | { | ||
1355 | struct qib_swqe *wqe; | ||
1356 | |||
1357 | /* Remove QP from retry timer */ | ||
1358 | if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { | ||
1359 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); | ||
1360 | del_timer(&qp->s_timer); | ||
1361 | } | ||
1362 | |||
1363 | wqe = get_swqe_ptr(qp, qp->s_acked); | ||
1364 | |||
1365 | while (qib_cmp24(psn, wqe->lpsn) > 0) { | ||
1366 | if (wqe->wr.opcode == IB_WR_RDMA_READ || | ||
1367 | wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || | ||
1368 | wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) | ||
1369 | break; | ||
1370 | wqe = do_rc_completion(qp, wqe, ibp); | ||
1371 | } | ||
1372 | |||
1373 | ibp->n_rdma_seq++; | ||
1374 | qp->r_flags |= QIB_R_RDMAR_SEQ; | ||
1375 | qib_restart_rc(qp, qp->s_last_psn + 1, 0); | ||
1376 | if (list_empty(&qp->rspwait)) { | ||
1377 | qp->r_flags |= QIB_R_RSP_SEND; | ||
1378 | atomic_inc(&qp->refcount); | ||
1379 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | ||
1380 | } | ||
1381 | } | ||
1382 | |||
1383 | /** | ||
1384 | * qib_rc_rcv_resp - process an incoming RC response packet | ||
1385 | * @ibp: the port this packet came in on | ||
1386 | * @ohdr: the other headers for this packet | ||
1387 | * @data: the packet data | ||
1388 | * @tlen: the packet length | ||
1389 | * @qp: the QP for this packet | ||
1390 | * @opcode: the opcode for this packet | ||
1391 | * @psn: the packet sequence number for this packet | ||
1392 | * @hdrsize: the header length | ||
1393 | * @pmtu: the path MTU | ||
1394 | * | ||
1395 | * This is called from qib_rc_rcv() to process an incoming RC response | ||
1396 | * packet for the given QP. | ||
1397 | * Called at interrupt level. | ||
1398 | */ | ||
1399 | static void qib_rc_rcv_resp(struct qib_ibport *ibp, | ||
1400 | struct qib_other_headers *ohdr, | ||
1401 | void *data, u32 tlen, | ||
1402 | struct qib_qp *qp, | ||
1403 | u32 opcode, | ||
1404 | u32 psn, u32 hdrsize, u32 pmtu, | ||
1405 | struct qib_ctxtdata *rcd) | ||
1406 | { | ||
1407 | struct qib_swqe *wqe; | ||
1408 | enum ib_wc_status status; | ||
1409 | unsigned long flags; | ||
1410 | int diff; | ||
1411 | u32 pad; | ||
1412 | u32 aeth; | ||
1413 | u64 val; | ||
1414 | |||
1415 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1416 | |||
1417 | /* Double check we can process this now that we hold the s_lock. */ | ||
1418 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1419 | goto ack_done; | ||
1420 | |||
1421 | /* Ignore invalid responses. */ | ||
1422 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) | ||
1423 | goto ack_done; | ||
1424 | |||
1425 | /* Ignore duplicate responses. */ | ||
1426 | diff = qib_cmp24(psn, qp->s_last_psn); | ||
1427 | if (unlikely(diff <= 0)) { | ||
1428 | /* Update credits for "ghost" ACKs */ | ||
1429 | if (diff == 0 && opcode == OP(ACKNOWLEDGE)) { | ||
1430 | aeth = be32_to_cpu(ohdr->u.aeth); | ||
1431 | if ((aeth >> 29) == 0) | ||
1432 | qib_get_credit(qp, aeth); | ||
1433 | } | ||
1434 | goto ack_done; | ||
1435 | } | ||
1436 | |||
1437 | /* | ||
1438 | * Skip everything other than the PSN we expect, if we are waiting | ||
1439 | * for a reply to a restarted RDMA read or atomic op. | ||
1440 | */ | ||
1441 | if (qp->r_flags & QIB_R_RDMAR_SEQ) { | ||
1442 | if (qib_cmp24(psn, qp->s_last_psn + 1) != 0) | ||
1443 | goto ack_done; | ||
1444 | qp->r_flags &= ~QIB_R_RDMAR_SEQ; | ||
1445 | } | ||
1446 | |||
1447 | if (unlikely(qp->s_acked == qp->s_tail)) | ||
1448 | goto ack_done; | ||
1449 | wqe = get_swqe_ptr(qp, qp->s_acked); | ||
1450 | status = IB_WC_SUCCESS; | ||
1451 | |||
1452 | switch (opcode) { | ||
1453 | case OP(ACKNOWLEDGE): | ||
1454 | case OP(ATOMIC_ACKNOWLEDGE): | ||
1455 | case OP(RDMA_READ_RESPONSE_FIRST): | ||
1456 | aeth = be32_to_cpu(ohdr->u.aeth); | ||
1457 | if (opcode == OP(ATOMIC_ACKNOWLEDGE)) { | ||
1458 | __be32 *p = ohdr->u.at.atomic_ack_eth; | ||
1459 | |||
1460 | val = ((u64) be32_to_cpu(p[0]) << 32) | | ||
1461 | be32_to_cpu(p[1]); | ||
1462 | } else | ||
1463 | val = 0; | ||
1464 | if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || | ||
1465 | opcode != OP(RDMA_READ_RESPONSE_FIRST)) | ||
1466 | goto ack_done; | ||
1467 | hdrsize += 4; | ||
1468 | wqe = get_swqe_ptr(qp, qp->s_acked); | ||
1469 | if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) | ||
1470 | goto ack_op_err; | ||
1471 | /* | ||
1472 | * If this is a response to a resent RDMA read, we | ||
1473 | * have to be careful to copy the data to the right | ||
1474 | * location. | ||
1475 | */ | ||
1476 | qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, | ||
1477 | wqe, psn, pmtu); | ||
1478 | goto read_middle; | ||
1479 | |||
1480 | case OP(RDMA_READ_RESPONSE_MIDDLE): | ||
1481 | /* no AETH, no ACK */ | ||
1482 | if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) | ||
1483 | goto ack_seq_err; | ||
1484 | if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) | ||
1485 | goto ack_op_err; | ||
1486 | read_middle: | ||
1487 | if (unlikely(tlen != (hdrsize + pmtu + 4))) | ||
1488 | goto ack_len_err; | ||
1489 | if (unlikely(pmtu >= qp->s_rdma_read_len)) | ||
1490 | goto ack_len_err; | ||
1491 | |||
1492 | /* | ||
1493 | * We got a response so update the timeout. | ||
1494 | * 4.096 usec. * (1 << qp->timeout) | ||
1495 | */ | ||
1496 | qp->s_flags |= QIB_S_TIMER; | ||
1497 | mod_timer(&qp->s_timer, jiffies + | ||
1498 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | ||
1499 | 1000UL)); | ||
1500 | if (qp->s_flags & QIB_S_WAIT_ACK) { | ||
1501 | qp->s_flags &= ~QIB_S_WAIT_ACK; | ||
1502 | qib_schedule_send(qp); | ||
1503 | } | ||
1504 | |||
1505 | if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE)) | ||
1506 | qp->s_retry = qp->s_retry_cnt; | ||
1507 | |||
1508 | /* | ||
1509 | * Update the RDMA receive state but do the copy w/o | ||
1510 | * holding the locks and blocking interrupts. | ||
1511 | */ | ||
1512 | qp->s_rdma_read_len -= pmtu; | ||
1513 | update_last_psn(qp, psn); | ||
1514 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1515 | qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0); | ||
1516 | goto bail; | ||
1517 | |||
1518 | case OP(RDMA_READ_RESPONSE_ONLY): | ||
1519 | aeth = be32_to_cpu(ohdr->u.aeth); | ||
1520 | if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) | ||
1521 | goto ack_done; | ||
1522 | /* Get the number of bytes the message was padded by. */ | ||
1523 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | ||
1524 | /* | ||
1525 | * Check that the data size is >= 0 && <= pmtu. | ||
1526 | * Remember to account for the AETH header (4) and | ||
1527 | * ICRC (4). | ||
1528 | */ | ||
1529 | if (unlikely(tlen < (hdrsize + pad + 8))) | ||
1530 | goto ack_len_err; | ||
1531 | /* | ||
1532 | * If this is a response to a resent RDMA read, we | ||
1533 | * have to be careful to copy the data to the right | ||
1534 | * location. | ||
1535 | */ | ||
1536 | wqe = get_swqe_ptr(qp, qp->s_acked); | ||
1537 | qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, | ||
1538 | wqe, psn, pmtu); | ||
1539 | goto read_last; | ||
1540 | |||
1541 | case OP(RDMA_READ_RESPONSE_LAST): | ||
1542 | /* ACKs READ req. */ | ||
1543 | if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) | ||
1544 | goto ack_seq_err; | ||
1545 | if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) | ||
1546 | goto ack_op_err; | ||
1547 | /* Get the number of bytes the message was padded by. */ | ||
1548 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | ||
1549 | /* | ||
1550 | * Check that the data size is >= 1 && <= pmtu. | ||
1551 | * Remember to account for the AETH header (4) and | ||
1552 | * ICRC (4). | ||
1553 | */ | ||
1554 | if (unlikely(tlen <= (hdrsize + pad + 8))) | ||
1555 | goto ack_len_err; | ||
1556 | read_last: | ||
1557 | tlen -= hdrsize + pad + 8; | ||
1558 | if (unlikely(tlen != qp->s_rdma_read_len)) | ||
1559 | goto ack_len_err; | ||
1560 | aeth = be32_to_cpu(ohdr->u.aeth); | ||
1561 | qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0); | ||
1562 | WARN_ON(qp->s_rdma_read_sge.num_sge); | ||
1563 | (void) do_rc_ack(qp, aeth, psn, | ||
1564 | OP(RDMA_READ_RESPONSE_LAST), 0, rcd); | ||
1565 | goto ack_done; | ||
1566 | } | ||
1567 | |||
1568 | ack_op_err: | ||
1569 | status = IB_WC_LOC_QP_OP_ERR; | ||
1570 | goto ack_err; | ||
1571 | |||
1572 | ack_seq_err: | ||
1573 | rdma_seq_err(qp, ibp, psn, rcd); | ||
1574 | goto ack_done; | ||
1575 | |||
1576 | ack_len_err: | ||
1577 | status = IB_WC_LOC_LEN_ERR; | ||
1578 | ack_err: | ||
1579 | if (qp->s_last == qp->s_acked) { | ||
1580 | qib_send_complete(qp, wqe, status); | ||
1581 | qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); | ||
1582 | } | ||
1583 | ack_done: | ||
1584 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1585 | bail: | ||
1586 | return; | ||
1587 | } | ||
1588 | |||
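The mod_timer() call in the read-response path above re-arms the retry timer to the IB local ACK timeout, 4.096 usec * 2^timeout, before converting to jiffies. The same arithmetic as a tiny stand-alone helper (a hypothetical function; only the formula comes from the code above):

#include <stdint.h>
#include <stdio.h>

/* IB local ACK timeout in microseconds: 4.096 usec * 2^timeout, exactly
 * the expression fed to usecs_to_jiffies() in qib_rc_rcv_resp(). */
static unsigned long rc_timeout_usecs(uint8_t timeout)
{
	return (4096UL * (1UL << timeout)) / 1000UL;
}

int main(void)
{
	/* e.g. timeout = 14: 4.096 usec * 16384 is roughly 67 ms */
	printf("%lu us\n", rc_timeout_usecs(14));
	return 0;
}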
1589 | /** | ||
1590 | * qib_rc_rcv_error - process an incoming duplicate or error RC packet | ||
1591 | * @ohdr: the other headers for this packet | ||
1592 | * @data: the packet data | ||
1593 | * @qp: the QP for this packet | ||
1594 | * @opcode: the opcode for this packet | ||
1595 | * @psn: the packet sequence number for this packet | ||
1596 | * @diff: the difference between the PSN and the expected PSN | ||
1597 | * | ||
1598 | * This is called from qib_rc_rcv() to process an unexpected | ||
1599 | * incoming RC packet for the given QP. | ||
1600 | * Called at interrupt level. | ||
1601 | * Return 1 if no more processing is needed; otherwise return 0 to | ||
1602 | * schedule a response to be sent. | ||
1603 | */ | ||
1604 | static int qib_rc_rcv_error(struct qib_other_headers *ohdr, | ||
1605 | void *data, | ||
1606 | struct qib_qp *qp, | ||
1607 | u32 opcode, | ||
1608 | u32 psn, | ||
1609 | int diff, | ||
1610 | struct qib_ctxtdata *rcd) | ||
1611 | { | ||
1612 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
1613 | struct qib_ack_entry *e; | ||
1614 | unsigned long flags; | ||
1615 | u8 i, prev; | ||
1616 | int old_req; | ||
1617 | |||
1618 | if (diff > 0) { | ||
1619 | /* | ||
1620 | * Packet sequence error. | ||
1621 | * A NAK will ACK earlier sends and RDMA writes. | ||
1622 | * Don't queue the NAK if we already sent one. | ||
1623 | */ | ||
1624 | if (!qp->r_nak_state) { | ||
1625 | ibp->n_rc_seqnak++; | ||
1626 | qp->r_nak_state = IB_NAK_PSN_ERROR; | ||
1627 | /* Use the expected PSN. */ | ||
1628 | qp->r_ack_psn = qp->r_psn; | ||
1629 | /* | ||
1630 | * Wait to send the sequence NAK until all packets | ||
1631 | * in the receive queue have been processed. | ||
1632 | * Otherwise, we end up propagating congestion. | ||
1633 | */ | ||
1634 | if (list_empty(&qp->rspwait)) { | ||
1635 | qp->r_flags |= QIB_R_RSP_NAK; | ||
1636 | atomic_inc(&qp->refcount); | ||
1637 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | ||
1638 | } | ||
1639 | } | ||
1640 | goto done; | ||
1641 | } | ||
1642 | |||
1643 | /* | ||
1644 | * Handle a duplicate request. Don't re-execute SEND, RDMA | ||
1645 | * write or atomic op. Don't NAK errors, just silently drop | ||
1646 | * the duplicate request. Note that r_sge, r_len, and | ||
1647 | * r_rcv_len may be in use so don't modify them. | ||
1648 | * | ||
1649 | * We are supposed to ACK the earliest duplicate PSN but we | ||
1650 | * can coalesce an outstanding duplicate ACK. We have to | ||
1651 | * send the earliest so that RDMA reads can be restarted at | ||
1652 | * the requester's expected PSN. | ||
1653 | * | ||
1654 | * First, find where this duplicate PSN falls within the | ||
1655 | * ACKs previously sent. | ||
1656 | * old_req is true if there is an older response that is scheduled | ||
1657 | * to be sent before sending this one. | ||
1658 | */ | ||
1659 | e = NULL; | ||
1660 | old_req = 1; | ||
1661 | ibp->n_rc_dupreq++; | ||
1662 | |||
1663 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1664 | /* Double check we can process this now that we hold the s_lock. */ | ||
1665 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1666 | goto unlock_done; | ||
1667 | |||
1668 | for (i = qp->r_head_ack_queue; ; i = prev) { | ||
1669 | if (i == qp->s_tail_ack_queue) | ||
1670 | old_req = 0; | ||
1671 | if (i) | ||
1672 | prev = i - 1; | ||
1673 | else | ||
1674 | prev = QIB_MAX_RDMA_ATOMIC; | ||
1675 | if (prev == qp->r_head_ack_queue) { | ||
1676 | e = NULL; | ||
1677 | break; | ||
1678 | } | ||
1679 | e = &qp->s_ack_queue[prev]; | ||
1680 | if (!e->opcode) { | ||
1681 | e = NULL; | ||
1682 | break; | ||
1683 | } | ||
1684 | if (qib_cmp24(psn, e->psn) >= 0) { | ||
1685 | if (prev == qp->s_tail_ack_queue && | ||
1686 | qib_cmp24(psn, e->lpsn) <= 0) | ||
1687 | old_req = 0; | ||
1688 | break; | ||
1689 | } | ||
1690 | } | ||
1691 | switch (opcode) { | ||
1692 | case OP(RDMA_READ_REQUEST): { | ||
1693 | struct ib_reth *reth; | ||
1694 | u32 offset; | ||
1695 | u32 len; | ||
1696 | |||
1697 | /* | ||
1698 | * If we didn't find the RDMA read request in the ack queue, | ||
1699 | * we can ignore this request. | ||
1700 | */ | ||
1701 | if (!e || e->opcode != OP(RDMA_READ_REQUEST)) | ||
1702 | goto unlock_done; | ||
1703 | /* RETH comes after BTH */ | ||
1704 | reth = &ohdr->u.rc.reth; | ||
1705 | /* | ||
1706 | * Address range must be a subset of the original | ||
1707 | * request and start on pmtu boundaries. | ||
1708 | * We reuse the old ack_queue slot since the requester | ||
1709 | * should not back up and request an earlier PSN for the | ||
1710 | * same request. | ||
1711 | */ | ||
1712 | offset = ((psn - e->psn) & QIB_PSN_MASK) * | ||
1713 | ib_mtu_enum_to_int(qp->path_mtu); | ||
1714 | len = be32_to_cpu(reth->length); | ||
1715 | if (unlikely(offset + len != e->rdma_sge.sge_length)) | ||
1716 | goto unlock_done; | ||
1717 | if (e->rdma_sge.mr) { | ||
1718 | atomic_dec(&e->rdma_sge.mr->refcount); | ||
1719 | e->rdma_sge.mr = NULL; | ||
1720 | } | ||
1721 | if (len != 0) { | ||
1722 | u32 rkey = be32_to_cpu(reth->rkey); | ||
1723 | u64 vaddr = be64_to_cpu(reth->vaddr); | ||
1724 | int ok; | ||
1725 | |||
1726 | ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, | ||
1727 | IB_ACCESS_REMOTE_READ); | ||
1728 | if (unlikely(!ok)) | ||
1729 | goto unlock_done; | ||
1730 | } else { | ||
1731 | e->rdma_sge.vaddr = NULL; | ||
1732 | e->rdma_sge.length = 0; | ||
1733 | e->rdma_sge.sge_length = 0; | ||
1734 | } | ||
1735 | e->psn = psn; | ||
1736 | if (old_req) | ||
1737 | goto unlock_done; | ||
1738 | qp->s_tail_ack_queue = prev; | ||
1739 | break; | ||
1740 | } | ||
1741 | |||
1742 | case OP(COMPARE_SWAP): | ||
1743 | case OP(FETCH_ADD): { | ||
1744 | /* | ||
1745 | * If we didn't find the atomic request in the ack queue | ||
1746 | * or the send tasklet is already backed up to send an | ||
1747 | * earlier entry, we can ignore this request. | ||
1748 | */ | ||
1749 | if (!e || e->opcode != (u8) opcode || old_req) | ||
1750 | goto unlock_done; | ||
1751 | qp->s_tail_ack_queue = prev; | ||
1752 | break; | ||
1753 | } | ||
1754 | |||
1755 | default: | ||
1756 | /* | ||
1757 | * Ignore this operation if it doesn't request an ACK | ||
1758 | * or an earlier RDMA read or atomic is going to be resent. | ||
1759 | */ | ||
1760 | if (!(psn & IB_BTH_REQ_ACK) || old_req) | ||
1761 | goto unlock_done; | ||
1762 | /* | ||
1763 | * Resend the most recent ACK if this request is | ||
1764 | * after all the previous RDMA reads and atomics. | ||
1765 | */ | ||
1766 | if (i == qp->r_head_ack_queue) { | ||
1767 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1768 | qp->r_nak_state = 0; | ||
1769 | qp->r_ack_psn = qp->r_psn - 1; | ||
1770 | goto send_ack; | ||
1771 | } | ||
1772 | /* | ||
1773 | * Try to send a simple ACK to work around a Mellanox bug | ||
1774 | * which doesn't accept an RDMA read response or atomic | ||
1775 | * response as an ACK for earlier SENDs or RDMA writes. | ||
1776 | */ | ||
1777 | if (!(qp->s_flags & QIB_S_RESP_PENDING)) { | ||
1778 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1779 | qp->r_nak_state = 0; | ||
1780 | qp->r_ack_psn = qp->s_ack_queue[i].psn - 1; | ||
1781 | goto send_ack; | ||
1782 | } | ||
1783 | /* | ||
1784 | * Resend the RDMA read or atomic op which | ||
1785 | * ACKs this duplicate request. | ||
1786 | */ | ||
1787 | qp->s_tail_ack_queue = i; | ||
1788 | break; | ||
1789 | } | ||
1790 | qp->s_ack_state = OP(ACKNOWLEDGE); | ||
1791 | qp->s_flags |= QIB_S_RESP_PENDING; | ||
1792 | qp->r_nak_state = 0; | ||
1793 | qib_schedule_send(qp); | ||
1794 | |||
1795 | unlock_done: | ||
1796 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1797 | done: | ||
1798 | return 1; | ||
1799 | |||
1800 | send_ack: | ||
1801 | return 0; | ||
1802 | } | ||
1803 | |||
1804 | void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) | ||
1805 | { | ||
1806 | unsigned long flags; | ||
1807 | int lastwqe; | ||
1808 | |||
1809 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1810 | lastwqe = qib_error_qp(qp, err); | ||
1811 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1812 | |||
1813 | if (lastwqe) { | ||
1814 | struct ib_event ev; | ||
1815 | |||
1816 | ev.device = qp->ibqp.device; | ||
1817 | ev.element.qp = &qp->ibqp; | ||
1818 | ev.event = IB_EVENT_QP_LAST_WQE_REACHED; | ||
1819 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); | ||
1820 | } | ||
1821 | } | ||
1822 | |||
1823 | static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n) | ||
1824 | { | ||
1825 | unsigned next; | ||
1826 | |||
1827 | next = n + 1; | ||
1828 | if (next > QIB_MAX_RDMA_ATOMIC) | ||
1829 | next = 0; | ||
1830 | qp->s_tail_ack_queue = next; | ||
1831 | qp->s_ack_state = OP(ACKNOWLEDGE); | ||
1832 | } | ||
1833 | |||
1834 | /** | ||
1835 | * qib_rc_rcv - process an incoming RC packet | ||
1836 | * @rcd: the context pointer | ||
1837 | * @hdr: the header of this packet | ||
1838 | * @has_grh: true if the header has a GRH | ||
1839 | * @data: the packet data | ||
1840 | * @tlen: the packet length | ||
1841 | * @qp: the QP for this packet | ||
1842 | * | ||
1843 | * This is called from qib_qp_rcv() to process an incoming RC packet | ||
1844 | * for the given QP. | ||
1845 | * Called at interrupt level. | ||
1846 | */ | ||
1847 | void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | ||
1848 | int has_grh, void *data, u32 tlen, struct qib_qp *qp) | ||
1849 | { | ||
1850 | struct qib_ibport *ibp = &rcd->ppd->ibport_data; | ||
1851 | struct qib_other_headers *ohdr; | ||
1852 | u32 opcode; | ||
1853 | u32 hdrsize; | ||
1854 | u32 psn; | ||
1855 | u32 pad; | ||
1856 | struct ib_wc wc; | ||
1857 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | ||
1858 | int diff; | ||
1859 | struct ib_reth *reth; | ||
1860 | unsigned long flags; | ||
1861 | int ret; | ||
1862 | |||
1863 | /* Check for GRH */ | ||
1864 | if (!has_grh) { | ||
1865 | ohdr = &hdr->u.oth; | ||
1866 | hdrsize = 8 + 12; /* LRH + BTH */ | ||
1867 | } else { | ||
1868 | ohdr = &hdr->u.l.oth; | ||
1869 | hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */ | ||
1870 | } | ||
1871 | |||
1872 | opcode = be32_to_cpu(ohdr->bth[0]); | ||
1873 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1874 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) | ||
1875 | goto sunlock; | ||
1876 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1877 | |||
1878 | psn = be32_to_cpu(ohdr->bth[2]); | ||
1879 | opcode >>= 24; | ||
1880 | |||
1881 | /* Prevent simultaneous processing after APM on different CPUs */ | ||
1882 | spin_lock(&qp->r_lock); | ||
1883 | |||
1884 | /* | ||
1885 | * Process responses (ACKs) before anything else. Note that the | ||
1886 | * packet sequence number will be for something in the send work | ||
1887 | * queue rather than the expected receive packet sequence number. | ||
1888 | * In other words, this QP is the requester. | ||
1889 | */ | ||
1890 | if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && | ||
1891 | opcode <= OP(ATOMIC_ACKNOWLEDGE)) { | ||
1892 | qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, | ||
1893 | hdrsize, pmtu, rcd); | ||
1894 | goto runlock; | ||
1895 | } | ||
1896 | |||
1897 | /* Compute 24 bits worth of difference. */ | ||
1898 | diff = qib_cmp24(psn, qp->r_psn); | ||
1899 | if (unlikely(diff)) { | ||
1900 | if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) | ||
1901 | goto runlock; | ||
1902 | goto send_ack; | ||
1903 | } | ||
1904 | |||
1905 | /* Check for opcode sequence errors. */ | ||
1906 | switch (qp->r_state) { | ||
1907 | case OP(SEND_FIRST): | ||
1908 | case OP(SEND_MIDDLE): | ||
1909 | if (opcode == OP(SEND_MIDDLE) || | ||
1910 | opcode == OP(SEND_LAST) || | ||
1911 | opcode == OP(SEND_LAST_WITH_IMMEDIATE)) | ||
1912 | break; | ||
1913 | goto nack_inv; | ||
1914 | |||
1915 | case OP(RDMA_WRITE_FIRST): | ||
1916 | case OP(RDMA_WRITE_MIDDLE): | ||
1917 | if (opcode == OP(RDMA_WRITE_MIDDLE) || | ||
1918 | opcode == OP(RDMA_WRITE_LAST) || | ||
1919 | opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) | ||
1920 | break; | ||
1921 | goto nack_inv; | ||
1922 | |||
1923 | default: | ||
1924 | if (opcode == OP(SEND_MIDDLE) || | ||
1925 | opcode == OP(SEND_LAST) || | ||
1926 | opcode == OP(SEND_LAST_WITH_IMMEDIATE) || | ||
1927 | opcode == OP(RDMA_WRITE_MIDDLE) || | ||
1928 | opcode == OP(RDMA_WRITE_LAST) || | ||
1929 | opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) | ||
1930 | goto nack_inv; | ||
1931 | /* | ||
1932 | * Note that it is up to the requester to not send a new | ||
1933 | * RDMA read or atomic operation before receiving an ACK | ||
1934 | * for the previous operation. | ||
1935 | */ | ||
1936 | break; | ||
1937 | } | ||
1938 | |||
1939 | memset(&wc, 0, sizeof wc); | ||
1940 | |||
1941 | if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { | ||
1942 | qp->r_flags |= QIB_R_COMM_EST; | ||
1943 | if (qp->ibqp.event_handler) { | ||
1944 | struct ib_event ev; | ||
1945 | |||
1946 | ev.device = qp->ibqp.device; | ||
1947 | ev.element.qp = &qp->ibqp; | ||
1948 | ev.event = IB_EVENT_COMM_EST; | ||
1949 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); | ||
1950 | } | ||
1951 | } | ||
1952 | |||
1953 | /* OK, process the packet. */ | ||
1954 | switch (opcode) { | ||
1955 | case OP(SEND_FIRST): | ||
1956 | ret = qib_get_rwqe(qp, 0); | ||
1957 | if (ret < 0) | ||
1958 | goto nack_op_err; | ||
1959 | if (!ret) | ||
1960 | goto rnr_nak; | ||
1961 | qp->r_rcv_len = 0; | ||
1962 | /* FALLTHROUGH */ | ||
1963 | case OP(SEND_MIDDLE): | ||
1964 | case OP(RDMA_WRITE_MIDDLE): | ||
1965 | send_middle: | ||
1966 | /* Check the length: payload must be one PMTU and fit the posted RWQE. */ | ||
1967 | if (unlikely(tlen != (hdrsize + pmtu + 4))) | ||
1968 | goto nack_inv; | ||
1969 | qp->r_rcv_len += pmtu; | ||
1970 | if (unlikely(qp->r_rcv_len > qp->r_len)) | ||
1971 | goto nack_inv; | ||
1972 | qib_copy_sge(&qp->r_sge, data, pmtu, 1); | ||
1973 | break; | ||
1974 | |||
1975 | case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): | ||
1976 | /* consume RWQE */ | ||
1977 | ret = qib_get_rwqe(qp, 1); | ||
1978 | if (ret < 0) | ||
1979 | goto nack_op_err; | ||
1980 | if (!ret) | ||
1981 | goto rnr_nak; | ||
1982 | goto send_last_imm; | ||
1983 | |||
1984 | case OP(SEND_ONLY): | ||
1985 | case OP(SEND_ONLY_WITH_IMMEDIATE): | ||
1986 | ret = qib_get_rwqe(qp, 0); | ||
1987 | if (ret < 0) | ||
1988 | goto nack_op_err; | ||
1989 | if (!ret) | ||
1990 | goto rnr_nak; | ||
1991 | qp->r_rcv_len = 0; | ||
1992 | if (opcode == OP(SEND_ONLY)) | ||
1993 | goto send_last; | ||
1994 | /* FALLTHROUGH */ | ||
1995 | case OP(SEND_LAST_WITH_IMMEDIATE): | ||
1996 | send_last_imm: | ||
1997 | wc.ex.imm_data = ohdr->u.imm_data; | ||
1998 | hdrsize += 4; | ||
1999 | wc.wc_flags = IB_WC_WITH_IMM; | ||
2000 | /* FALLTHROUGH */ | ||
2001 | case OP(SEND_LAST): | ||
2002 | case OP(RDMA_WRITE_LAST): | ||
2003 | send_last: | ||
2004 | /* Get the number of bytes the message was padded by. */ | ||
2005 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | ||
2006 | /* Check for invalid length. */ | ||
2007 | /* XXX LAST len should be >= 1 */ | ||
2008 | if (unlikely(tlen < (hdrsize + pad + 4))) | ||
2009 | goto nack_inv; | ||
2010 | /* Don't count the CRC. */ | ||
2011 | tlen -= (hdrsize + pad + 4); | ||
2012 | wc.byte_len = tlen + qp->r_rcv_len; | ||
2013 | if (unlikely(wc.byte_len > qp->r_len)) | ||
2014 | goto nack_inv; | ||
2015 | qib_copy_sge(&qp->r_sge, data, tlen, 1); | ||
2016 | while (qp->r_sge.num_sge) { | ||
2017 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
2018 | if (--qp->r_sge.num_sge) | ||
2019 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
2020 | } | ||
2021 | qp->r_msn++; | ||
2022 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | ||
2023 | break; | ||
2024 | wc.wr_id = qp->r_wr_id; | ||
2025 | wc.status = IB_WC_SUCCESS; | ||
2026 | if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) || | ||
2027 | opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) | ||
2028 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; | ||
2029 | else | ||
2030 | wc.opcode = IB_WC_RECV; | ||
2031 | wc.qp = &qp->ibqp; | ||
2032 | wc.src_qp = qp->remote_qpn; | ||
2033 | wc.slid = qp->remote_ah_attr.dlid; | ||
2034 | wc.sl = qp->remote_ah_attr.sl; | ||
2035 | /* Signal completion event if the solicited bit is set. */ | ||
2036 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | ||
2037 | (ohdr->bth[0] & | ||
2038 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); | ||
2039 | break; | ||
2040 | |||
2041 | case OP(RDMA_WRITE_FIRST): | ||
2042 | case OP(RDMA_WRITE_ONLY): | ||
2043 | case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): | ||
2044 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) | ||
2045 | goto nack_inv; | ||
2046 | /* consume RWQE */ | ||
2047 | reth = &ohdr->u.rc.reth; | ||
2048 | hdrsize += sizeof(*reth); | ||
2049 | qp->r_len = be32_to_cpu(reth->length); | ||
2050 | qp->r_rcv_len = 0; | ||
2051 | qp->r_sge.sg_list = NULL; | ||
2052 | if (qp->r_len != 0) { | ||
2053 | u32 rkey = be32_to_cpu(reth->rkey); | ||
2054 | u64 vaddr = be64_to_cpu(reth->vaddr); | ||
2055 | int ok; | ||
2056 | |||
2057 | /* Check rkey & NAK */ | ||
2058 | ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, | ||
2059 | rkey, IB_ACCESS_REMOTE_WRITE); | ||
2060 | if (unlikely(!ok)) | ||
2061 | goto nack_acc; | ||
2062 | qp->r_sge.num_sge = 1; | ||
2063 | } else { | ||
2064 | qp->r_sge.num_sge = 0; | ||
2065 | qp->r_sge.sge.mr = NULL; | ||
2066 | qp->r_sge.sge.vaddr = NULL; | ||
2067 | qp->r_sge.sge.length = 0; | ||
2068 | qp->r_sge.sge.sge_length = 0; | ||
2069 | } | ||
2070 | if (opcode == OP(RDMA_WRITE_FIRST)) | ||
2071 | goto send_middle; | ||
2072 | else if (opcode == OP(RDMA_WRITE_ONLY)) | ||
2073 | goto send_last; | ||
2074 | ret = qib_get_rwqe(qp, 1); | ||
2075 | if (ret < 0) | ||
2076 | goto nack_op_err; | ||
2077 | if (!ret) | ||
2078 | goto rnr_nak; | ||
2079 | goto send_last_imm; | ||
2080 | |||
2081 | case OP(RDMA_READ_REQUEST): { | ||
2082 | struct qib_ack_entry *e; | ||
2083 | u32 len; | ||
2084 | u8 next; | ||
2085 | |||
2086 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) | ||
2087 | goto nack_inv; | ||
2088 | next = qp->r_head_ack_queue + 1; | ||
2089 | /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */ | ||
2090 | if (next > QIB_MAX_RDMA_ATOMIC) | ||
2091 | next = 0; | ||
2092 | spin_lock_irqsave(&qp->s_lock, flags); | ||
2093 | /* Double check we can process this while holding the s_lock. */ | ||
2094 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
2095 | goto srunlock; | ||
2096 | if (unlikely(next == qp->s_tail_ack_queue)) { | ||
2097 | if (!qp->s_ack_queue[next].sent) | ||
2098 | goto nack_inv_unlck; | ||
2099 | qib_update_ack_queue(qp, next); | ||
2100 | } | ||
2101 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | ||
2102 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { | ||
2103 | atomic_dec(&e->rdma_sge.mr->refcount); | ||
2104 | e->rdma_sge.mr = NULL; | ||
2105 | } | ||
2106 | reth = &ohdr->u.rc.reth; | ||
2107 | len = be32_to_cpu(reth->length); | ||
2108 | if (len) { | ||
2109 | u32 rkey = be32_to_cpu(reth->rkey); | ||
2110 | u64 vaddr = be64_to_cpu(reth->vaddr); | ||
2111 | int ok; | ||
2112 | |||
2113 | /* Check rkey & NAK */ | ||
2114 | ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, | ||
2115 | rkey, IB_ACCESS_REMOTE_READ); | ||
2116 | if (unlikely(!ok)) | ||
2117 | goto nack_acc_unlck; | ||
2118 | /* | ||
2119 | * Update the next expected PSN. We add 1 later | ||
2120 | * below, so only add the remainder here. | ||
2121 | */ | ||
2122 | if (len > pmtu) | ||
2123 | qp->r_psn += (len - 1) / pmtu; | ||
2124 | } else { | ||
2125 | e->rdma_sge.mr = NULL; | ||
2126 | e->rdma_sge.vaddr = NULL; | ||
2127 | e->rdma_sge.length = 0; | ||
2128 | e->rdma_sge.sge_length = 0; | ||
2129 | } | ||
2130 | e->opcode = opcode; | ||
2131 | e->sent = 0; | ||
2132 | e->psn = psn; | ||
2133 | e->lpsn = qp->r_psn; | ||
2134 | /* | ||
2135 | * We need to increment the MSN here instead of when we | ||
2136 | * finish sending the result since a duplicate request would | ||
2137 | * increment it more than once. | ||
2138 | */ | ||
2139 | qp->r_msn++; | ||
2140 | qp->r_psn++; | ||
2141 | qp->r_state = opcode; | ||
2142 | qp->r_nak_state = 0; | ||
2143 | qp->r_head_ack_queue = next; | ||
2144 | |||
2145 | /* Schedule the send tasklet. */ | ||
2146 | qp->s_flags |= QIB_S_RESP_PENDING; | ||
2147 | qib_schedule_send(qp); | ||
2148 | |||
2149 | goto srunlock; | ||
2150 | } | ||
2151 | |||
2152 | case OP(COMPARE_SWAP): | ||
2153 | case OP(FETCH_ADD): { | ||
2154 | struct ib_atomic_eth *ateth; | ||
2155 | struct qib_ack_entry *e; | ||
2156 | u64 vaddr; | ||
2157 | atomic64_t *maddr; | ||
2158 | u64 sdata; | ||
2159 | u32 rkey; | ||
2160 | u8 next; | ||
2161 | |||
2162 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) | ||
2163 | goto nack_inv; | ||
2164 | next = qp->r_head_ack_queue + 1; | ||
2165 | if (next > QIB_MAX_RDMA_ATOMIC) | ||
2166 | next = 0; | ||
2167 | spin_lock_irqsave(&qp->s_lock, flags); | ||
2168 | /* Double check we can process this while holding the s_lock. */ | ||
2169 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
2170 | goto srunlock; | ||
2171 | if (unlikely(next == qp->s_tail_ack_queue)) { | ||
2172 | if (!qp->s_ack_queue[next].sent) | ||
2173 | goto nack_inv_unlck; | ||
2174 | qib_update_ack_queue(qp, next); | ||
2175 | } | ||
2176 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | ||
2177 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { | ||
2178 | atomic_dec(&e->rdma_sge.mr->refcount); | ||
2179 | e->rdma_sge.mr = NULL; | ||
2180 | } | ||
2181 | ateth = &ohdr->u.atomic_eth; | ||
2182 | vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) | | ||
2183 | be32_to_cpu(ateth->vaddr[1]); | ||
2184 | if (unlikely(vaddr & (sizeof(u64) - 1))) | ||
2185 | goto nack_inv_unlck; | ||
2186 | rkey = be32_to_cpu(ateth->rkey); | ||
2187 | /* Check rkey & NAK */ | ||
2188 | if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), | ||
2189 | vaddr, rkey, | ||
2190 | IB_ACCESS_REMOTE_ATOMIC))) | ||
2191 | goto nack_acc_unlck; | ||
2192 | /* Perform atomic OP and save result. */ | ||
2193 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; | ||
2194 | sdata = be64_to_cpu(ateth->swap_data); | ||
2195 | e->atomic_data = (opcode == OP(FETCH_ADD)) ? | ||
2196 | (u64) atomic64_add_return(sdata, maddr) - sdata : | ||
2197 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | ||
2198 | be64_to_cpu(ateth->compare_data), | ||
2199 | sdata); | ||
2200 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
2201 | qp->r_sge.num_sge = 0; | ||
2202 | e->opcode = opcode; | ||
2203 | e->sent = 0; | ||
2204 | e->psn = psn; | ||
2205 | e->lpsn = psn; | ||
2206 | qp->r_msn++; | ||
2207 | qp->r_psn++; | ||
2208 | qp->r_state = opcode; | ||
2209 | qp->r_nak_state = 0; | ||
2210 | qp->r_head_ack_queue = next; | ||
2211 | |||
2212 | /* Schedule the send tasklet. */ | ||
2213 | qp->s_flags |= QIB_S_RESP_PENDING; | ||
2214 | qib_schedule_send(qp); | ||
2215 | |||
2216 | goto srunlock; | ||
2217 | } | ||
2218 | |||
2219 | default: | ||
2220 | /* NAK unknown opcodes. */ | ||
2221 | goto nack_inv; | ||
2222 | } | ||
2223 | qp->r_psn++; | ||
2224 | qp->r_state = opcode; | ||
2225 | qp->r_ack_psn = psn; | ||
2226 | qp->r_nak_state = 0; | ||
2227 | /* Send an ACK if requested or required. */ | ||
2228 | if (psn & (1 << 31)) | ||
2229 | goto send_ack; | ||
2230 | goto runlock; | ||
2231 | |||
2232 | rnr_nak: | ||
2233 | qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; | ||
2234 | qp->r_ack_psn = qp->r_psn; | ||
2235 | /* Queue RNR NAK for later */ | ||
2236 | if (list_empty(&qp->rspwait)) { | ||
2237 | qp->r_flags |= QIB_R_RSP_NAK; | ||
2238 | atomic_inc(&qp->refcount); | ||
2239 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | ||
2240 | } | ||
2241 | goto runlock; | ||
2242 | |||
2243 | nack_op_err: | ||
2244 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | ||
2245 | qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; | ||
2246 | qp->r_ack_psn = qp->r_psn; | ||
2247 | /* Queue NAK for later */ | ||
2248 | if (list_empty(&qp->rspwait)) { | ||
2249 | qp->r_flags |= QIB_R_RSP_NAK; | ||
2250 | atomic_inc(&qp->refcount); | ||
2251 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | ||
2252 | } | ||
2253 | goto runlock; | ||
2254 | |||
2255 | nack_inv_unlck: | ||
2256 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
2257 | nack_inv: | ||
2258 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | ||
2259 | qp->r_nak_state = IB_NAK_INVALID_REQUEST; | ||
2260 | qp->r_ack_psn = qp->r_psn; | ||
2261 | /* Queue NAK for later */ | ||
2262 | if (list_empty(&qp->rspwait)) { | ||
2263 | qp->r_flags |= QIB_R_RSP_NAK; | ||
2264 | atomic_inc(&qp->refcount); | ||
2265 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | ||
2266 | } | ||
2267 | goto runlock; | ||
2268 | |||
2269 | nack_acc_unlck: | ||
2270 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
2271 | nack_acc: | ||
2272 | qib_rc_error(qp, IB_WC_LOC_PROT_ERR); | ||
2273 | qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; | ||
2274 | qp->r_ack_psn = qp->r_psn; | ||
2275 | send_ack: | ||
2276 | qib_send_rc_ack(qp); | ||
2277 | runlock: | ||
2278 | spin_unlock(&qp->r_lock); | ||
2279 | return; | ||
2280 | |||
2281 | srunlock: | ||
2282 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
2283 | spin_unlock(&qp->r_lock); | ||
2284 | return; | ||
2285 | |||
2286 | sunlock: | ||
2287 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
2288 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c new file mode 100644 index 000000000000..eb78d9367f06 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
@@ -0,0 +1,817 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/spinlock.h> | ||
35 | |||
36 | #include "qib.h" | ||
37 | #include "qib_mad.h" | ||
38 | |||
39 | /* | ||
40 | * Convert the AETH RNR timeout code into the number of microseconds. | ||
41 | */ | ||
42 | const u32 ib_qib_rnr_table[32] = { | ||
43 | 655360, /* 00: 655.36 */ | ||
44 | 10, /* 01: .01 */ | ||
45 | 20, /* 02 .02 */ | ||
46 | 30, /* 03: .03 */ | ||
47 | 40, /* 04: .04 */ | ||
48 | 60, /* 05: .06 */ | ||
49 | 80, /* 06: .08 */ | ||
50 | 120, /* 07: .12 */ | ||
51 | 160, /* 08: .16 */ | ||
52 | 240, /* 09: .24 */ | ||
53 | 320, /* 0A: .32 */ | ||
54 | 480, /* 0B: .48 */ | ||
55 | 640, /* 0C: .64 */ | ||
56 | 960, /* 0D: .96 */ | ||
57 | 1280, /* 0E: 1.28 */ | ||
58 | 1920, /* 0F: 1.92 */ | ||
59 | 2560, /* 10: 2.56 */ | ||
60 | 3840, /* 11: 3.84 */ | ||
61 | 5120, /* 12: 5.12 */ | ||
62 | 7680, /* 13: 7.68 */ | ||
63 | 10240, /* 14: 10.24 */ | ||
64 | 15360, /* 15: 15.36 */ | ||
65 | 20480, /* 16: 20.48 */ | ||
66 | 30720, /* 17: 30.72 */ | ||
67 | 40960, /* 18: 40.96 */ | ||
68 | 61440, /* 19: 61.44 */ | ||
69 | 81920, /* 1A: 81.92 */ | ||
70 | 122880, /* 1B: 122.88 */ | ||
71 | 163840, /* 1C: 163.84 */ | ||
72 | 245760, /* 1D: 245.76 */ | ||
73 | 327680, /* 1E: 327.68 */ | ||
74 | 491520 /* 1F: 491.52 */ | ||
75 | }; | ||
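The 5-bit RNR NAK timer code from the AETH indexes this table to get a delay in microseconds. A minimal sketch of how a requester arms its retry timer from that code, mirroring the RNR handling in qib_ruc_loopback() later in this file (the function name below is hypothetical):

	/* Sketch: arm an RNR retry timer from a 5-bit RNR timer code. */
	static void example_arm_rnr_timer(struct qib_qp *sqp, u8 rnr_code)
	{
		sqp->s_flags |= QIB_S_WAIT_RNR;
		sqp->s_timer.function = qib_rc_rnr_retry;
		sqp->s_timer.expires = jiffies +
			usecs_to_jiffies(ib_qib_rnr_table[rnr_code & 0x1f]);
		add_timer(&sqp->s_timer);
	}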
76 | |||
77 | /* | ||
78 | * Validate a RWQE and fill in the SGE state. | ||
79 | * Return 1 if OK. | ||
80 | */ | ||
81 | static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) | ||
82 | { | ||
83 | int i, j, ret; | ||
84 | struct ib_wc wc; | ||
85 | struct qib_lkey_table *rkt; | ||
86 | struct qib_pd *pd; | ||
87 | struct qib_sge_state *ss; | ||
88 | |||
89 | rkt = &to_idev(qp->ibqp.device)->lk_table; | ||
90 | pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); | ||
91 | ss = &qp->r_sge; | ||
92 | ss->sg_list = qp->r_sg_list; | ||
93 | qp->r_len = 0; | ||
94 | for (i = j = 0; i < wqe->num_sge; i++) { | ||
95 | if (wqe->sg_list[i].length == 0) | ||
96 | continue; | ||
97 | /* Check LKEY */ | ||
98 | if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, | ||
99 | &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) | ||
100 | goto bad_lkey; | ||
101 | qp->r_len += wqe->sg_list[i].length; | ||
102 | j++; | ||
103 | } | ||
104 | ss->num_sge = j; | ||
105 | ss->total_len = qp->r_len; | ||
106 | ret = 1; | ||
107 | goto bail; | ||
108 | |||
109 | bad_lkey: | ||
110 | while (j) { | ||
111 | struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; | ||
112 | |||
113 | atomic_dec(&sge->mr->refcount); | ||
114 | } | ||
115 | ss->num_sge = 0; | ||
116 | memset(&wc, 0, sizeof(wc)); | ||
117 | wc.wr_id = wqe->wr_id; | ||
118 | wc.status = IB_WC_LOC_PROT_ERR; | ||
119 | wc.opcode = IB_WC_RECV; | ||
120 | wc.qp = &qp->ibqp; | ||
121 | /* Signal solicited completion event. */ | ||
122 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); | ||
123 | ret = 0; | ||
124 | bail: | ||
125 | return ret; | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * qib_get_rwqe - copy the next RWQE into the QP's receive state | ||
130 | * @qp: the QP | ||
131 | * @wr_id_only: update qp->r_wr_id only, not qp->r_sge | ||
132 | * | ||
133 | * Return -1 if there is a local error, 0 if no RWQE is available, | ||
134 | * otherwise return 1. | ||
135 | * | ||
136 | * Can be called from interrupt level. | ||
137 | */ | ||
138 | int qib_get_rwqe(struct qib_qp *qp, int wr_id_only) | ||
139 | { | ||
140 | unsigned long flags; | ||
141 | struct qib_rq *rq; | ||
142 | struct qib_rwq *wq; | ||
143 | struct qib_srq *srq; | ||
144 | struct qib_rwqe *wqe; | ||
145 | void (*handler)(struct ib_event *, void *); | ||
146 | u32 tail; | ||
147 | int ret; | ||
148 | |||
149 | if (qp->ibqp.srq) { | ||
150 | srq = to_isrq(qp->ibqp.srq); | ||
151 | handler = srq->ibsrq.event_handler; | ||
152 | rq = &srq->rq; | ||
153 | } else { | ||
154 | srq = NULL; | ||
155 | handler = NULL; | ||
156 | rq = &qp->r_rq; | ||
157 | } | ||
158 | |||
159 | spin_lock_irqsave(&rq->lock, flags); | ||
160 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { | ||
161 | ret = 0; | ||
162 | goto unlock; | ||
163 | } | ||
164 | |||
165 | wq = rq->wq; | ||
166 | tail = wq->tail; | ||
167 | /* Validate tail before using it since it is user writable. */ | ||
168 | if (tail >= rq->size) | ||
169 | tail = 0; | ||
170 | if (unlikely(tail == wq->head)) { | ||
171 | ret = 0; | ||
172 | goto unlock; | ||
173 | } | ||
174 | /* Make sure entry is read after head index is read. */ | ||
175 | smp_rmb(); | ||
176 | wqe = get_rwqe_ptr(rq, tail); | ||
177 | /* | ||
178 | * Even though we update the tail index in memory, the verbs | ||
179 | * consumer is not supposed to post more entries until a | ||
180 | * completion is generated. | ||
181 | */ | ||
182 | if (++tail >= rq->size) | ||
183 | tail = 0; | ||
184 | wq->tail = tail; | ||
185 | if (!wr_id_only && !qib_init_sge(qp, wqe)) { | ||
186 | ret = -1; | ||
187 | goto unlock; | ||
188 | } | ||
189 | qp->r_wr_id = wqe->wr_id; | ||
190 | |||
191 | ret = 1; | ||
192 | set_bit(QIB_R_WRID_VALID, &qp->r_aflags); | ||
193 | if (handler) { | ||
194 | u32 n; | ||
195 | |||
196 | /* | ||
197 | * Validate head pointer value and compute | ||
198 | * the number of remaining WQEs. | ||
199 | */ | ||
200 | n = wq->head; | ||
201 | if (n >= rq->size) | ||
202 | n = 0; | ||
203 | if (n < tail) | ||
204 | n += rq->size - tail; | ||
205 | else | ||
206 | n -= tail; | ||
207 | if (n < srq->limit) { | ||
208 | struct ib_event ev; | ||
209 | |||
210 | srq->limit = 0; | ||
211 | spin_unlock_irqrestore(&rq->lock, flags); | ||
212 | ev.device = qp->ibqp.device; | ||
213 | ev.element.srq = qp->ibqp.srq; | ||
214 | ev.event = IB_EVENT_SRQ_LIMIT_REACHED; | ||
215 | handler(&ev, srq->ibsrq.srq_context); | ||
216 | goto bail; | ||
217 | } | ||
218 | } | ||
219 | unlock: | ||
220 | spin_unlock_irqrestore(&rq->lock, flags); | ||
221 | bail: | ||
222 | return ret; | ||
223 | } | ||
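Callers throughout this patch interpret the return value the same way: negative is a local error, zero means no receive WQE is posted, and one means qp->r_wr_id (and qp->r_sge, unless wr_id_only was set) is valid. A condensed caller sketch, modeled on the RC receive path in qib_rc_rcv() above:

	ret = qib_get_rwqe(qp, 0);
	if (ret < 0)
		goto nack_op_err;	/* local QP operation error */
	if (!ret)
		goto rnr_nak;		/* no RWQE posted: RNR NAK the requester */
	/* ret == 1: qp->r_wr_id and qp->r_sge now describe the receive buffer */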
224 | |||
225 | /* | ||
226 | * Switch to alternate path. | ||
227 | * The QP s_lock should be held and interrupts disabled. | ||
228 | */ | ||
229 | void qib_migrate_qp(struct qib_qp *qp) | ||
230 | { | ||
231 | struct ib_event ev; | ||
232 | |||
233 | qp->s_mig_state = IB_MIG_MIGRATED; | ||
234 | qp->remote_ah_attr = qp->alt_ah_attr; | ||
235 | qp->port_num = qp->alt_ah_attr.port_num; | ||
236 | qp->s_pkey_index = qp->s_alt_pkey_index; | ||
237 | |||
238 | ev.device = qp->ibqp.device; | ||
239 | ev.element.qp = &qp->ibqp; | ||
240 | ev.event = IB_EVENT_PATH_MIG; | ||
241 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); | ||
242 | } | ||
243 | |||
244 | static __be64 get_sguid(struct qib_ibport *ibp, unsigned index) | ||
245 | { | ||
246 | if (!index) { | ||
247 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
248 | |||
249 | return ppd->guid; | ||
250 | } else | ||
251 | return ibp->guids[index - 1]; | ||
252 | } | ||
253 | |||
254 | static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) | ||
255 | { | ||
256 | return (gid->global.interface_id == id && | ||
257 | (gid->global.subnet_prefix == gid_prefix || | ||
258 | gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX)); | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * Validate an incoming packet's header against the QP's address info. | ||
263 | * This should be called with the QP s_lock held. | ||
264 | */ | ||
265 | int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, | ||
266 | int has_grh, struct qib_qp *qp, u32 bth0) | ||
267 | { | ||
268 | __be64 guid; | ||
269 | |||
270 | if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { | ||
271 | if (!has_grh) { | ||
272 | if (qp->alt_ah_attr.ah_flags & IB_AH_GRH) | ||
273 | goto err; | ||
274 | } else { | ||
275 | if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH)) | ||
276 | goto err; | ||
277 | guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index); | ||
278 | if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) | ||
279 | goto err; | ||
280 | if (!gid_ok(&hdr->u.l.grh.sgid, | ||
281 | qp->alt_ah_attr.grh.dgid.global.subnet_prefix, | ||
282 | qp->alt_ah_attr.grh.dgid.global.interface_id)) | ||
283 | goto err; | ||
284 | } | ||
285 | if (!qib_pkey_ok((u16)bth0, | ||
286 | qib_get_pkey(ibp, qp->s_alt_pkey_index))) { | ||
287 | qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, | ||
288 | (u16)bth0, | ||
289 | (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, | ||
290 | 0, qp->ibqp.qp_num, | ||
291 | hdr->lrh[3], hdr->lrh[1]); | ||
292 | goto err; | ||
293 | } | ||
294 | /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */ | ||
295 | if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || | ||
296 | ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) | ||
297 | goto err; | ||
298 | qib_migrate_qp(qp); | ||
299 | } else { | ||
300 | if (!has_grh) { | ||
301 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) | ||
302 | goto err; | ||
303 | } else { | ||
304 | if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) | ||
305 | goto err; | ||
306 | guid = get_sguid(ibp, | ||
307 | qp->remote_ah_attr.grh.sgid_index); | ||
308 | if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) | ||
309 | goto err; | ||
310 | if (!gid_ok(&hdr->u.l.grh.sgid, | ||
311 | qp->remote_ah_attr.grh.dgid.global.subnet_prefix, | ||
312 | qp->remote_ah_attr.grh.dgid.global.interface_id)) | ||
313 | goto err; | ||
314 | } | ||
315 | if (!qib_pkey_ok((u16)bth0, | ||
316 | qib_get_pkey(ibp, qp->s_pkey_index))) { | ||
317 | qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, | ||
318 | (u16)bth0, | ||
319 | (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, | ||
320 | 0, qp->ibqp.qp_num, | ||
321 | hdr->lrh[3], hdr->lrh[1]); | ||
322 | goto err; | ||
323 | } | ||
324 | /* Validate the SLID. See Ch. 9.6.1.5 */ | ||
325 | if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid || | ||
326 | ppd_from_ibp(ibp)->port != qp->port_num) | ||
327 | goto err; | ||
328 | if (qp->s_mig_state == IB_MIG_REARM && | ||
329 | !(bth0 & IB_BTH_MIG_REQ)) | ||
330 | qp->s_mig_state = IB_MIG_ARMED; | ||
331 | } | ||
332 | |||
333 | return 0; | ||
334 | |||
335 | err: | ||
336 | return 1; | ||
337 | } | ||
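The caller is expected to hold s_lock and to silently drop the packet when this returns nonzero; the RC receive path earlier in this patch uses exactly this pattern. A condensed sketch of the call site:

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, be32_to_cpu(ohdr->bth[0])))
		goto sunlock;		/* drop the packet, just release s_lock */
	spin_unlock_irqrestore(&qp->s_lock, flags);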
338 | |||
339 | /** | ||
340 | * qib_ruc_loopback - handle UC and RC loopback requests | ||
341 | * @sqp: the sending QP | ||
342 | * | ||
343 | * This is called from qib_do_send() to | ||
344 | * forward a WQE addressed to the same HCA. | ||
345 | * Note that although we are single threaded due to the tasklet, we still | ||
346 | * have to protect against post_send(). We don't have to worry about | ||
347 | * receive interrupts since this is a connected protocol and all packets | ||
348 | * will pass through here. | ||
349 | */ | ||
350 | static void qib_ruc_loopback(struct qib_qp *sqp) | ||
351 | { | ||
352 | struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); | ||
353 | struct qib_qp *qp; | ||
354 | struct qib_swqe *wqe; | ||
355 | struct qib_sge *sge; | ||
356 | unsigned long flags; | ||
357 | struct ib_wc wc; | ||
358 | u64 sdata; | ||
359 | atomic64_t *maddr; | ||
360 | enum ib_wc_status send_status; | ||
361 | int release; | ||
362 | int ret; | ||
363 | |||
364 | /* | ||
365 | * Note that we check the responder QP state after | ||
366 | * checking the requester's state. | ||
367 | */ | ||
368 | qp = qib_lookup_qpn(ibp, sqp->remote_qpn); | ||
369 | |||
370 | spin_lock_irqsave(&sqp->s_lock, flags); | ||
371 | |||
372 | /* Return if we are already busy processing a work request. */ | ||
373 | if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) || | ||
374 | !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND)) | ||
375 | goto unlock; | ||
376 | |||
377 | sqp->s_flags |= QIB_S_BUSY; | ||
378 | |||
379 | again: | ||
380 | if (sqp->s_last == sqp->s_head) | ||
381 | goto clr_busy; | ||
382 | wqe = get_swqe_ptr(sqp, sqp->s_last); | ||
383 | |||
384 | /* Return if it is not OK to start a new work request. */ | ||
385 | if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) { | ||
386 | if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND)) | ||
387 | goto clr_busy; | ||
388 | /* We are in the error state, flush the work request. */ | ||
389 | send_status = IB_WC_WR_FLUSH_ERR; | ||
390 | goto flush_send; | ||
391 | } | ||
392 | |||
393 | /* | ||
394 | * We can rely on the entry not changing without the s_lock | ||
395 | * being held until we update s_last. | ||
396 | * We increment s_cur to indicate s_last is in progress. | ||
397 | */ | ||
398 | if (sqp->s_last == sqp->s_cur) { | ||
399 | if (++sqp->s_cur >= sqp->s_size) | ||
400 | sqp->s_cur = 0; | ||
401 | } | ||
402 | spin_unlock_irqrestore(&sqp->s_lock, flags); | ||
403 | |||
404 | if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) || | ||
405 | qp->ibqp.qp_type != sqp->ibqp.qp_type) { | ||
406 | ibp->n_pkt_drops++; | ||
407 | /* | ||
408 | * For RC, the requester would timeout and retry so | ||
409 | * shortcut the timeouts and just signal too many retries. | ||
410 | */ | ||
411 | if (sqp->ibqp.qp_type == IB_QPT_RC) | ||
412 | send_status = IB_WC_RETRY_EXC_ERR; | ||
413 | else | ||
414 | send_status = IB_WC_SUCCESS; | ||
415 | goto serr; | ||
416 | } | ||
417 | |||
418 | memset(&wc, 0, sizeof wc); | ||
419 | send_status = IB_WC_SUCCESS; | ||
420 | |||
421 | release = 1; | ||
422 | sqp->s_sge.sge = wqe->sg_list[0]; | ||
423 | sqp->s_sge.sg_list = wqe->sg_list + 1; | ||
424 | sqp->s_sge.num_sge = wqe->wr.num_sge; | ||
425 | sqp->s_len = wqe->length; | ||
426 | switch (wqe->wr.opcode) { | ||
427 | case IB_WR_SEND_WITH_IMM: | ||
428 | wc.wc_flags = IB_WC_WITH_IMM; | ||
429 | wc.ex.imm_data = wqe->wr.ex.imm_data; | ||
430 | /* FALLTHROUGH */ | ||
431 | case IB_WR_SEND: | ||
432 | ret = qib_get_rwqe(qp, 0); | ||
433 | if (ret < 0) | ||
434 | goto op_err; | ||
435 | if (!ret) | ||
436 | goto rnr_nak; | ||
437 | break; | ||
438 | |||
439 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
440 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) | ||
441 | goto inv_err; | ||
442 | wc.wc_flags = IB_WC_WITH_IMM; | ||
443 | wc.ex.imm_data = wqe->wr.ex.imm_data; | ||
444 | ret = qib_get_rwqe(qp, 1); | ||
445 | if (ret < 0) | ||
446 | goto op_err; | ||
447 | if (!ret) | ||
448 | goto rnr_nak; | ||
449 | /* FALLTHROUGH */ | ||
450 | case IB_WR_RDMA_WRITE: | ||
451 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) | ||
452 | goto inv_err; | ||
453 | if (wqe->length == 0) | ||
454 | break; | ||
455 | if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, | ||
456 | wqe->wr.wr.rdma.remote_addr, | ||
457 | wqe->wr.wr.rdma.rkey, | ||
458 | IB_ACCESS_REMOTE_WRITE))) | ||
459 | goto acc_err; | ||
460 | qp->r_sge.sg_list = NULL; | ||
461 | qp->r_sge.num_sge = 1; | ||
462 | qp->r_sge.total_len = wqe->length; | ||
463 | break; | ||
464 | |||
465 | case IB_WR_RDMA_READ: | ||
466 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) | ||
467 | goto inv_err; | ||
468 | if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, | ||
469 | wqe->wr.wr.rdma.remote_addr, | ||
470 | wqe->wr.wr.rdma.rkey, | ||
471 | IB_ACCESS_REMOTE_READ))) | ||
472 | goto acc_err; | ||
473 | release = 0; | ||
474 | sqp->s_sge.sg_list = NULL; | ||
475 | sqp->s_sge.num_sge = 1; | ||
476 | qp->r_sge.sge = wqe->sg_list[0]; | ||
477 | qp->r_sge.sg_list = wqe->sg_list + 1; | ||
478 | qp->r_sge.num_sge = wqe->wr.num_sge; | ||
479 | qp->r_sge.total_len = wqe->length; | ||
480 | break; | ||
481 | |||
482 | case IB_WR_ATOMIC_CMP_AND_SWP: | ||
483 | case IB_WR_ATOMIC_FETCH_AND_ADD: | ||
484 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) | ||
485 | goto inv_err; | ||
486 | if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), | ||
487 | wqe->wr.wr.atomic.remote_addr, | ||
488 | wqe->wr.wr.atomic.rkey, | ||
489 | IB_ACCESS_REMOTE_ATOMIC))) | ||
490 | goto acc_err; | ||
491 | /* Perform atomic OP and save result. */ | ||
492 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; | ||
493 | sdata = wqe->wr.wr.atomic.compare_add; | ||
494 | *(u64 *) sqp->s_sge.sge.vaddr = | ||
495 | (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? | ||
496 | (u64) atomic64_add_return(sdata, maddr) - sdata : | ||
497 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | ||
498 | sdata, wqe->wr.wr.atomic.swap); | ||
499 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
500 | qp->r_sge.num_sge = 0; | ||
501 | goto send_comp; | ||
502 | |||
503 | default: | ||
504 | send_status = IB_WC_LOC_QP_OP_ERR; | ||
505 | goto serr; | ||
506 | } | ||
507 | |||
508 | sge = &sqp->s_sge.sge; | ||
509 | while (sqp->s_len) { | ||
510 | u32 len = sqp->s_len; | ||
511 | |||
512 | if (len > sge->length) | ||
513 | len = sge->length; | ||
514 | if (len > sge->sge_length) | ||
515 | len = sge->sge_length; | ||
516 | BUG_ON(len == 0); | ||
517 | qib_copy_sge(&qp->r_sge, sge->vaddr, len, release); | ||
518 | sge->vaddr += len; | ||
519 | sge->length -= len; | ||
520 | sge->sge_length -= len; | ||
521 | if (sge->sge_length == 0) { | ||
522 | if (!release) | ||
523 | atomic_dec(&sge->mr->refcount); | ||
524 | if (--sqp->s_sge.num_sge) | ||
525 | *sge = *sqp->s_sge.sg_list++; | ||
526 | } else if (sge->length == 0 && sge->mr->lkey) { | ||
527 | if (++sge->n >= QIB_SEGSZ) { | ||
528 | if (++sge->m >= sge->mr->mapsz) | ||
529 | break; | ||
530 | sge->n = 0; | ||
531 | } | ||
532 | sge->vaddr = | ||
533 | sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
534 | sge->length = | ||
535 | sge->mr->map[sge->m]->segs[sge->n].length; | ||
536 | } | ||
537 | sqp->s_len -= len; | ||
538 | } | ||
539 | if (release) | ||
540 | while (qp->r_sge.num_sge) { | ||
541 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
542 | if (--qp->r_sge.num_sge) | ||
543 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
544 | } | ||
545 | |||
546 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | ||
547 | goto send_comp; | ||
548 | |||
549 | if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) | ||
550 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; | ||
551 | else | ||
552 | wc.opcode = IB_WC_RECV; | ||
553 | wc.wr_id = qp->r_wr_id; | ||
554 | wc.status = IB_WC_SUCCESS; | ||
555 | wc.byte_len = wqe->length; | ||
556 | wc.qp = &qp->ibqp; | ||
557 | wc.src_qp = qp->remote_qpn; | ||
558 | wc.slid = qp->remote_ah_attr.dlid; | ||
559 | wc.sl = qp->remote_ah_attr.sl; | ||
560 | wc.port_num = 1; | ||
561 | /* Signal completion event if the solicited bit is set. */ | ||
562 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | ||
563 | wqe->wr.send_flags & IB_SEND_SOLICITED); | ||
564 | |||
565 | send_comp: | ||
566 | spin_lock_irqsave(&sqp->s_lock, flags); | ||
567 | ibp->n_loop_pkts++; | ||
568 | flush_send: | ||
569 | sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; | ||
570 | qib_send_complete(sqp, wqe, send_status); | ||
571 | goto again; | ||
572 | |||
573 | rnr_nak: | ||
574 | /* Handle RNR NAK */ | ||
575 | if (qp->ibqp.qp_type == IB_QPT_UC) | ||
576 | goto send_comp; | ||
577 | ibp->n_rnr_naks++; | ||
578 | /* | ||
579 | * Note: we don't need the s_lock held since the BUSY flag | ||
580 | * makes this single threaded. | ||
581 | */ | ||
582 | if (sqp->s_rnr_retry == 0) { | ||
583 | send_status = IB_WC_RNR_RETRY_EXC_ERR; | ||
584 | goto serr; | ||
585 | } | ||
586 | if (sqp->s_rnr_retry_cnt < 7) | ||
587 | sqp->s_rnr_retry--; | ||
588 | spin_lock_irqsave(&sqp->s_lock, flags); | ||
589 | if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK)) | ||
590 | goto clr_busy; | ||
591 | sqp->s_flags |= QIB_S_WAIT_RNR; | ||
592 | sqp->s_timer.function = qib_rc_rnr_retry; | ||
593 | sqp->s_timer.expires = jiffies + | ||
594 | usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]); | ||
595 | add_timer(&sqp->s_timer); | ||
596 | goto clr_busy; | ||
597 | |||
598 | op_err: | ||
599 | send_status = IB_WC_REM_OP_ERR; | ||
600 | wc.status = IB_WC_LOC_QP_OP_ERR; | ||
601 | goto err; | ||
602 | |||
603 | inv_err: | ||
604 | send_status = IB_WC_REM_INV_REQ_ERR; | ||
605 | wc.status = IB_WC_LOC_QP_OP_ERR; | ||
606 | goto err; | ||
607 | |||
608 | acc_err: | ||
609 | send_status = IB_WC_REM_ACCESS_ERR; | ||
610 | wc.status = IB_WC_LOC_PROT_ERR; | ||
611 | err: | ||
612 | /* responder goes to error state */ | ||
613 | qib_rc_error(qp, wc.status); | ||
614 | |||
615 | serr: | ||
616 | spin_lock_irqsave(&sqp->s_lock, flags); | ||
617 | qib_send_complete(sqp, wqe, send_status); | ||
618 | if (sqp->ibqp.qp_type == IB_QPT_RC) { | ||
619 | int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR); | ||
620 | |||
621 | sqp->s_flags &= ~QIB_S_BUSY; | ||
622 | spin_unlock_irqrestore(&sqp->s_lock, flags); | ||
623 | if (lastwqe) { | ||
624 | struct ib_event ev; | ||
625 | |||
626 | ev.device = sqp->ibqp.device; | ||
627 | ev.element.qp = &sqp->ibqp; | ||
628 | ev.event = IB_EVENT_QP_LAST_WQE_REACHED; | ||
629 | sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context); | ||
630 | } | ||
631 | goto done; | ||
632 | } | ||
633 | clr_busy: | ||
634 | sqp->s_flags &= ~QIB_S_BUSY; | ||
635 | unlock: | ||
636 | spin_unlock_irqrestore(&sqp->s_lock, flags); | ||
637 | done: | ||
638 | if (qp && atomic_dec_and_test(&qp->refcount)) | ||
639 | wake_up(&qp->wait); | ||
640 | } | ||
641 | |||
642 | /** | ||
643 | * qib_make_grh - construct a GRH header | ||
644 | * @ibp: a pointer to the IB port | ||
645 | * @hdr: a pointer to the GRH header being constructed | ||
646 | * @grh: the global route address to send to | ||
647 | * @hwords: the number of 32 bit words of header being sent | ||
648 | * @nwords: the number of 32 bit words of data being sent | ||
649 | * | ||
650 | * Return the size of the header in 32 bit words. | ||
651 | */ | ||
652 | u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr, | ||
653 | struct ib_global_route *grh, u32 hwords, u32 nwords) | ||
654 | { | ||
655 | hdr->version_tclass_flow = | ||
656 | cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) | | ||
657 | (grh->traffic_class << IB_GRH_TCLASS_SHIFT) | | ||
658 | (grh->flow_label << IB_GRH_FLOW_SHIFT)); | ||
659 | hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2); | ||
660 | /* next_hdr is defined by C8-7 in ch. 8.4.1 */ | ||
661 | hdr->next_hdr = IB_GRH_NEXT_HDR; | ||
662 | hdr->hop_limit = grh->hop_limit; | ||
663 | /* The SGID is 32-bit aligned. */ | ||
664 | hdr->sgid.global.subnet_prefix = ibp->gid_prefix; | ||
665 | hdr->sgid.global.interface_id = grh->sgid_index ? | ||
666 | ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid; | ||
667 | hdr->dgid = grh->dgid; | ||
668 | |||
669 | /* GRH header size in 32-bit words. */ | ||
670 | return sizeof(struct ib_grh) / sizeof(u32); | ||
671 | } | ||
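hdr->paylen covers everything that follows the GRH, in bytes: the header word count minus the two LRH words, plus the data words and the ICRC, shifted left by 2 to convert 32-bit words to bytes. A small worked example, assuming SIZE_OF_CRC is one 32-bit word and the header is LRH + BTH + RETH:

	/*
	 * hwords = 2 (LRH) + 3 (BTH) + 4 (RETH) = 9, nwords = 64 (256 bytes):
	 * paylen = (9 - 2 + 64 + 1) << 2 = 288 bytes
	 *        = BTH (12) + RETH (16) + data (256) + ICRC (4).
	 */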
672 | |||
673 | void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, | ||
674 | u32 bth0, u32 bth2) | ||
675 | { | ||
676 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
677 | u16 lrh0; | ||
678 | u32 nwords; | ||
679 | u32 extra_bytes; | ||
680 | |||
681 | /* Construct the header. */ | ||
682 | extra_bytes = -qp->s_cur_size & 3; | ||
683 | nwords = (qp->s_cur_size + extra_bytes) >> 2; | ||
684 | lrh0 = QIB_LRH_BTH; | ||
685 | if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { | ||
686 | qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh, | ||
687 | &qp->remote_ah_attr.grh, | ||
688 | qp->s_hdrwords, nwords); | ||
689 | lrh0 = QIB_LRH_GRH; | ||
690 | } | ||
691 | lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | | ||
692 | qp->remote_ah_attr.sl << 4; | ||
693 | qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); | ||
694 | qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); | ||
695 | qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); | ||
696 | qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | | ||
697 | qp->remote_ah_attr.src_path_bits); | ||
698 | bth0 |= qib_get_pkey(ibp, qp->s_pkey_index); | ||
699 | bth0 |= extra_bytes << 20; | ||
700 | if (qp->s_mig_state == IB_MIG_MIGRATED) | ||
701 | bth0 |= IB_BTH_MIG_REQ; | ||
702 | ohdr->bth[0] = cpu_to_be32(bth0); | ||
703 | ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); | ||
704 | ohdr->bth[2] = cpu_to_be32(bth2); | ||
705 | } | ||
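The expression -qp->s_cur_size & 3 rounds the payload up to a 4-byte boundary; nwords is that padded payload in 32-bit words, and the pad count is advertised in the PadCnt bits of BTH0 (bits 20-21). A short worked example with assumed numbers:

	/*
	 * s_cur_size = 13 payload bytes:
	 *   extra_bytes = -13 & 3       = 3  (pad to 16 bytes)
	 *   nwords      = (13 + 3) >> 2 = 4  32-bit words
	 *   bth0       |= 3 << 20            (PadCnt)
	 */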
706 | |||
707 | /** | ||
708 | * qib_do_send - perform a send on a QP | ||
709 | * @work: contains a pointer to the QP | ||
710 | * | ||
711 | * Process entries in the send work queue until credit or queue is | ||
712 | * exhausted. Only allow one CPU to send a packet per QP (tasklet). | ||
713 | * Otherwise, two threads could send packets out of order. | ||
714 | */ | ||
715 | void qib_do_send(struct work_struct *work) | ||
716 | { | ||
717 | struct qib_qp *qp = container_of(work, struct qib_qp, s_work); | ||
718 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
719 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
720 | int (*make_req)(struct qib_qp *qp); | ||
721 | unsigned long flags; | ||
722 | |||
723 | if ((qp->ibqp.qp_type == IB_QPT_RC || | ||
724 | qp->ibqp.qp_type == IB_QPT_UC) && | ||
725 | (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) { | ||
726 | qib_ruc_loopback(qp); | ||
727 | return; | ||
728 | } | ||
729 | |||
730 | if (qp->ibqp.qp_type == IB_QPT_RC) | ||
731 | make_req = qib_make_rc_req; | ||
732 | else if (qp->ibqp.qp_type == IB_QPT_UC) | ||
733 | make_req = qib_make_uc_req; | ||
734 | else | ||
735 | make_req = qib_make_ud_req; | ||
736 | |||
737 | spin_lock_irqsave(&qp->s_lock, flags); | ||
738 | |||
739 | /* Return if we are already busy processing a work request. */ | ||
740 | if (!qib_send_ok(qp)) { | ||
741 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
742 | return; | ||
743 | } | ||
744 | |||
745 | qp->s_flags |= QIB_S_BUSY; | ||
746 | |||
747 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
748 | |||
749 | do { | ||
750 | /* Check for a constructed packet to be sent. */ | ||
751 | if (qp->s_hdrwords != 0) { | ||
752 | /* | ||
753 | * If the packet cannot be sent now, return and | ||
754 | * the send tasklet will be woken up later. | ||
755 | */ | ||
756 | if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords, | ||
757 | qp->s_cur_sge, qp->s_cur_size)) | ||
758 | break; | ||
759 | /* Record that s_hdr is empty. */ | ||
760 | qp->s_hdrwords = 0; | ||
761 | } | ||
762 | } while (make_req(qp)); | ||
763 | } | ||
764 | |||
765 | /* | ||
766 | * This should be called with s_lock held. | ||
767 | */ | ||
768 | void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, | ||
769 | enum ib_wc_status status) | ||
770 | { | ||
771 | u32 old_last, last; | ||
772 | unsigned i; | ||
773 | |||
774 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) | ||
775 | return; | ||
776 | |||
777 | for (i = 0; i < wqe->wr.num_sge; i++) { | ||
778 | struct qib_sge *sge = &wqe->sg_list[i]; | ||
779 | |||
780 | atomic_dec(&sge->mr->refcount); | ||
781 | } | ||
782 | if (qp->ibqp.qp_type == IB_QPT_UD || | ||
783 | qp->ibqp.qp_type == IB_QPT_SMI || | ||
784 | qp->ibqp.qp_type == IB_QPT_GSI) | ||
785 | atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); | ||
786 | |||
787 | /* See ch. 11.2.4.1 and 10.7.3.1 */ | ||
788 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || | ||
789 | (wqe->wr.send_flags & IB_SEND_SIGNALED) || | ||
790 | status != IB_WC_SUCCESS) { | ||
791 | struct ib_wc wc; | ||
792 | |||
793 | memset(&wc, 0, sizeof wc); | ||
794 | wc.wr_id = wqe->wr.wr_id; | ||
795 | wc.status = status; | ||
796 | wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; | ||
797 | wc.qp = &qp->ibqp; | ||
798 | if (status == IB_WC_SUCCESS) | ||
799 | wc.byte_len = wqe->length; | ||
800 | qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, | ||
801 | status != IB_WC_SUCCESS); | ||
802 | } | ||
803 | |||
804 | last = qp->s_last; | ||
805 | old_last = last; | ||
806 | if (++last >= qp->s_size) | ||
807 | last = 0; | ||
808 | qp->s_last = last; | ||
809 | if (qp->s_acked == old_last) | ||
810 | qp->s_acked = last; | ||
811 | if (qp->s_cur == old_last) | ||
812 | qp->s_cur = last; | ||
813 | if (qp->s_tail == old_last) | ||
814 | qp->s_tail = last; | ||
815 | if (qp->state == IB_QPS_SQD && last == qp->s_cur) | ||
816 | qp->s_draining = 0; | ||
817 | } | ||
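Per the referenced chapters, a send completion is queued only when the QP signals all work requests, the individual work request asked for a completion, or the status is an error; successful unsignaled requests are retired without a CQ entry. A condensed restatement of the test above:

	int gen_completion =
		!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||    /* QP signals every WR */
		(wqe->wr.send_flags & IB_SEND_SIGNALED) || /* this WR was signaled */
		status != IB_WC_SUCCESS;                   /* errors always complete */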
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 2a68d9f624dd..0aeed0e74cb6 100644 --- a/drivers/infiniband/hw/ipath/ipath_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. |
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
@@ -32,22 +32,40 @@ | |||
32 | */ | 32 | */ |
33 | /* | 33 | /* |
34 | * This file contains all of the code that is specific to the SerDes | 34 | * This file contains all of the code that is specific to the SerDes |
35 | * on the InfiniPath 7220 chip. | 35 | * on the QLogic_IB 7220 chip. |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
39 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
40 | 40 | ||
41 | #include "ipath_kernel.h" | 41 | #include "qib.h" |
42 | #include "ipath_registers.h" | 42 | #include "qib_7220.h" |
43 | #include "ipath_7220.h" | 43 | |
44 | /* | ||
45 | * Same as in qib_iba7220.c, but just the registers needed here. | ||
46 | * Could move whole set to qib_7220.h, but decided better to keep | ||
47 | * local. | ||
48 | */ | ||
49 | #define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64)) | ||
50 | #define kr_hwerrclear KREG_IDX(HwErrClear) | ||
51 | #define kr_hwerrmask KREG_IDX(HwErrMask) | ||
52 | #define kr_hwerrstatus KREG_IDX(HwErrStatus) | ||
53 | #define kr_ibcstatus KREG_IDX(IBCStatus) | ||
54 | #define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl) | ||
55 | #define kr_scratch KREG_IDX(Scratch) | ||
56 | #define kr_xgxs_cfg KREG_IDX(XGXSCfg) | ||
57 | /* these are used only here, not in qib_iba7220.c */ | ||
58 | #define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl) | ||
59 | #define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg) | ||
60 | #define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg) | ||
61 | #define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl) | ||
62 | #define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0) | ||
44 | 63 | ||
45 | /* | 64 | /* |
46 | * The IBSerDesMappTable is a memory that holds values to be stored in | 65 | * The IBSerDesMappTable is a memory that holds values to be stored in |
47 | * various SerDes registers by IBC. It is not part of the normal kregs | 66 | * various SerDes registers by IBC. |
48 | * map and is used in exactly one place, hence the #define below. | ||
49 | */ | 67 | */ |
50 | #define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t))) | 68 | #define kr_serdes_maptable KREG_IDX(IBSerDesMappTable) |
51 | 69 | ||
52 | /* | 70 | /* |
53 | * Below used for sdnum parameter, selecting one of the two sections | 71 | * Below used for sdnum parameter, selecting one of the two sections |
@@ -71,42 +89,37 @@ | |||
71 | #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8)) | 89 | #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8)) |
72 | 90 | ||
73 | /* Forward declarations. */ | 91 | /* Forward declarations. */ |
74 | static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, | 92 | static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc, |
75 | u32 data, u32 mask); | 93 | u32 data, u32 mask); |
76 | static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, | 94 | static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, |
77 | int mask); | 95 | int mask); |
78 | static int ipath_sd_trimdone_poll(struct ipath_devdata *dd); | 96 | static int qib_sd_trimdone_poll(struct qib_devdata *dd); |
79 | static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, | 97 | static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where); |
80 | const char *where); | 98 | static int qib_sd_setvals(struct qib_devdata *dd); |
81 | static int ipath_sd_setvals(struct ipath_devdata *dd); | 99 | static int qib_sd_early(struct qib_devdata *dd); |
82 | static int ipath_sd_early(struct ipath_devdata *dd); | 100 | static int qib_sd_dactrim(struct qib_devdata *dd); |
83 | static int ipath_sd_dactrim(struct ipath_devdata *dd); | 101 | static int qib_internal_presets(struct qib_devdata *dd); |
84 | /* Set the registers that IBC may muck with to their default "preset" values */ | ||
85 | int ipath_sd7220_presets(struct ipath_devdata *dd); | ||
86 | static int ipath_internal_presets(struct ipath_devdata *dd); | ||
87 | /* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */ | 102 | /* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */ |
88 | static int ipath_sd_trimself(struct ipath_devdata *dd, int val); | 103 | static int qib_sd_trimself(struct qib_devdata *dd, int val); |
89 | static int epb_access(struct ipath_devdata *dd, int sdnum, int claim); | 104 | static int epb_access(struct qib_devdata *dd, int sdnum, int claim); |
90 | |||
91 | void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup); | ||
92 | 105 | ||
93 | /* | 106 | /* |
94 | * Below keeps track of whether the "once per power-on" initialization has | 107 | * Below keeps track of whether the "once per power-on" initialization has |
95 | * been done, because uC code Version 1.32.17 or higher allows the uC to | 108 | * been done, because uC code Version 1.32.17 or higher allows the uC to |
96 | * be reset at will, and Automatic Equalization may require it. So the | 109 | * be reset at will, and Automatic Equalization may require it. So the |
97 | * state of the reset "pin", as reflected in was_reset parameter to | 110 | * state of the reset "pin", is no longer valid. Instead, we check for the |
98 | * ipath_sd7220_init() is no longer valid. Instead, we check for the | ||
99 | * actual uC code having been loaded. | 111 | * actual uC code having been loaded. |
100 | */ | 112 | */ |
101 | static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd) | 113 | static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd) |
102 | { | 114 | { |
103 | if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0)) | 115 | struct qib_devdata *dd = ppd->dd; |
104 | dd->serdes_first_init_done = 1; | 116 | if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0)) |
105 | return dd->serdes_first_init_done; | 117 | dd->cspec->serdes_first_init_done = 1; |
118 | return dd->cspec->serdes_first_init_done; | ||
106 | } | 119 | } |
107 | 120 | ||
108 | /* repeat #define for local use. "Real" #define is in ipath_iba7220.c */ | 121 | /* repeat #define for local use. "Real" #define is in qib_iba7220.c */ |
109 | #define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL | 122 | #define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL |
110 | #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF)) | 123 | #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF)) |
111 | #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF)) | 124 | #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF)) |
112 | #define UC_PAR_CLR_D 8 | 125 | #define UC_PAR_CLR_D 8 |
@@ -114,25 +127,25 @@ static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd) | |||
114 | #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS) | 127 | #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS) |
115 | #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27) | 128 | #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27) |
116 | 129 | ||
117 | void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd) | 130 | void qib_sd7220_clr_ibpar(struct qib_devdata *dd) |
118 | { | 131 | { |
119 | int ret; | 132 | int ret; |
120 | 133 | ||
121 | /* clear, then re-enable parity errs */ | 134 | /* clear, then re-enable parity errs */ |
122 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, | 135 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, |
123 | UC_PAR_CLR_D, UC_PAR_CLR_M); | 136 | UC_PAR_CLR_D, UC_PAR_CLR_M); |
124 | if (ret < 0) { | 137 | if (ret < 0) { |
125 | ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n"); | 138 | qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n"); |
126 | goto bail; | 139 | goto bail; |
127 | } | 140 | } |
128 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0, | 141 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0, |
129 | UC_PAR_CLR_M); | 142 | UC_PAR_CLR_M); |
130 | 143 | ||
131 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 144 | qib_read_kreg32(dd, kr_scratch); |
132 | udelay(4); | 145 | udelay(4); |
133 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, | 146 | qib_write_kreg(dd, kr_hwerrclear, |
134 | INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); | 147 | QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); |
135 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 148 | qib_read_kreg32(dd, kr_scratch); |
136 | bail: | 149 | bail: |
137 | return; | 150 | return; |
138 | } | 151 | } |
@@ -146,7 +159,7 @@ bail: | |||
146 | #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS) | 159 | #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS) |
147 | #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS) | 160 | #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS) |
148 | 161 | ||
149 | static int ipath_resync_ibepb(struct ipath_devdata *dd) | 162 | static int qib_resync_ibepb(struct qib_devdata *dd) |
150 | { | 163 | { |
151 | int ret, pat, tries, chn; | 164 | int ret, pat, tries, chn; |
152 | u32 loc; | 165 | u32 loc; |
@@ -155,43 +168,42 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd) | |||
155 | chn = 0; | 168 | chn = 0; |
156 | for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) { | 169 | for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) { |
157 | loc = IB_PGUDP(chn); | 170 | loc = IB_PGUDP(chn); |
158 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | 171 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); |
159 | if (ret < 0) { | 172 | if (ret < 0) { |
160 | ipath_dev_err(dd, "Failed read in resync\n"); | 173 | qib_dev_err(dd, "Failed read in resync\n"); |
161 | continue; | 174 | continue; |
162 | } | 175 | } |
163 | if (ret != 0xF0 && ret != 0x55 && tries == 0) | 176 | if (ret != 0xF0 && ret != 0x55 && tries == 0) |
164 | ipath_dev_err(dd, "unexpected pattern in resync\n"); | 177 | qib_dev_err(dd, "unexpected pattern in resync\n"); |
165 | pat = ret ^ 0xA5; /* alternate F0 and 55 */ | 178 | pat = ret ^ 0xA5; /* alternate F0 and 55 */ |
166 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF); | 179 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF); |
167 | if (ret < 0) { | 180 | if (ret < 0) { |
168 | ipath_dev_err(dd, "Failed write in resync\n"); | 181 | qib_dev_err(dd, "Failed write in resync\n"); |
169 | continue; | 182 | continue; |
170 | } | 183 | } |
171 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | 184 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); |
172 | if (ret < 0) { | 185 | if (ret < 0) { |
173 | ipath_dev_err(dd, "Failed re-read in resync\n"); | 186 | qib_dev_err(dd, "Failed re-read in resync\n"); |
174 | continue; | 187 | continue; |
175 | } | 188 | } |
176 | if (ret != pat) { | 189 | if (ret != pat) { |
177 | ipath_dev_err(dd, "Failed compare1 in resync\n"); | 190 | qib_dev_err(dd, "Failed compare1 in resync\n"); |
178 | continue; | 191 | continue; |
179 | } | 192 | } |
180 | loc = IB_CMUDONE(chn); | 193 | loc = IB_CMUDONE(chn); |
181 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); | 194 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); |
182 | if (ret < 0) { | 195 | if (ret < 0) { |
183 | ipath_dev_err(dd, "Failed CMUDONE rd in resync\n"); | 196 | qib_dev_err(dd, "Failed CMUDONE rd in resync\n"); |
184 | continue; | 197 | continue; |
185 | } | 198 | } |
186 | if ((ret & 0x70) != ((chn << 4) | 0x40)) { | 199 | if ((ret & 0x70) != ((chn << 4) | 0x40)) { |
187 | ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n", | 200 | qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n", |
188 | ret, chn); | 201 | ret, chn); |
189 | continue; | 202 | continue; |
190 | } | 203 | } |
191 | if (++chn == 4) | 204 | if (++chn == 4) |
192 | break; /* Success */ | 205 | break; /* Success */ |
193 | } | 206 | } |
194 | ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries); | ||
195 | return (ret > 0) ? 0 : ret; | 207 | return (ret > 0) ? 0 : ret; |
196 | } | 208 | } |
197 | 209 | ||
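A note on the resync loop above: starting from either 0xF0 or 0x55, XOR with 0xA5 yields the other value, so each pass writes the opposite test pattern from the one it just read back, then verifies it. The stand-alone sketch below models that write/read-back handshake against a fake per-channel scratch register; epb_read()/epb_write() are hypothetical stand-ins for qib_sd7220_reg_mod(), not driver API.

#include <stdio.h>
#include <stdint.h>

/* Fake per-channel "IB_PGUDP" scratch registers (hypothetical stand-in). */
static uint8_t pgudp[4] = { 0xF0, 0xF0, 0xF0, 0xF0 };

static int  epb_read(int chn)         { return pgudp[chn]; }
static void epb_write(int chn, int v) { pgudp[chn] = (uint8_t)v; }

int main(void)
{
    int chn = 0, tries;

    for (tries = 0; tries < 16 && chn < 4; ++tries) {
        int cur = epb_read(chn);
        int pat = cur ^ 0xA5;          /* 0xF0 <-> 0x55 */

        epb_write(chn, pat);
        if (epb_read(chn) != pat) {    /* "compare1" failed: retry this chn */
            printf("chn %d: readback mismatch, retrying\n", chn);
            continue;
        }
        printf("chn %d resynced after write of 0x%02X\n", chn, pat);
        ++chn;                          /* move on to the next channel */
    }
    return (chn == 4) ? 0 : 1;
}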
@@ -199,32 +211,32 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd) | |||
199 | * Localize the stuff that should be done to change IB uC reset | 211 | * Localize the stuff that should be done to change IB uC reset |
200 | * returns <0 for errors. | 212 | * returns <0 for errors. |
201 | */ | 213 | */ |
202 | static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst) | 214 | static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst) |
203 | { | 215 | { |
204 | u64 rst_val; | 216 | u64 rst_val; |
205 | int ret = 0; | 217 | int ret = 0; |
206 | unsigned long flags; | 218 | unsigned long flags; |
207 | 219 | ||
208 | rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl); | 220 | rst_val = qib_read_kreg64(dd, kr_ibserdesctrl); |
209 | if (assert_rst) { | 221 | if (assert_rst) { |
210 | /* | 222 | /* |
211 | * Vendor recommends "interrupting" uC before reset, to | 223 | * Vendor recommends "interrupting" uC before reset, to |
212 | * minimize possible glitches. | 224 | * minimize possible glitches. |
213 | */ | 225 | */ |
214 | spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); | 226 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); |
215 | epb_access(dd, IB_7220_SERDES, 1); | 227 | epb_access(dd, IB_7220_SERDES, 1); |
216 | rst_val |= 1ULL; | 228 | rst_val |= 1ULL; |
217 | /* Squelch possible parity error from _asserting_ reset */ | 229 | /* Squelch possible parity error from _asserting_ reset */ |
218 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | 230 | qib_write_kreg(dd, kr_hwerrmask, |
219 | dd->ipath_hwerrmask & | 231 | dd->cspec->hwerrmask & |
220 | ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); | 232 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); |
221 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val); | 233 | qib_write_kreg(dd, kr_ibserdesctrl, rst_val); |
222 | /* flush write, delay to ensure it took effect */ | 234 | /* flush write, delay to ensure it took effect */ |
223 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 235 | qib_read_kreg32(dd, kr_scratch); |
224 | udelay(2); | 236 | udelay(2); |
225 | /* once it's reset, can remove interrupt */ | 237 | /* once it's reset, can remove interrupt */ |
226 | epb_access(dd, IB_7220_SERDES, -1); | 238 | epb_access(dd, IB_7220_SERDES, -1); |
227 | spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); | 239 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); |
228 | } else { | 240 | } else { |
229 | /* | 241 | /* |
230 | * Before we de-assert reset, we need to deal with | 242 | * Before we de-assert reset, we need to deal with |
@@ -235,46 +247,46 @@ static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst) | |||
235 | */ | 247 | */ |
236 | u64 val; | 248 | u64 val; |
237 | rst_val &= ~(1ULL); | 249 | rst_val &= ~(1ULL); |
238 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | 250 | qib_write_kreg(dd, kr_hwerrmask, |
239 | dd->ipath_hwerrmask & | 251 | dd->cspec->hwerrmask & |
240 | ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); | 252 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR); |
241 | 253 | ||
242 | ret = ipath_resync_ibepb(dd); | 254 | ret = qib_resync_ibepb(dd); |
243 | if (ret < 0) | 255 | if (ret < 0) |
244 | ipath_dev_err(dd, "unable to re-sync IB EPB\n"); | 256 | qib_dev_err(dd, "unable to re-sync IB EPB\n"); |
245 | 257 | ||
246 | /* set uC control regs to suppress parity errs */ | 258 | /* set uC control regs to suppress parity errs */ |
247 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1); | 259 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1); |
248 | if (ret < 0) | 260 | if (ret < 0) |
249 | goto bail; | 261 | goto bail; |
250 | /* IB uC code past Version 1.32.17 allows suppression of wdog */ | 262 | /* IB uC code past Version 1.32.17 allows suppression of wdog */ |
251 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, | 263 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, |
252 | 0x80); | 264 | 0x80); |
253 | if (ret < 0) { | 265 | if (ret < 0) { |
254 | ipath_dev_err(dd, "Failed to set WDOG disable\n"); | 266 | qib_dev_err(dd, "Failed to set WDOG disable\n"); |
255 | goto bail; | 267 | goto bail; |
256 | } | 268 | } |
257 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val); | 269 | qib_write_kreg(dd, kr_ibserdesctrl, rst_val); |
258 | /* flush write, delay for startup */ | 270 | /* flush write, delay for startup */ |
259 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 271 | qib_read_kreg32(dd, kr_scratch); |
260 | udelay(1); | 272 | udelay(1); |
261 | /* clear, then re-enable parity errs */ | 273 | /* clear, then re-enable parity errs */ |
262 | ipath_sd7220_clr_ibpar(dd); | 274 | qib_sd7220_clr_ibpar(dd); |
263 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); | 275 | val = qib_read_kreg64(dd, kr_hwerrstatus); |
264 | if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) { | 276 | if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) { |
265 | ipath_dev_err(dd, "IBUC Parity still set after RST\n"); | 277 | qib_dev_err(dd, "IBUC Parity still set after RST\n"); |
266 | dd->ipath_hwerrmask &= | 278 | dd->cspec->hwerrmask &= |
267 | ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR; | 279 | ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR; |
268 | } | 280 | } |
269 | ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, | 281 | qib_write_kreg(dd, kr_hwerrmask, |
270 | dd->ipath_hwerrmask); | 282 | dd->cspec->hwerrmask); |
271 | } | 283 | } |
272 | 284 | ||
273 | bail: | 285 | bail: |
274 | return ret; | 286 | return ret; |
275 | } | 287 | } |
276 | 288 | ||
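For orientation, the assert-reset branch above follows a fixed ordering: claim/"interrupt" the uC over EPB, mask the uC memory-parity error that asserting reset can provoke, write the reset bit, flush with a scratch read, then release the uC again. A minimal sketch of that ordering under assumed register names; read_reg()/write_reg() and the two bit definitions are placeholders, not the driver's register map.

#include <stdint.h>

#define SERDES_RST_BIT    (1ULL << 0)   /* bit 0 of ibserdesctrl */
#define UC_MEM_PARITY_ERR (1ULL << 40)  /* placeholder bit position */

enum { R_IBSERDESCTRL, R_HWERRMASK, R_SCRATCH };

static uint64_t regs[3];
static uint64_t hwerrmask = ~0ULL;

static uint64_t read_reg(int r)          { return regs[r]; }
static void write_reg(int r, uint64_t v) { regs[r] = v; }

static void ibsd_assert_reset(void)
{
    uint64_t rst = read_reg(R_IBSERDESCTRL);

    /* 1. "interrupt" the uC first (epb_access(..., 1) in the driver).   */
    /* 2. mask the uC memory-parity error the assertion can provoke.     */
    write_reg(R_HWERRMASK, hwerrmask & ~UC_MEM_PARITY_ERR);
    /* 3. set the reset bit, then flush the write with a scratch read.   */
    write_reg(R_IBSERDESCTRL, rst | SERDES_RST_BIT);
    (void)read_reg(R_SCRATCH);
    /* 4. brief delay, drop the uC "interrupt", restore hwerrmask later. */
}

int main(void) { ibsd_assert_reset(); return 0; }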
277 | static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, | 289 | static void qib_sd_trimdone_monitor(struct qib_devdata *dd, |
278 | const char *where) | 290 | const char *where) |
279 | { | 291 | { |
280 | int ret, chn, baduns; | 292 | int ret, chn, baduns; |
@@ -286,69 +298,71 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, | |||
286 | /* give time for reset to settle out in EPB */ | 298 | /* give time for reset to settle out in EPB */ |
287 | udelay(2); | 299 | udelay(2); |
288 | 300 | ||
289 | ret = ipath_resync_ibepb(dd); | 301 | ret = qib_resync_ibepb(dd); |
290 | if (ret < 0) | 302 | if (ret < 0) |
291 | ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where); | 303 | qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where); |
292 | 304 | ||
293 | /* Do "sacrificial read" to get EPB in sane state after reset */ | 305 | /* Do "sacrificial read" to get EPB in sane state after reset */ |
294 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0); | 306 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0); |
295 | if (ret < 0) | 307 | if (ret < 0) |
296 | ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where); | 308 | qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where); |
297 | 309 | ||
298 | /* Check/show "summary" Trim-done bit in IBCStatus */ | 310 | /* Check/show "summary" Trim-done bit in IBCStatus */ |
299 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); | 311 | val = qib_read_kreg64(dd, kr_ibcstatus); |
300 | if (val & (1ULL << 11)) | 312 | if (!(val & (1ULL << 11))) |
301 | ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where); | 313 | qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where); |
302 | else | 314 | /* |
303 | ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where); | 315 | * Do "dummy read/mod/wr" to get EPB in sane state after reset |
304 | 316 | * The default value for MPREG6 is 0. | |
317 | */ | ||
305 | udelay(2); | 318 | udelay(2); |
306 | 319 | ||
307 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80); | 320 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80); |
308 | if (ret < 0) | 321 | if (ret < 0) |
309 | ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where); | 322 | qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where); |
310 | udelay(10); | 323 | udelay(10); |
311 | 324 | ||
312 | baduns = 0; | 325 | baduns = 0; |
313 | 326 | ||
314 | for (chn = 3; chn >= 0; --chn) { | 327 | for (chn = 3; chn >= 0; --chn) { |
315 | /* Read CTRL reg for each channel to check TRIMDONE */ | 328 | /* Read CTRL reg for each channel to check TRIMDONE */ |
316 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, | 329 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
317 | IB_CTRL2(chn), 0, 0); | 330 | IB_CTRL2(chn), 0, 0); |
318 | if (ret < 0) | 331 | if (ret < 0) |
319 | ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d" | 332 | qib_dev_err(dd, "Failed checking TRIMDONE, chn %d" |
320 | " (%s)\n", chn, where); | 333 | " (%s)\n", chn, where); |
321 | 334 | ||
322 | if (!(ret & 0x10)) { | 335 | if (!(ret & 0x10)) { |
323 | int probe; | 336 | int probe; |
337 | |||
324 | baduns |= (1 << chn); | 338 | baduns |= (1 << chn); |
325 | ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." | 339 | qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." |
326 | " (%s)\n", chn, ret, where); | 340 | " (%s)\n", chn, ret, where); |
327 | probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, | 341 | probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
328 | IB_PGUDP(0), 0, 0); | 342 | IB_PGUDP(0), 0, 0); |
329 | ipath_dev_err(dd, "probe is %d (%02X)\n", | 343 | qib_dev_err(dd, "probe is %d (%02X)\n", |
330 | probe, probe); | 344 | probe, probe); |
331 | probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, | 345 | probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
332 | IB_CTRL2(chn), 0, 0); | 346 | IB_CTRL2(chn), 0, 0); |
333 | ipath_dev_err(dd, "re-read: %d (%02X)\n", | 347 | qib_dev_err(dd, "re-read: %d (%02X)\n", |
334 | probe, probe); | 348 | probe, probe); |
335 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, | 349 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
336 | IB_CTRL2(chn), 0x10, 0x10); | 350 | IB_CTRL2(chn), 0x10, 0x10); |
337 | if (ret < 0) | 351 | if (ret < 0) |
338 | ipath_dev_err(dd, | 352 | qib_dev_err(dd, |
339 | "Err on TRIMDONE rewrite1\n"); | 353 | "Err on TRIMDONE rewrite1\n"); |
340 | } | 354 | } |
341 | } | 355 | } |
342 | for (chn = 3; chn >= 0; --chn) { | 356 | for (chn = 3; chn >= 0; --chn) { |
343 | /* Read CTRL reg for each channel to check TRIMDONE */ | 357 | /* Read CTRL reg for each channel to check TRIMDONE */ |
344 | if (baduns & (1 << chn)) { | 358 | if (baduns & (1 << chn)) { |
345 | ipath_dev_err(dd, | 359 | qib_dev_err(dd, |
346 | "Reseting TRIMDONE on chn %d (%s)\n", | 360 | "Reseting TRIMDONE on chn %d (%s)\n", |
347 | chn, where); | 361 | chn, where); |
348 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, | 362 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
349 | IB_CTRL2(chn), 0x10, 0x10); | 363 | IB_CTRL2(chn), 0x10, 0x10); |
350 | if (ret < 0) | 364 | if (ret < 0) |
351 | ipath_dev_err(dd, "Failed re-setting " | 365 | qib_dev_err(dd, "Failed re-setting " |
352 | "TRIMDONE, chn %d (%s)\n", | 366 | "TRIMDONE, chn %d (%s)\n", |
353 | chn, where); | 367 | chn, where); |
354 | } | 368 | } |
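The two loops above reduce to: scan channels 3..0 for a cleared TRIMDONE bit (bit 4 of IB_CTRL2), remember offenders in a bitmask, then make a second pass forcing the bit back on. A compact user-space sketch of that pattern, with ctrl2_read()/ctrl2_set() as hypothetical stand-ins for the EPB read-modify-write:

#include <stdio.h>
#include <stdint.h>

#define TRIMDONE_BIT 0x10

static uint8_t ctrl2[4] = { 0x10, 0x00, 0x10, 0x00 };  /* fake IB_CTRL2(chn) */

static int  ctrl2_read(int chn)          { return ctrl2[chn]; }
static void ctrl2_set(int chn, int bit)  { ctrl2[chn] |= (uint8_t)bit; }

int main(void)
{
    int chn, baduns = 0;

    for (chn = 3; chn >= 0; --chn)          /* pass 1: find cleared bits */
        if (!(ctrl2_read(chn) & TRIMDONE_BIT)) {
            baduns |= 1 << chn;
            printf("TRIMDONE cleared on chn %d\n", chn);
        }

    for (chn = 3; chn >= 0; --chn)          /* pass 2: force them back on */
        if (baduns & (1 << chn))
            ctrl2_set(chn, TRIMDONE_BIT);

    return baduns ? 1 : 0;
}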
@@ -361,96 +375,86 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, | |||
361 | * Post IB uC code version 1.32.17, was_reset being 1 is not really | 375 | * Post IB uC code version 1.32.17, was_reset being 1 is not really |
362 | * informative, so we double-check. | 376 | * informative, so we double-check. |
363 | */ | 377 | */ |
364 | int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset) | 378 | int qib_sd7220_init(struct qib_devdata *dd) |
365 | { | 379 | { |
366 | int ret = 1; /* default to failure */ | 380 | int ret = 1; /* default to failure */ |
367 | int first_reset; | 381 | int first_reset, was_reset; |
368 | int val_stat; | ||
369 | 382 | ||
383 | /* SERDES MPU reset recorded in D0 */ | ||
384 | was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1); | ||
370 | if (!was_reset) { | 385 | if (!was_reset) { |
371 | /* entered with reset not asserted, we need to do it */ | 386 | /* entered with reset not asserted, we need to do it */ |
372 | ipath_ibsd_reset(dd, 1); | 387 | qib_ibsd_reset(dd, 1); |
373 | ipath_sd_trimdone_monitor(dd, "Driver-reload"); | 388 | qib_sd_trimdone_monitor(dd, "Driver-reload"); |
374 | } | 389 | } |
375 | |||
376 | /* Substitute our deduced value for was_reset */ | 390 | /* Substitute our deduced value for was_reset */ |
377 | ret = ipath_ibsd_ucode_loaded(dd); | 391 | ret = qib_ibsd_ucode_loaded(dd->pport); |
378 | if (ret < 0) { | 392 | if (ret < 0) |
379 | ret = 1; | 393 | goto bail; |
380 | goto done; | ||
381 | } | ||
382 | first_reset = !ret; /* First reset if IBSD uCode not yet loaded */ | ||
383 | 394 | ||
395 | first_reset = !ret; /* First reset if IBSD uCode not yet loaded */ | ||
384 | /* | 396 | /* |
385 | * Alter some regs per vendor latest doc, reset-defaults | 397 | * Alter some regs per vendor latest doc, reset-defaults |
386 | * are not right for IB. | 398 | * are not right for IB. |
387 | */ | 399 | */ |
388 | ret = ipath_sd_early(dd); | 400 | ret = qib_sd_early(dd); |
389 | if (ret < 0) { | 401 | if (ret < 0) { |
390 | ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n"); | 402 | qib_dev_err(dd, "Failed to set IB SERDES early defaults\n"); |
391 | ret = 1; | 403 | goto bail; |
392 | goto done; | ||
393 | } | 404 | } |
394 | |||
395 | /* | 405 | /* |
396 | * Set DAC manual trim IB. | 406 | * Set DAC manual trim IB. |
397 | * We only do this once after chip has been reset (usually | 407 | * We only do this once after chip has been reset (usually |
398 | * same as once per system boot). | 408 | * same as once per system boot). |
399 | */ | 409 | */ |
400 | if (first_reset) { | 410 | if (first_reset) { |
401 | ret = ipath_sd_dactrim(dd); | 411 | ret = qib_sd_dactrim(dd); |
402 | if (ret < 0) { | 412 | if (ret < 0) { |
403 | ipath_dev_err(dd, "Failed IB SERDES DAC trim\n"); | 413 | qib_dev_err(dd, "Failed IB SERDES DAC trim\n"); |
404 | ret = 1; | 414 | goto bail; |
405 | goto done; | ||
406 | } | 415 | } |
407 | } | 416 | } |
408 | |||
409 | /* | 417 | /* |
410 | * Set various registers (DDS and RXEQ) that will be | 418 | * Set various registers (DDS and RXEQ) that will be |
411 | * controlled by IBC (in 1.2 mode) to reasonable preset values | 419 | * controlled by IBC (in 1.2 mode) to reasonable preset values |
412 | * Calling the "internal" version avoids the "check for needed" | 420 | * Calling the "internal" version avoids the "check for needed" |
413 | * and "trimdone monitor" that might be counter-productive. | 421 | * and "trimdone monitor" that might be counter-productive. |
414 | */ | 422 | */ |
415 | ret = ipath_internal_presets(dd); | 423 | ret = qib_internal_presets(dd); |
416 | if (ret < 0) { | 424 | if (ret < 0) { |
417 | ipath_dev_err(dd, "Failed to set IB SERDES presets\n"); | 425 | qib_dev_err(dd, "Failed to set IB SERDES presets\n"); |
418 | ret = 1; | 426 | goto bail; |
419 | goto done; | ||
420 | } | 427 | } |
421 | ret = ipath_sd_trimself(dd, 0x80); | 428 | ret = qib_sd_trimself(dd, 0x80); |
422 | if (ret < 0) { | 429 | if (ret < 0) { |
423 | ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n"); | 430 | qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n"); |
424 | ret = 1; | 431 | goto bail; |
425 | goto done; | ||
426 | } | 432 | } |
427 | 433 | ||
428 | /* Load image, then try to verify */ | 434 | /* Load image, then try to verify */ |
429 | ret = 0; /* Assume success */ | 435 | ret = 0; /* Assume success */ |
430 | if (first_reset) { | 436 | if (first_reset) { |
431 | int vfy; | 437 | int vfy; |
432 | int trim_done; | 438 | int trim_done; |
433 | ipath_dbg("SerDes uC was reset, reloading PRAM\n"); | 439 | |
434 | ret = ipath_sd7220_ib_load(dd); | 440 | ret = qib_sd7220_ib_load(dd); |
435 | if (ret < 0) { | 441 | if (ret < 0) { |
436 | ipath_dev_err(dd, "Failed to load IB SERDES image\n"); | 442 | qib_dev_err(dd, "Failed to load IB SERDES image\n"); |
437 | ret = 1; | 443 | goto bail; |
438 | goto done; | 444 | } else { |
439 | } | 445 | /* Loaded image, try to verify */ |
446 | vfy = qib_sd7220_ib_vfy(dd); | ||
447 | if (vfy != ret) { | ||
448 | qib_dev_err(dd, "SERDES PRAM VFY failed\n"); | ||
449 | goto bail; | ||
450 | } /* end if verified */ | ||
451 | } /* end if loaded */ | ||
440 | 452 | ||
441 | /* Loaded image, try to verify */ | ||
442 | vfy = ipath_sd7220_ib_vfy(dd); | ||
443 | if (vfy != ret) { | ||
444 | ipath_dev_err(dd, "SERDES PRAM VFY failed\n"); | ||
445 | ret = 1; | ||
446 | goto done; | ||
447 | } | ||
448 | /* | 453 | /* |
449 | * Loaded and verified. Almost good... | 454 | * Loaded and verified. Almost good... |
450 | * hold "success" in ret | 455 | * hold "success" in ret |
451 | */ | 456 | */ |
452 | ret = 0; | 457 | ret = 0; |
453 | |||
454 | /* | 458 | /* |
455 | * Prev steps all worked, continue bringup | 459 | * Prev steps all worked, continue bringup |
456 | * De-assert RESET to uC, only in first reset, to allow | 460 | * De-assert RESET to uC, only in first reset, to allow |
@@ -461,45 +465,47 @@ int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset) | |||
461 | */ | 465 | */ |
462 | ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38); | 466 | ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38); |
463 | if (ret < 0) { | 467 | if (ret < 0) { |
464 | ipath_dev_err(dd, "Failed clearing START_EQ1\n"); | 468 | qib_dev_err(dd, "Failed clearing START_EQ1\n"); |
465 | ret = 1; | 469 | goto bail; |
466 | goto done; | ||
467 | } | 470 | } |
468 | 471 | ||
469 | ipath_ibsd_reset(dd, 0); | 472 | qib_ibsd_reset(dd, 0); |
470 | /* | 473 | /* |
471 | * If this is not the first reset, trimdone should be set | 474 | * If this is not the first reset, trimdone should be set |
472 | * already. | 475 | * already. We may need to check about this. |
473 | * already. | 476 | * already. We may need to check this. |
474 | trim_done = ipath_sd_trimdone_poll(dd); | 477 | trim_done = qib_sd_trimdone_poll(dd); |
475 | /* | 478 | /* |
476 | * Whether or not trimdone succeeded, we need to put the | 479 | * Whether or not trimdone succeeded, we need to put the |
477 | * uC back into reset to avoid a possible fight with the | 480 | * uC back into reset to avoid a possible fight with the |
478 | * IBC state-machine. | 481 | * IBC state-machine. |
479 | */ | 482 | */ |
480 | ipath_ibsd_reset(dd, 1); | 483 | qib_ibsd_reset(dd, 1); |
481 | 484 | ||
482 | if (!trim_done) { | 485 | if (!trim_done) { |
483 | ipath_dev_err(dd, "No TRIMDONE seen\n"); | 486 | qib_dev_err(dd, "No TRIMDONE seen\n"); |
484 | ret = 1; | 487 | goto bail; |
485 | goto done; | ||
486 | } | 488 | } |
487 | 489 | /* | |
488 | ipath_sd_trimdone_monitor(dd, "First-reset"); | 490 | * DEBUG: check each time we reset if trimdone bits have |
491 | * gotten cleared, and re-set them. | ||
492 | */ | ||
493 | qib_sd_trimdone_monitor(dd, "First-reset"); | ||
489 | /* Remember so we do not re-do the load, dactrim, etc. */ | 494 | /* Remember so we do not re-do the load, dactrim, etc. */ |
490 | dd->serdes_first_init_done = 1; | 495 | dd->cspec->serdes_first_init_done = 1; |
491 | } | 496 | } |
492 | /* | 497 | /* |
493 | * Setup for channel training and load values for | 498 | * setup for channel training and load values for |
494 | * RxEq and DDS in tables used by IBC in IB1.2 mode | 499 | * RxEq and DDS in tables used by IBC in IB1.2 mode |
495 | */ | 500 | */ |
496 | 501 | ret = 0; | |
497 | val_stat = ipath_sd_setvals(dd); | 502 | if (qib_sd_setvals(dd) >= 0) |
498 | if (val_stat < 0) | 503 | goto done; |
499 | ret = 1; | 504 | bail: |
505 | ret = 1; | ||
500 | done: | 506 | done: |
501 | /* start relock timer regardless, but start at 1 second */ | 507 | /* start relock timer regardless, but start at 1 second */ |
502 | ipath_set_relock_poll(dd, -1); | 508 | set_7220_relock_poll(dd, -1); |
503 | return ret; | 509 | return ret; |
504 | } | 510 | } |
505 | 511 | ||
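Stripped of error reporting, the init routine above is a fixed sequence of steps gated on first_reset. The sketch below is only a control-flow skeleton under assumed stub names (none of them are driver functions); it shows the order of operations, not the real implementations.

/* Stubs so the sketch compiles stand-alone; none of these are driver calls. */
static int  serdes_reset_bit_set(void)       { return 0; }
static int  ucode_already_loaded(void)       { return 0; }
static void ibsd_reset(int assert)           { (void)assert; }
static void trimdone_monitor(const char *s)  { (void)s; }
static int  set_early_defaults(void)         { return 0; }
static int  dac_trim(void)                   { return 0; }
static int  load_presets(void)               { return 0; }
static int  trimself(int v)                  { (void)v; return 0; }
static int  load_firmware(void)              { return 0; }
static int  verify_firmware(void)            { return 0; }
static int  clear_start_eq1(void)            { return 0; }
static int  poll_trimdone(void)              { return 1; }
static int  write_dds_rxeq_tables(void)      { return 0; }

/* Control-flow skeleton of the bring-up; returns 0 on success, 1 on failure. */
int sd7220_init_sketch(void)
{
    int was_reset = serdes_reset_bit_set();  /* deduced from ibserdesctrl D0 */
    int first_reset, trim_done;

    if (!was_reset) {                        /* entered running: force reset */
        ibsd_reset(1);
        trimdone_monitor("Driver-reload");
    }
    first_reset = !ucode_already_loaded();   /* uCode absent => first reset */

    if (set_early_defaults() < 0)            return 1;
    if (first_reset && dac_trim() < 0)       return 1;
    if (load_presets() < 0)                  return 1;
    if (trimself(0x80) < 0)                  return 1;

    if (first_reset) {
        if (load_firmware() < 0 || verify_firmware() != 0)
            return 1;
        if (clear_start_eq1() < 0)           return 1;
        ibsd_reset(0);                       /* release uC, let it trim */
        trim_done = poll_trimdone();
        ibsd_reset(1);                       /* park uC again either way */
        if (!trim_done)                      return 1;
        trimdone_monitor("First-reset");
    }
    return (write_dds_rxeq_tables() < 0) ? 1 : 0;
}

int main(void) { return sd7220_init_sketch(); }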
@@ -517,7 +523,7 @@ done: | |||
517 | * the "claim" parameter is >0 to claim, <0 to release, 0 to query. | 523 | * the "claim" parameter is >0 to claim, <0 to release, 0 to query. |
518 | * Returns <0 for errors, >0 if we had ownership, else 0. | 524 | * Returns <0 for errors, >0 if we had ownership, else 0. |
519 | */ | 525 | */ |
520 | static int epb_access(struct ipath_devdata *dd, int sdnum, int claim) | 526 | static int epb_access(struct qib_devdata *dd, int sdnum, int claim) |
521 | { | 527 | { |
522 | u16 acc; | 528 | u16 acc; |
523 | u64 accval; | 529 | u64 accval; |
@@ -525,28 +531,30 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim) | |||
525 | u64 oct_sel = 0; | 531 | u64 oct_sel = 0; |
526 | 532 | ||
527 | switch (sdnum) { | 533 | switch (sdnum) { |
528 | case IB_7220_SERDES : | 534 | case IB_7220_SERDES: |
529 | /* | 535 | /* |
530 | * The IB SERDES "ownership" is fairly simple. A single each | 536 | * The IB SERDES "ownership" is fairly simple. A single each |
531 | * request/grant. | 537 | * request/grant. |
532 | */ | 538 | */ |
533 | acc = dd->ipath_kregs->kr_ib_epbacc; | 539 | acc = kr_ibsd_epb_access_ctrl; |
534 | break; | 540 | break; |
535 | case PCIE_SERDES0 : | 541 | |
536 | case PCIE_SERDES1 : | 542 | case PCIE_SERDES0: |
543 | case PCIE_SERDES1: | ||
537 | /* PCIe SERDES has two "octants", need to select which */ | 544 | /* PCIe SERDES has two "octants", need to select which */ |
538 | acc = dd->ipath_kregs->kr_pcie_epbacc; | 545 | acc = kr_pciesd_epb_access_ctrl; |
539 | oct_sel = (2 << (sdnum - PCIE_SERDES0)); | 546 | oct_sel = (2 << (sdnum - PCIE_SERDES0)); |
540 | break; | 547 | break; |
541 | default : | 548 | |
549 | default: | ||
542 | return 0; | 550 | return 0; |
543 | } | 551 | } |
544 | 552 | ||
545 | /* Make sure any outstanding transaction was seen */ | 553 | /* Make sure any outstanding transaction was seen */ |
546 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 554 | qib_read_kreg32(dd, kr_scratch); |
547 | udelay(15); | 555 | udelay(15); |
548 | 556 | ||
549 | accval = ipath_read_kreg32(dd, acc); | 557 | accval = qib_read_kreg32(dd, acc); |
550 | 558 | ||
551 | owned = !!(accval & EPB_ACC_GNT); | 559 | owned = !!(accval & EPB_ACC_GNT); |
552 | if (claim < 0) { | 560 | if (claim < 0) { |
@@ -557,22 +565,22 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim) | |||
557 | * Both should be clear | 565 | * Both should be clear |
558 | */ | 566 | */ |
559 | u64 newval = 0; | 567 | u64 newval = 0; |
560 | ipath_write_kreg(dd, acc, newval); | 568 | qib_write_kreg(dd, acc, newval); |
561 | /* First read after write is not trustworthy */ | 569 | /* First read after write is not trustworthy */ |
562 | pollval = ipath_read_kreg32(dd, acc); | 570 | pollval = qib_read_kreg32(dd, acc); |
563 | udelay(5); | 571 | udelay(5); |
564 | pollval = ipath_read_kreg32(dd, acc); | 572 | pollval = qib_read_kreg32(dd, acc); |
565 | if (pollval & EPB_ACC_GNT) | 573 | if (pollval & EPB_ACC_GNT) |
566 | owned = -1; | 574 | owned = -1; |
567 | } else if (claim > 0) { | 575 | } else if (claim > 0) { |
568 | /* Need to claim */ | 576 | /* Need to claim */ |
569 | u64 pollval; | 577 | u64 pollval; |
570 | u64 newval = EPB_ACC_REQ | oct_sel; | 578 | u64 newval = EPB_ACC_REQ | oct_sel; |
571 | ipath_write_kreg(dd, acc, newval); | 579 | qib_write_kreg(dd, acc, newval); |
572 | /* First read after write is not trustworthy */ | 580 | /* First read after write is not trustworthy */ |
573 | pollval = ipath_read_kreg32(dd, acc); | 581 | pollval = qib_read_kreg32(dd, acc); |
574 | udelay(5); | 582 | udelay(5); |
575 | pollval = ipath_read_kreg32(dd, acc); | 583 | pollval = qib_read_kreg32(dd, acc); |
576 | if (!(pollval & EPB_ACC_GNT)) | 584 | if (!(pollval & EPB_ACC_GNT)) |
577 | owned = -1; | 585 | owned = -1; |
578 | } | 586 | } |
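The ownership protocol in epb_access() is a request/grant handshake: write EPB_ACC_REQ (plus an octant select for the PCIe SerDes) to claim, write zero to release, and read back twice because the first read after a write is not trustworthy. Below is a self-contained sketch of that handshake against a simulated access register; the bit positions and epb_reg_read()/epb_reg_write() are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define EPB_ACC_REQ  (1u << 0)      /* placeholder bit positions */
#define EPB_ACC_GNT  (1u << 1)

static uint32_t acc_reg;            /* fake access-control register */

static void epb_reg_write(uint32_t v)
{
    acc_reg = v;
    if (acc_reg & EPB_ACC_REQ)      /* simulated hardware grants the request */
        acc_reg |= EPB_ACC_GNT;
    else
        acc_reg &= ~EPB_ACC_GNT;
}
static uint32_t epb_reg_read(void)  { return acc_reg; }

/* claim > 0: claim, claim < 0: release, claim == 0: just query. */
static int epb_access_sketch(int claim)
{
    int owned = !!(epb_reg_read() & EPB_ACC_GNT);   /* did we own it before? */

    if (claim < 0) {                                /* release */
        epb_reg_write(0);
        (void)epb_reg_read();                       /* first read untrusted */
        if (epb_reg_read() & EPB_ACC_GNT)
            owned = -1;                             /* release failed */
    } else if (claim > 0) {                         /* claim */
        epb_reg_write(EPB_ACC_REQ);
        (void)epb_reg_read();
        if (!(epb_reg_read() & EPB_ACC_GNT))
            owned = -1;                             /* grant never arrived */
    }
    return owned;                   /* <0 error, >0 had it before, 0 did not */
}

int main(void)
{
    printf("claim: %d\n", epb_access_sketch(1));    /* 0: did not own before */
    printf("release: %d\n", epb_access_sketch(-1)); /* 1: owned it until now */
    return 0;
}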
@@ -582,18 +590,17 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim) | |||
582 | /* | 590 | /* |
583 | * Lemma to deal with race condition of write..read to epb regs | 591 | * Lemma to deal with race condition of write..read to epb regs |
584 | */ | 592 | */ |
585 | static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) | 593 | static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) |
586 | { | 594 | { |
587 | int tries; | 595 | int tries; |
588 | u64 transval; | 596 | u64 transval; |
589 | 597 | ||
590 | 598 | qib_write_kreg(dd, reg, i_val); | |
591 | ipath_write_kreg(dd, reg, i_val); | ||
592 | /* Throw away first read, as RDY bit may be stale */ | 599 | /* Throw away first read, as RDY bit may be stale */ |
593 | transval = ipath_read_kreg64(dd, reg); | 600 | transval = qib_read_kreg64(dd, reg); |
594 | 601 | ||
595 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | 602 | for (tries = EPB_TRANS_TRIES; tries; --tries) { |
596 | transval = ipath_read_kreg32(dd, reg); | 603 | transval = qib_read_kreg32(dd, reg); |
597 | if (transval & EPB_TRANS_RDY) | 604 | if (transval & EPB_TRANS_RDY) |
598 | break; | 605 | break; |
599 | udelay(5); | 606 | udelay(5); |
@@ -606,21 +613,20 @@ static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) | |||
606 | } | 613 | } |
607 | 614 | ||
608 | /** | 615 | /** |
609 | * | 616 | * qib_sd7220_reg_mod - modify SERDES register |
610 | * ipath_sd7220_reg_mod - modify SERDES register | 617 | * @dd: the qlogic_ib device |
611 | * @dd: the infinipath device | ||
612 | * @sdnum: which SERDES to access | 618 | * @sdnum: which SERDES to access |
613 | * @loc: location - channel, element, register, as packed by EPB_LOC() macro. | 619 | * @loc: location - channel, element, register, as packed by EPB_LOC() macro. |
614 | * @wd: Write Data - value to set in register | 620 | * @wd: Write Data - value to set in register |
615 | * @mask: ones where data should be spliced into reg. | 621 | * @mask: ones where data should be spliced into reg. |
616 | * | 622 | * |
617 | * Basic register read/modify/write, with un-needed accesses elided. That is, | 623 | * Basic register read/modify/write, with un-needed accesses elided. That is, |
618 | * a mask of zero will prevent write, while a mask of 0xFF will prevent read. | 624 | * a mask of zero will prevent write, while a mask of 0xFF will prevent read. |
619 | * returns current (presumed, if a write was done) contents of selected | 625 | * returns current (presumed, if a write was done) contents of selected |
620 | * register, or <0 if errors. | 626 | * register, or <0 if errors. |
621 | */ | 627 | */ |
622 | static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, | 628 | static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc, |
623 | u32 wd, u32 mask) | 629 | u32 wd, u32 mask) |
624 | { | 630 | { |
625 | u16 trans; | 631 | u16 trans; |
626 | u64 transval; | 632 | u64 transval; |
@@ -629,14 +635,16 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
629 | unsigned long flags; | 635 | unsigned long flags; |
630 | 636 | ||
631 | switch (sdnum) { | 637 | switch (sdnum) { |
632 | case IB_7220_SERDES : | 638 | case IB_7220_SERDES: |
633 | trans = dd->ipath_kregs->kr_ib_epbtrans; | 639 | trans = kr_ibsd_epb_transaction_reg; |
634 | break; | 640 | break; |
635 | case PCIE_SERDES0 : | 641 | |
636 | case PCIE_SERDES1 : | 642 | case PCIE_SERDES0: |
637 | trans = dd->ipath_kregs->kr_pcie_epbtrans; | 643 | case PCIE_SERDES1: |
644 | trans = kr_pciesd_epb_transaction_reg; | ||
638 | break; | 645 | break; |
639 | default : | 646 | |
647 | default: | ||
640 | return -1; | 648 | return -1; |
641 | } | 649 | } |
642 | 650 | ||
@@ -644,23 +652,23 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
644 | * All access is locked in software (vs other host threads) and | 652 | * All access is locked in software (vs other host threads) and |
645 | * hardware (vs uC access). | 653 | * hardware (vs uC access). |
646 | */ | 654 | */ |
647 | spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); | 655 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); |
648 | 656 | ||
649 | owned = epb_access(dd, sdnum, 1); | 657 | owned = epb_access(dd, sdnum, 1); |
650 | if (owned < 0) { | 658 | if (owned < 0) { |
651 | spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); | 659 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); |
652 | return -1; | 660 | return -1; |
653 | } | 661 | } |
654 | ret = 0; | 662 | ret = 0; |
655 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | 663 | for (tries = EPB_TRANS_TRIES; tries; --tries) { |
656 | transval = ipath_read_kreg32(dd, trans); | 664 | transval = qib_read_kreg32(dd, trans); |
657 | if (transval & EPB_TRANS_RDY) | 665 | if (transval & EPB_TRANS_RDY) |
658 | break; | 666 | break; |
659 | udelay(5); | 667 | udelay(5); |
660 | } | 668 | } |
661 | 669 | ||
662 | if (tries > 0) { | 670 | if (tries > 0) { |
663 | tries = 1; /* to make read-skip work */ | 671 | tries = 1; /* to make read-skip work */ |
664 | if (mask != 0xFF) { | 672 | if (mask != 0xFF) { |
665 | /* | 673 | /* |
666 | * Not a pure write, so need to read. | 674 | * Not a pure write, so need to read. |
@@ -688,7 +696,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
688 | else | 696 | else |
689 | ret = transval & EPB_DATA_MASK; | 697 | ret = transval & EPB_DATA_MASK; |
690 | 698 | ||
691 | spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); | 699 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); |
692 | if (tries <= 0) | 700 | if (tries <= 0) |
693 | ret = -1; | 701 | ret = -1; |
694 | return ret; | 702 | return ret; |
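The core of qib_sd7220_reg_mod() is the splice step: mask == 0 means pure read (never write), mask == 0xFF means pure write (no read needed), and anything in between is a read-modify-write where only the masked bits take the new value. A tiny stand-alone illustration of just that splice, with a plain byte standing in for the EPB register:

#include <stdio.h>
#include <stdint.h>

/* Splice 'wd' into '*reg' under 'mask'; returns the resulting register value. */
static unsigned reg_mod(uint8_t *reg, unsigned wd, unsigned mask)
{
    unsigned cur = *reg;

    if (mask == 0)                      /* pure read: never write */
        return cur;
    if (mask != 0xFF)                   /* partial write: need the old value */
        wd = (cur & ~mask) | (wd & mask);
    *reg = (uint8_t)wd;                 /* pure or spliced write */
    return wd;
}

int main(void)
{
    uint8_t reg = 0xA3;

    printf("read      -> %02X\n", reg_mod(&reg, 0x00, 0x00));  /* A3 */
    printf("set bit 4 -> %02X\n", reg_mod(&reg, 0x10, 0x10));  /* B3 */
    printf("overwrite -> %02X\n", reg_mod(&reg, 0x55, 0xFF));  /* 55 */
    return 0;
}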
@@ -707,7 +715,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
707 | #define EPB_RAMDATA EPB_LOC(6, 0, 5) | 715 | #define EPB_RAMDATA EPB_LOC(6, 0, 5) |
708 | 716 | ||
709 | /* Transfer data to/from uC Program RAM of IB or PCIe SerDes */ | 717 |
710 | static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, | 718 | static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc, |
711 | u8 *buf, int cnt, int rd_notwr) | 719 | u8 *buf, int cnt, int rd_notwr) |
712 | { | 720 | { |
713 | u16 trans; | 721 | u16 trans; |
@@ -723,29 +731,28 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
723 | 731 | ||
724 | /* Pick appropriate transaction reg and "Chip select" for this serdes */ | 732 | /* Pick appropriate transaction reg and "Chip select" for this serdes */ |
725 | switch (sdnum) { | 733 | switch (sdnum) { |
726 | case IB_7220_SERDES : | 734 | case IB_7220_SERDES: |
727 | csbit = 1ULL << EPB_IB_UC_CS_SHF; | 735 | csbit = 1ULL << EPB_IB_UC_CS_SHF; |
728 | trans = dd->ipath_kregs->kr_ib_epbtrans; | 736 | trans = kr_ibsd_epb_transaction_reg; |
729 | break; | 737 | break; |
730 | case PCIE_SERDES0 : | 738 | |
731 | case PCIE_SERDES1 : | 739 | case PCIE_SERDES0: |
740 | case PCIE_SERDES1: | ||
732 | /* PCIe SERDES has uC "chip select" in different bit, too */ | 741 | /* PCIe SERDES has uC "chip select" in different bit, too */ |
733 | csbit = 1ULL << EPB_PCIE_UC_CS_SHF; | 742 | csbit = 1ULL << EPB_PCIE_UC_CS_SHF; |
734 | trans = dd->ipath_kregs->kr_pcie_epbtrans; | 743 | trans = kr_pciesd_epb_transaction_reg; |
735 | break; | 744 | break; |
736 | default : | 745 | |
746 | default: | ||
737 | return -1; | 747 | return -1; |
738 | } | 748 | } |
739 | 749 | ||
740 | op = rd_notwr ? "Rd" : "Wr"; | 750 | op = rd_notwr ? "Rd" : "Wr"; |
741 | spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); | 751 | spin_lock_irqsave(&dd->cspec->sdepb_lock, flags); |
742 | 752 | ||
743 | owned = epb_access(dd, sdnum, 1); | 753 | owned = epb_access(dd, sdnum, 1); |
744 | if (owned < 0) { | 754 | if (owned < 0) { |
745 | spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); | 755 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); |
746 | ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n", | ||
747 | op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe", | ||
748 | owned, loc); | ||
749 | return -1; | 756 | return -1; |
750 | } | 757 | } |
751 | 758 | ||
@@ -758,16 +765,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
758 | */ | 765 | */ |
759 | addr = loc & 0x1FFF; | 766 | addr = loc & 0x1FFF; |
760 | for (tries = EPB_TRANS_TRIES; tries; --tries) { | 767 | for (tries = EPB_TRANS_TRIES; tries; --tries) { |
761 | transval = ipath_read_kreg32(dd, trans); | 768 | transval = qib_read_kreg32(dd, trans); |
762 | if (transval & EPB_TRANS_RDY) | 769 | if (transval & EPB_TRANS_RDY) |
763 | break; | 770 | break; |
764 | udelay(5); | 771 | udelay(5); |
765 | } | 772 | } |
766 | 773 | ||
767 | sofar = 0; | 774 | sofar = 0; |
768 | if (tries <= 0) | 775 | if (tries > 0) { |
769 | ipath_dbg("No initial RDY on EPB access request\n"); | ||
770 | else { | ||
771 | /* | 776 | /* |
772 | * Every "memory" access is doubly-indirect. | 777 | * Every "memory" access is doubly-indirect. |
773 | * We set two bytes of address, then read/write | 778 | * We set two bytes of address, then read/write |
@@ -778,8 +783,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
778 | transval = csbit | EPB_UC_CTL | | 783 | transval = csbit | EPB_UC_CTL | |
779 | (rd_notwr ? EPB_ROM_R : EPB_ROM_W); | 784 | (rd_notwr ? EPB_ROM_R : EPB_ROM_W); |
780 | tries = epb_trans(dd, trans, transval, &transval); | 785 | tries = epb_trans(dd, trans, transval, &transval); |
781 | if (tries <= 0) | ||
782 | ipath_dbg("No EPB response to uC %s cmd\n", op); | ||
783 | while (tries > 0 && sofar < cnt) { | 786 | while (tries > 0 && sofar < cnt) { |
784 | if (!sofar) { | 787 | if (!sofar) { |
785 | /* Only set address at start of chunk */ | 788 | /* Only set address at start of chunk */ |
@@ -787,18 +790,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
787 | transval = csbit | EPB_MADDRH | addrbyte; | 790 | transval = csbit | EPB_MADDRH | addrbyte; |
788 | tries = epb_trans(dd, trans, transval, | 791 | tries = epb_trans(dd, trans, transval, |
789 | &transval); | 792 | &transval); |
790 | if (tries <= 0) { | 793 | if (tries <= 0) |
791 | ipath_dbg("No EPB response ADDRH\n"); | ||
792 | break; | 794 | break; |
793 | } | ||
794 | addrbyte = (addr + sofar) & 0xFF; | 795 | addrbyte = (addr + sofar) & 0xFF; |
795 | transval = csbit | EPB_MADDRL | addrbyte; | 796 | transval = csbit | EPB_MADDRL | addrbyte; |
796 | tries = epb_trans(dd, trans, transval, | 797 | tries = epb_trans(dd, trans, transval, |
797 | &transval); | 798 | &transval); |
798 | if (tries <= 0) { | 799 | if (tries <= 0) |
799 | ipath_dbg("No EPB response ADDRL\n"); | ||
800 | break; | 800 | break; |
801 | } | ||
802 | } | 801 | } |
803 | 802 | ||
804 | if (rd_notwr) | 803 | if (rd_notwr) |
@@ -806,10 +805,8 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
806 | else | 805 | else |
807 | transval = csbit | EPB_ROMDATA | buf[sofar]; | 806 | transval = csbit | EPB_ROMDATA | buf[sofar]; |
808 | tries = epb_trans(dd, trans, transval, &transval); | 807 | tries = epb_trans(dd, trans, transval, &transval); |
809 | if (tries <= 0) { | 808 | if (tries <= 0) |
810 | ipath_dbg("No EPB response DATA\n"); | ||
811 | break; | 809 | break; |
812 | } | ||
813 | if (rd_notwr) | 810 | if (rd_notwr) |
814 | buf[sofar] = transval & EPB_DATA_MASK; | 811 | buf[sofar] = transval & EPB_DATA_MASK; |
815 | ++sofar; | 812 | ++sofar; |
@@ -817,8 +814,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
817 | /* Finally, clear control-bit for Read or Write */ | 814 | /* Finally, clear control-bit for Read or Write */ |
818 | transval = csbit | EPB_UC_CTL; | 815 | transval = csbit | EPB_UC_CTL; |
819 | tries = epb_trans(dd, trans, transval, &transval); | 816 | tries = epb_trans(dd, trans, transval, &transval); |
820 | if (tries <= 0) | ||
821 | ipath_dbg("No EPB response to drop of uC %s cmd\n", op); | ||
822 | } | 817 | } |
823 | 818 | ||
824 | ret = sofar; | 819 | ret = sofar; |
@@ -826,18 +821,16 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, | |||
826 | if (epb_access(dd, sdnum, -1) < 0) | 821 | if (epb_access(dd, sdnum, -1) < 0) |
827 | ret = -1; | 822 | ret = -1; |
828 | 823 | ||
829 | spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); | 824 | spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags); |
830 | if (tries <= 0) { | 825 | if (tries <= 0) |
831 | ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar); | ||
832 | ret = -1; | 826 | ret = -1; |
833 | } | ||
834 | return ret; | 827 | return ret; |
835 | } | 828 | } |
836 | 829 | ||
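The transfer above is "doubly-indirect": the uC program RAM is reached by two EPB transactions that latch the address (high byte, then low byte) followed by one transaction per data byte, and the address is only re-latched at the start of each chunk. A minimal user-space model of that sequencing over a fake 8 KB PRAM; epb_set_addr_*()/epb_data_*() are assumed stand-ins for epb_trans() transactions, and the auto-incrementing address is an assumption of the model.

#include <stdio.h>
#include <stdint.h>

static uint8_t  pram[0x2000];           /* fake 8 KB uC program RAM */
static uint16_t cur_addr;               /* address latch inside the "SerDes" */

static void epb_set_addr_hi(int b) { cur_addr = (cur_addr & 0x00FF) | (b << 8); }
static void epb_set_addr_lo(int b) { cur_addr = (cur_addr & 0xFF00) | (b & 0xFF); }
static void epb_data_wr(int b)     { pram[cur_addr++ & 0x1FFF] = (uint8_t)b; }
static int  epb_data_rd(void)      { return pram[cur_addr++ & 0x1FFF]; }

/* Write 'cnt' bytes starting at 'loc': address latched only at chunk start. */
static int ram_xfer_wr(unsigned loc, const uint8_t *buf, int cnt)
{
    int sofar = 0;
    unsigned addr = loc & 0x1FFF;

    while (sofar < cnt) {
        if (!sofar) {                           /* only at start of chunk */
            epb_set_addr_hi((addr >> 8) & 0x1F);
            epb_set_addr_lo(addr & 0xFF);
        }
        epb_data_wr(buf[sofar]);
        ++sofar;
    }
    return sofar;
}

int main(void)
{
    const uint8_t img[4] = { 0xDE, 0xAD, 0xBE, 0xEF };

    ram_xfer_wr(0x100, img, 4);
    cur_addr = 0x100;
    printf("%02X %02X\n", epb_data_rd(), epb_data_rd());   /* DE AD */
    return 0;
}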
837 | #define PROG_CHUNK 64 | 830 | #define PROG_CHUNK 64 |
838 | 831 | ||
839 | int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, | 832 | int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, |
840 | u8 *img, int len, int offset) | 833 | u8 *img, int len, int offset) |
841 | { | 834 | { |
842 | int cnt, sofar, req; | 835 | int cnt, sofar, req; |
843 | 836 | ||
@@ -846,7 +839,7 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, | |||
846 | req = len - sofar; | 839 | req = len - sofar; |
847 | if (req > PROG_CHUNK) | 840 | if (req > PROG_CHUNK) |
848 | req = PROG_CHUNK; | 841 | req = PROG_CHUNK; |
849 | cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar, | 842 | cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar, |
850 | img + sofar, req, 0); | 843 | img + sofar, req, 0); |
851 | if (cnt < req) { | 844 | if (cnt < req) { |
852 | sofar = -1; | 845 | sofar = -1; |
@@ -860,8 +853,8 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, | |||
860 | #define VFY_CHUNK 64 | 853 | #define VFY_CHUNK 64 |
861 | #define SD_PRAM_ERROR_LIMIT 42 | 854 | #define SD_PRAM_ERROR_LIMIT 42 |
862 | 855 | ||
863 | int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, | 856 | int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, |
864 | const u8 *img, int len, int offset) | 857 | const u8 *img, int len, int offset) |
865 | { | 858 | { |
866 | int cnt, sofar, req, idx, errors; | 859 | int cnt, sofar, req, idx, errors; |
867 | unsigned char readback[VFY_CHUNK]; | 860 | unsigned char readback[VFY_CHUNK]; |
@@ -872,7 +865,7 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, | |||
872 | req = len - sofar; | 865 | req = len - sofar; |
873 | if (req > VFY_CHUNK) | 866 | if (req > VFY_CHUNK) |
874 | req = VFY_CHUNK; | 867 | req = VFY_CHUNK; |
875 | cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset, | 868 | cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset, |
876 | readback, req, 1); | 869 | readback, req, 1); |
877 | if (cnt < req) { | 870 | if (cnt < req) { |
878 | /* failed in read itself */ | 871 | /* failed in read itself */ |
@@ -888,11 +881,13 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, | |||
888 | return errors ? -errors : sofar; | 881 | return errors ? -errors : sofar; |
889 | } | 882 | } |
890 | 883 | ||
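qib_sd7220_prog_ld() and qib_sd7220_prog_vfy() are the same chunking loop run in opposite directions: move up to 64 bytes per RAM transfer, and on verify count byte mismatches, giving up once SD_PRAM_ERROR_LIMIT is reached and returning -errors instead of the byte count. A condensed stand-alone version of the verify side, with xfer_rd() faking the readback:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define VFY_CHUNK            64
#define SD_PRAM_ERROR_LIMIT  42

static uint8_t fake_pram[256];

/* Hypothetical stand-in for qib_sd7220_ram_xfer() in read mode. */
static int xfer_rd(int offset, uint8_t *buf, int req)
{
    memcpy(buf, fake_pram + offset, req);
    return req;
}

static int prog_vfy(const uint8_t *img, int len, int offset)
{
    uint8_t readback[VFY_CHUNK];
    int sofar = 0, errors = 0;

    while (sofar < len && errors < SD_PRAM_ERROR_LIMIT) {
        int idx, req = len - sofar;

        if (req > VFY_CHUNK)
            req = VFY_CHUNK;
        if (xfer_rd(sofar + offset, readback, req) < req)
            return -1;                        /* the read itself failed */
        for (idx = 0; idx < req; ++idx)
            if (readback[idx] != img[sofar + idx])
                ++errors;
        sofar += req;
    }
    return errors ? -errors : sofar;          /* bytes verified, or -errors */
}

int main(void)
{
    uint8_t img[200];

    memset(img, 0x5A, sizeof(img));
    memcpy(fake_pram, img, sizeof(img));
    fake_pram[17] ^= 0xFF;                    /* inject one mismatch */
    printf("vfy: %d\n", prog_vfy(img, sizeof(img), 0));   /* -1: one mismatch */
    return 0;
}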
891 | /* IRQ not set up at this point in init, so we poll. */ | 884 | /* |
885 | * IRQ not set up at this point in init, so we poll. | ||
886 | */ | ||
892 | #define IB_SERDES_TRIM_DONE (1ULL << 11) | 887 | #define IB_SERDES_TRIM_DONE (1ULL << 11) |
893 | #define TRIM_TMO (30) | 888 | #define TRIM_TMO (30) |
894 | 889 | ||
895 | static int ipath_sd_trimdone_poll(struct ipath_devdata *dd) | 890 | static int qib_sd_trimdone_poll(struct qib_devdata *dd) |
896 | { | 891 | { |
897 | int trim_tmo, ret; | 892 | int trim_tmo, ret; |
898 | uint64_t val; | 893 | uint64_t val; |
@@ -903,16 +898,15 @@ static int ipath_sd_trimdone_poll(struct ipath_devdata *dd) | |||
903 | */ | 898 | */ |
904 | ret = 0; | 899 | ret = 0; |
905 | for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) { | 900 | for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) { |
906 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); | 901 | val = qib_read_kreg64(dd, kr_ibcstatus); |
907 | if (val & IB_SERDES_TRIM_DONE) { | 902 | if (val & IB_SERDES_TRIM_DONE) { |
908 | ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo); | ||
909 | ret = 1; | 903 | ret = 1; |
910 | break; | 904 | break; |
911 | } | 905 | } |
912 | msleep(10); | 906 | msleep(10); |
913 | } | 907 | } |
914 | if (trim_tmo >= TRIM_TMO) { | 908 | if (trim_tmo >= TRIM_TMO) { |
915 | ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); | 909 | qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); |
916 | ret = 0; | 910 | ret = 0; |
917 | } | 911 | } |
918 | return ret; | 912 | return ret; |
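Because interrupts are not set up this early, trim-done is detected by the classic poll-with-timeout idiom: read the status register every 10 ms, succeed as soon as bit 11 is set, give up after TRIM_TMO attempts. A generic stand-alone version of that helper; read_status() and sleep_ms() are stand-ins for the kernel register read and msleep():

#include <stdio.h>
#include <stdint.h>

#define IB_SERDES_TRIM_DONE  (1ULL << 11)
#define TRIM_TMO             30

static int polls_left = 5;              /* fake hardware: done after 5 polls */
static uint64_t read_status(void)
{
    return (--polls_left <= 0) ? IB_SERDES_TRIM_DONE : 0;
}
static void sleep_ms(int ms) { (void)ms; }   /* msleep() stand-in */

static int trimdone_poll(void)
{
    int tmo;

    for (tmo = 0; tmo < TRIM_TMO; ++tmo) {
        if (read_status() & IB_SERDES_TRIM_DONE)
            return 1;                   /* done within the budget */
        sleep_ms(10);
    }
    return 0;                           /* no TRIMDONE in TRIM_TMO tries */
}

int main(void)
{
    printf("trimdone: %d\n", trimdone_poll());
    return 0;
}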
@@ -964,8 +958,7 @@ static struct dds_init { | |||
964 | }; | 958 | }; |
965 | 959 | ||
966 | /* | 960 | /* |
967 | * Next, values related to Receive Equalization. | 961 | * Now the RXEQ section of the table. |
968 | * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR | ||
969 | */ | 962 | */ |
970 | /* Hardware packs an element number and register address thus: */ | 963 | /* Hardware packs an element number and register address thus: */ |
971 | #define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4)) | 964 | #define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4)) |
@@ -981,23 +974,23 @@ static struct dds_init { | |||
981 | #define RXEQ_SDR_ZCNT 23 | 974 | #define RXEQ_SDR_ZCNT 23 |
982 | 975 | ||
983 | static struct rxeq_init { | 976 | static struct rxeq_init { |
984 | u16 rdesc; /* in form used in SerDesDDSRXEQ */ | 977 | u16 rdesc; /* in form used in SerDesDDSRXEQ */ |
985 | u8 rdata[4]; | 978 | u8 rdata[4]; |
986 | } rxeq_init_vals[] = { | 979 | } rxeq_init_vals[] = { |
987 | /* Set Rcv Eq. to Preset node */ | 980 | /* Set Rcv Eq. to Preset node */ |
988 | RXEQ_VAL_ALL(7, 0x27, 0x10), | 981 | RXEQ_VAL_ALL(7, 0x27, 0x10), |
989 | /* Set DFELTHFDR/HDR thresholds */ | 982 | /* Set DFELTHFDR/HDR thresholds */ |
990 | RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */ | 983 | RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */ |
991 | RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */ | 984 | RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */ |
992 | /* Set TLTHFDR/HDR threshold */ | 985 | /* Set TLTHFDR/HDR threshold */ |
993 | RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */ | 986 | RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */ |
994 | RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */ | 987 | RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */ |
995 | /* Set Preamp setting 2 (ZFR/ZCNT) */ | 988 | /* Set Preamp setting 2 (ZFR/ZCNT) */ |
996 | RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */ | 989 | RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */ |
997 | RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */ | 990 | RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */ |
998 | /* Set Preamp DC gain and Setting 1 (GFR/GHR) */ | 991 | /* Set Preamp DC gain and Setting 1 (GFR/GHR) */ |
999 | RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */ | 992 | RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */ |
1000 | RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */ | 993 | RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */ |
1001 | /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */ | 994 | /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */ |
1002 | RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */ | 995 | RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */ |
1003 | RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */ | 996 | RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */ |
@@ -1007,27 +1000,27 @@ static struct rxeq_init { | |||
1007 | #define DDS_ROWS (16) | 1000 | #define DDS_ROWS (16) |
1008 | #define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals) | 1001 | #define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals) |
1009 | 1002 | ||
1010 | static int ipath_sd_setvals(struct ipath_devdata *dd) | 1003 | static int qib_sd_setvals(struct qib_devdata *dd) |
1011 | { | 1004 | { |
1012 | int idx, midx; | 1005 | int idx, midx; |
1013 | int min_idx; /* Minimum index for this portion of table */ | 1006 | int min_idx; /* Minimum index for this portion of table */ |
1014 | uint32_t dds_reg_map; | 1007 | uint32_t dds_reg_map; |
1015 | u64 __iomem *taddr, *iaddr; | 1008 | u64 __iomem *taddr, *iaddr; |
1016 | uint64_t data; | 1009 | uint64_t data; |
1017 | uint64_t sdctl; | 1010 | uint64_t sdctl; |
1018 | 1011 | ||
1019 | taddr = dd->ipath_kregbase + KR_IBSerDesMappTable; | 1012 | taddr = dd->kregbase + kr_serdes_maptable; |
1020 | iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq; | 1013 | iaddr = dd->kregbase + kr_serdes_ddsrxeq0; |
1021 | 1014 | ||
1022 | /* | 1015 | /* |
1023 | * Init the DDS section of the table. | 1016 | * Init the DDS section of the table. |
1024 | * Each "row" of the table provokes NUM_DDS_REG writes, to the | 1017 | * Each "row" of the table provokes NUM_DDS_REG writes, to the |
1025 | * registers indicated in DDS_REG_MAP. | 1018 | * registers indicated in DDS_REG_MAP. |
1026 | */ | 1019 | */ |
1027 | sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl); | 1020 | sdctl = qib_read_kreg64(dd, kr_ibserdesctrl); |
1028 | sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8); | 1021 | sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8); |
1029 | sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13); | 1022 | sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13); |
1030 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl); | 1023 | qib_write_kreg(dd, kr_ibserdesctrl, sdctl); |
1031 | 1024 | ||
1032 | /* | 1025 | /* |
1033 | * Iterate down table within loop for each register to store. | 1026 | * Iterate down table within loop for each register to store. |
@@ -1037,21 +1030,21 @@ static int ipath_sd_setvals(struct ipath_devdata *dd) | |||
1037 | data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT; | 1030 | data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT; |
1038 | writeq(data, iaddr + idx); | 1031 | writeq(data, iaddr + idx); |
1039 | mmiowb(); | 1032 | mmiowb(); |
1040 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 1033 | qib_read_kreg32(dd, kr_scratch); |
1041 | dds_reg_map >>= 4; | 1034 | dds_reg_map >>= 4; |
1042 | for (midx = 0; midx < DDS_ROWS; ++midx) { | 1035 | for (midx = 0; midx < DDS_ROWS; ++midx) { |
1043 | u64 __iomem *daddr = taddr + ((midx << 4) + idx); | 1036 | u64 __iomem *daddr = taddr + ((midx << 4) + idx); |
1044 | data = dds_init_vals[midx].reg_vals[idx]; | 1037 | data = dds_init_vals[midx].reg_vals[idx]; |
1045 | writeq(data, daddr); | 1038 | writeq(data, daddr); |
1046 | mmiowb(); | 1039 | mmiowb(); |
1047 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 1040 | qib_read_kreg32(dd, kr_scratch); |
1048 | } /* End inner for (vals for this reg, each row) */ | 1041 | } /* End inner for (vals for this reg, each row) */ |
1049 | } /* end outer for (regs to be stored) */ | 1042 | } /* end outer for (regs to be stored) */ |
1050 | 1043 | ||
1051 | /* | 1044 | /* |
1052 | * Init the RXEQ section of the table. As explained above the table | 1045 | * Init the RXEQ section of the table. |
1053 | * rxeq_init_vals[], this runs in a different order, as the pattern | 1046 | * This runs in a different order, as the pattern of |
1054 | * of register references is more complex, but there are only | 1047 | * register references is more complex, but there are only |
1055 | * four "data" values per register. | 1048 | * four "data" values per register. |
1056 | */ | 1049 | */ |
1057 | min_idx = idx; /* RXEQ indices pick up where DDS left off */ | 1050 | min_idx = idx; /* RXEQ indices pick up where DDS left off */ |
@@ -1066,13 +1059,13 @@ static int ipath_sd_setvals(struct ipath_devdata *dd) | |||
1066 | /* Store the next RXEQ register address */ | 1059 | /* Store the next RXEQ register address */ |
1067 | writeq(rxeq_init_vals[idx].rdesc, iaddr + didx); | 1060 | writeq(rxeq_init_vals[idx].rdesc, iaddr + didx); |
1068 | mmiowb(); | 1061 | mmiowb(); |
1069 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 1062 | qib_read_kreg32(dd, kr_scratch); |
1070 | /* Iterate through RXEQ values */ | 1063 | /* Iterate through RXEQ values */ |
1071 | for (vidx = 0; vidx < 4; vidx++) { | 1064 | for (vidx = 0; vidx < 4; vidx++) { |
1072 | data = rxeq_init_vals[idx].rdata[vidx]; | 1065 | data = rxeq_init_vals[idx].rdata[vidx]; |
1073 | writeq(data, taddr + (vidx << 6) + idx); | 1066 | writeq(data, taddr + (vidx << 6) + idx); |
1074 | mmiowb(); | 1067 | mmiowb(); |
1075 | ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); | 1068 | qib_read_kreg32(dd, kr_scratch); |
1076 | } | 1069 | } |
1077 | } /* end outer for (Reg-writes for RXEQ) */ | 1070 | } /* end outer for (Reg-writes for RXEQ) */ |
1078 | return 0; | 1071 | return 0; |
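Two packing details above are easy to miss: the table row counts are written into 5-bit fields of ibserdesctrl (DDS register count at bits 12:8, RXEQ row count at bits 17:13) before the tables are filled, and each RXEQ descriptor keeps the element number in its low nibble with the register address above it. A short sketch of both packings; the field positions come from the masks in the code, the example counts are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define RXEQ_INIT_RDESC(elt, addr)  (((elt) & 0xF) | ((addr) << 4))

/* Update the two 5-bit row-count fields of the SerDes control register. */
static uint64_t pack_sdctl(uint64_t sdctl, unsigned dds_regs, unsigned rxeq_rows)
{
    sdctl = (sdctl & ~(0x1fULL <<  8)) | ((uint64_t)dds_regs  <<  8);
    sdctl = (sdctl & ~(0x1fULL << 13)) | ((uint64_t)rxeq_rows << 13);
    return sdctl;
}

int main(void)
{
    unsigned rdesc = RXEQ_INIT_RDESC(7, 0x27);       /* elt 7, addr 0x27 */

    printf("rdesc = 0x%03X (elt %u, addr 0x%02X)\n",
           rdesc, rdesc & 0xF, rdesc >> 4);
    printf("sdctl = 0x%llX\n",
           (unsigned long long)pack_sdctl(0, 6, 9)); /* e.g. 6 DDS regs, 9 rows */
    return 0;
}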
@@ -1085,33 +1078,18 @@ static int ipath_sd_setvals(struct ipath_devdata *dd) | |||
1085 | #define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8) | 1078 | #define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8) |
1086 | #define START_EQ2(chan) EPB_LOC(chan, 7, 0x28) | 1079 | #define START_EQ2(chan) EPB_LOC(chan, 7, 0x28) |
1087 | 1080 | ||
1088 | static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask) | ||
1089 | { | ||
1090 | int ret = -1; | ||
1091 | int sloc; /* shifted loc, for messages */ | ||
1092 | |||
1093 | loc |= (1U << EPB_IB_QUAD0_CS_SHF); | ||
1094 | sloc = loc >> EPB_ADDR_SHF; | ||
1095 | |||
1096 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask); | ||
1097 | if (ret < 0) | ||
1098 | ipath_dev_err(dd, "Write failed: elt %d," | ||
1099 | " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n", | ||
1100 | (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7, | ||
1101 | val & 0xFF, mask & 0xFF); | ||
1102 | return ret; | ||
1103 | } | ||
1104 | |||
1105 | /* | 1081 | /* |
1106 | * Repeat a "store" across all channels of the IB SerDes. | 1082 | * Repeat a "store" across all channels of the IB SerDes. |
1107 | * Although nominally it inherits the "read value" of the last | 1083 | * Although nominally it inherits the "read value" of the last |
1108 | * channel it modified, the only really useful return is <0 for | 1084 | * channel it modified, the only really useful return is <0 for |
1109 | * failure, >= 0 for success. The parameter 'loc' is assumed to | 1085 | * failure, >= 0 for success. The parameter 'loc' is assumed to |
1110 | * be the location for the channel-0 copy of the register to | 1086 | * be the location in some channel of the register to be modified |
1111 | * be modified. | 1087 | * The caller can specify use of the "gang write" option of EPB, |
1088 | * in which case we use the specified channel data for any fields | ||
1089 | * not explicitly written. | ||
1112 | */ | 1090 | */ |
1113 | static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, | 1091 | static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val, |
1114 | int mask) | 1092 | int mask) |
1115 | { | 1093 | { |
1116 | int ret = -1; | 1094 | int ret = -1; |
1117 | int chnl; | 1095 | int chnl; |
@@ -1126,24 +1104,27 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, | |||
1126 | loc |= (1U << EPB_IB_QUAD0_CS_SHF); | 1104 | loc |= (1U << EPB_IB_QUAD0_CS_SHF); |
1127 | chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7; | 1105 | chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7; |
1128 | if (mask != 0xFF) { | 1106 | if (mask != 0xFF) { |
1129 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, | 1107 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, |
1130 | loc & ~EPB_GLOBAL_WR, 0, 0); | 1108 | loc & ~EPB_GLOBAL_WR, 0, 0); |
1131 | if (ret < 0) { | 1109 | if (ret < 0) { |
1132 | int sloc = loc >> EPB_ADDR_SHF; | 1110 | int sloc = loc >> EPB_ADDR_SHF; |
1133 | ipath_dev_err(dd, "pre-read failed: elt %d," | 1111 | |
1134 | " addr 0x%X, chnl %d\n", (sloc & 0xF), | 1112 | qib_dev_err(dd, "pre-read failed: elt %d," |
1135 | (sloc >> 9) & 0x3f, chnl); | 1113 | " addr 0x%X, chnl %d\n", |
1114 | (sloc & 0xF), | ||
1115 | (sloc >> 9) & 0x3f, chnl); | ||
1136 | return ret; | 1116 | return ret; |
1137 | } | 1117 | } |
1138 | val = (ret & ~mask) | (val & mask); | 1118 | val = (ret & ~mask) | (val & mask); |
1139 | } | 1119 | } |
1140 | loc &= ~(7 << (4+EPB_ADDR_SHF)); | 1120 | loc &= ~(7 << (4+EPB_ADDR_SHF)); |
1141 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); | 1121 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); |
1142 | if (ret < 0) { | 1122 | if (ret < 0) { |
1143 | int sloc = loc >> EPB_ADDR_SHF; | 1123 | int sloc = loc >> EPB_ADDR_SHF; |
1144 | ipath_dev_err(dd, "Global WR failed: elt %d," | 1124 | |
1145 | " addr 0x%X, val %02X\n", | 1125 | qib_dev_err(dd, "Global WR failed: elt %d," |
1146 | (sloc & 0xF), (sloc >> 9) & 0x3f, val); | 1126 | " addr 0x%X, val %02X\n", |
1127 | (sloc & 0xF), (sloc >> 9) & 0x3f, val); | ||
1147 | } | 1128 | } |
1148 | return ret; | 1129 | return ret; |
1149 | } | 1130 | } |
@@ -1151,16 +1132,17 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, | |||
1151 | loc &= ~(7 << (4+EPB_ADDR_SHF)); | 1132 | loc &= ~(7 << (4+EPB_ADDR_SHF)); |
1152 | loc |= (1U << EPB_IB_QUAD0_CS_SHF); | 1133 | loc |= (1U << EPB_IB_QUAD0_CS_SHF); |
1153 | for (chnl = 0; chnl < 4; ++chnl) { | 1134 | for (chnl = 0; chnl < 4; ++chnl) { |
1154 | int cloc; | 1135 | int cloc = loc | (chnl << (4+EPB_ADDR_SHF)); |
1155 | cloc = loc | (chnl << (4+EPB_ADDR_SHF)); | 1136 | |
1156 | ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask); | 1137 | ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask); |
1157 | if (ret < 0) { | 1138 | if (ret < 0) { |
1158 | int sloc = loc >> EPB_ADDR_SHF; | 1139 | int sloc = loc >> EPB_ADDR_SHF; |
1159 | ipath_dev_err(dd, "Write failed: elt %d," | 1140 | |
1160 | " addr 0x%X, chnl %d, val 0x%02X," | 1141 | qib_dev_err(dd, "Write failed: elt %d," |
1161 | " mask 0x%02X\n", | 1142 | " addr 0x%X, chnl %d, val 0x%02X," |
1162 | (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, | 1143 | " mask 0x%02X\n", |
1163 | val & 0xFF, mask & 0xFF); | 1144 | (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, |
1145 | val & 0xFF, mask & 0xFF); | ||
1164 | break; | 1146 | break; |
1165 | } | 1147 | } |
1166 | } | 1148 | } |
@@ -1171,7 +1153,7 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, | |||
1171 | * Set the Tx values normally modified by IBC in IB1.2 mode to default | 1153 | * Set the Tx values normally modified by IBC in IB1.2 mode to default |
1172 | * values, as gotten from first row of init table. | 1154 | * values, as gotten from first row of init table. |
1173 | */ | 1155 | */ |
1174 | static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi) | 1156 | static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi) |
1175 | { | 1157 | { |
1176 | int ret; | 1158 | int ret; |
1177 | int idx, reg, data; | 1159 | int idx, reg, data; |
@@ -1194,7 +1176,7 @@ static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi) | |||
1194 | * Set the Rx values normally modified by IBC in IB1.2 mode to default | 1176 | * Set the Rx values normally modified by IBC in IB1.2 mode to default |
1195 | * values, as gotten from selected column of init table. | 1177 | * values, as gotten from selected column of init table. |
1196 | */ | 1178 | */ |
1197 | static int set_rxeq_vals(struct ipath_devdata *dd, int vsel) | 1179 | static int set_rxeq_vals(struct qib_devdata *dd, int vsel) |
1198 | { | 1180 | { |
1199 | int ret; | 1181 | int ret; |
1200 | int ridx; | 1182 | int ridx; |
@@ -1202,6 +1184,7 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel) | |||
1202 | 1184 | ||
1203 | for (ridx = 0; ridx < cnt; ++ridx) { | 1185 | for (ridx = 0; ridx < cnt; ++ridx) { |
1204 | int elt, reg, val, loc; | 1186 | int elt, reg, val, loc; |
1187 | |||
1205 | elt = rxeq_init_vals[ridx].rdesc & 0xF; | 1188 | elt = rxeq_init_vals[ridx].rdesc & 0xF; |
1206 | reg = rxeq_init_vals[ridx].rdesc >> 4; | 1189 | reg = rxeq_init_vals[ridx].rdesc >> 4; |
1207 | loc = EPB_LOC(0, elt, reg); | 1190 | loc = EPB_LOC(0, elt, reg); |
@@ -1217,83 +1200,66 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel) | |||
1217 | /* | 1200 | /* |
1218 | * Set the default values (row 0) for DDR Driver De-emphasis. | 1201 | * Set the default values (row 0) for DDR Driver De-emphasis. |
1219 | * We do this initially and whenever we turn off IB-1.2 | 1202 | * We do this initially and whenever we turn off IB-1.2 |
1203 | * | ||
1220 | * The "default" values for Rx equalization are also stored to | 1204 | * The "default" values for Rx equalization are also stored to |
1221 | * SerDes registers. Formerly (and still default), we used set 2. | 1205 | * SerDes registers. Formerly (and still default), we used set 2. |
1222 | * For experimenting with cables and link-partners, we allow changing | 1206 | * For experimenting with cables and link-partners, we allow changing |
1223 | * that via a module parameter. | 1207 | * that via a module parameter. |
1224 | */ | 1208 | */ |
1225 | static unsigned ipath_rxeq_set = 2; | 1209 | static unsigned qib_rxeq_set = 2; |
1226 | module_param_named(rxeq_default_set, ipath_rxeq_set, uint, | 1210 | module_param_named(rxeq_default_set, qib_rxeq_set, uint, |
1227 | S_IWUSR | S_IRUGO); | 1211 | S_IWUSR | S_IRUGO); |
1228 | MODULE_PARM_DESC(rxeq_default_set, | 1212 | MODULE_PARM_DESC(rxeq_default_set, |
1229 | "Which set [0..3] of Rx Equalization values is default"); | 1213 | "Which set [0..3] of Rx Equalization values is default"); |
1230 | 1214 | ||
1231 | static int ipath_internal_presets(struct ipath_devdata *dd) | 1215 | static int qib_internal_presets(struct qib_devdata *dd) |
1232 | { | 1216 | { |
1233 | int ret = 0; | 1217 | int ret = 0; |
1234 | 1218 | ||
1235 | ret = set_dds_vals(dd, dds_init_vals + DDS_3M); | 1219 | ret = set_dds_vals(dd, dds_init_vals + DDS_3M); |
1236 | 1220 | ||
1237 | if (ret < 0) | 1221 | if (ret < 0) |
1238 | ipath_dev_err(dd, "Failed to set default DDS values\n"); | 1222 | qib_dev_err(dd, "Failed to set default DDS values\n"); |
1239 | ret = set_rxeq_vals(dd, ipath_rxeq_set & 3); | 1223 | ret = set_rxeq_vals(dd, qib_rxeq_set & 3); |
1240 | if (ret < 0) | 1224 | if (ret < 0) |
1241 | ipath_dev_err(dd, "Failed to set default RXEQ values\n"); | 1225 | qib_dev_err(dd, "Failed to set default RXEQ values\n"); |
1242 | return ret; | 1226 | return ret; |
1243 | } | 1227 | } |
1244 | 1228 | ||
1245 | int ipath_sd7220_presets(struct ipath_devdata *dd) | 1229 | int qib_sd7220_presets(struct qib_devdata *dd) |
1246 | { | 1230 | { |
1247 | int ret = 0; | 1231 | int ret = 0; |
1248 | 1232 | ||
1249 | if (!dd->ipath_presets_needed) | 1233 | if (!dd->cspec->presets_needed) |
1250 | return ret; | 1234 | return ret; |
1251 | dd->ipath_presets_needed = 0; | 1235 | dd->cspec->presets_needed = 0; |
1252 | /* Assert uC reset, so we don't clash with it. */ | 1236 | /* Assert uC reset, so we don't clash with it. */ |
1253 | ipath_ibsd_reset(dd, 1); | 1237 | qib_ibsd_reset(dd, 1); |
1254 | udelay(2); | 1238 | udelay(2); |
1255 | ipath_sd_trimdone_monitor(dd, "link-down"); | 1239 | qib_sd_trimdone_monitor(dd, "link-down"); |
1256 | 1240 | ||
1257 | ret = ipath_internal_presets(dd); | 1241 | ret = qib_internal_presets(dd); |
1258 | return ret; | 1242 | return ret; |
1259 | } | 1243 | } |
1260 | 1244 | ||
1261 | static int ipath_sd_trimself(struct ipath_devdata *dd, int val) | 1245 | static int qib_sd_trimself(struct qib_devdata *dd, int val) |
1262 | { | 1246 | { |
1263 | return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF); | 1247 | int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF); |
1248 | |||
1249 | return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); | ||
1264 | } | 1250 | } |
1265 | 1251 | ||
1266 | static int ipath_sd_early(struct ipath_devdata *dd) | 1252 | static int qib_sd_early(struct qib_devdata *dd) |
1267 | { | 1253 | { |
1268 | int ret = -1; /* Default failed */ | 1254 | int ret; |
1269 | int chnl; | ||
1270 | 1255 | ||
1271 | for (chnl = 0; chnl < 4; ++chnl) { | 1256 | ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF); |
1272 | ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF); | 1257 | if (ret < 0) |
1273 | if (ret < 0) | 1258 | goto bail; |
1274 | goto bail; | 1259 | ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF); |
1275 | } | 1260 | if (ret < 0) |
1276 | for (chnl = 0; chnl < 4; ++chnl) { | 1261 | goto bail; |
1277 | ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF); | 1262 | ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF); |
1278 | if (ret < 0) | ||
1279 | goto bail; | ||
1280 | } | ||
1281 | /* more fine-tuning of what will be default */ | ||
1282 | for (chnl = 0; chnl < 4; ++chnl) { | ||
1283 | ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF); | ||
1284 | if (ret < 0) | ||
1285 | goto bail; | ||
1286 | } | ||
1287 | for (chnl = 0; chnl < 4; ++chnl) { | ||
1288 | ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF); | ||
1289 | if (ret < 0) | ||
1290 | goto bail; | ||
1291 | } | ||
1292 | for (chnl = 0; chnl < 4; ++chnl) { | ||
1293 | ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF); | ||
1294 | if (ret < 0) | ||
1295 | goto bail; | ||
1296 | } | ||
1297 | bail: | 1263 | bail: |
1298 | return ret; | 1264 | return ret; |
1299 | } | 1265 | } |
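For reference, the rewritten qib_sd_early() pushes the per-channel iteration down into ibsd_mod_allchnls(); the loop visible at the top of this hunk composes one EPB location per SerDes channel by inserting the channel number into the address field before qib_sd7220_reg_mod() is called. A small user-space sketch of that channel-field composition (the shift constants here are assumptions chosen only so the sketch compiles, not the driver's real values):

/*
 * Illustrative sketch of per-channel EPB location composition.
 * EPB_ADDR_SHF and EPB_IB_QUAD0_CS_SHF are assumed values; the real
 * constants live in the driver headers.
 */
#include <stdio.h>

#define EPB_ADDR_SHF          8          /* assumed value */
#define EPB_IB_QUAD0_CS_SHF   25         /* assumed value */

static unsigned chnl_loc(unsigned loc, unsigned chnl)
{
	loc &= ~(7U << (4 + EPB_ADDR_SHF));        /* clear the channel field */
	loc |= 1U << EPB_IB_QUAD0_CS_SHF;          /* select IB quad 0 chip-select */
	return loc | (chnl << (4 + EPB_ADDR_SHF)); /* insert the channel number */
}

int main(void)
{
	unsigned base = 0x123; /* arbitrary element/register location */

	for (unsigned chnl = 0; chnl < 4; ++chnl)
		printf("chnl %u -> loc 0x%08X\n", chnl, chnl_loc(base, chnl));
	return 0;
}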
@@ -1302,50 +1268,53 @@ bail: | |||
1302 | #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6) | 1268 | #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6) |
1303 | #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF) | 1269 | #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF) |
1304 | 1270 | ||
1305 | static int ipath_sd_dactrim(struct ipath_devdata *dd) | 1271 | static int qib_sd_dactrim(struct qib_devdata *dd) |
1306 | { | 1272 | { |
1307 | int ret = -1; /* Default failed */ | 1273 | int ret; |
1308 | int chnl; | 1274 | |
1275 | ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF); | ||
1276 | if (ret < 0) | ||
1277 | goto bail; | ||
1278 | |||
1279 | /* more fine-tuning of what will be default */ | ||
1280 | ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF); | ||
1281 | if (ret < 0) | ||
1282 | goto bail; | ||
1283 | |||
1284 | ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF); | ||
1285 | if (ret < 0) | ||
1286 | goto bail; | ||
1287 | |||
1288 | ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF); | ||
1289 | if (ret < 0) | ||
1290 | goto bail; | ||
1291 | |||
1292 | ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF); | ||
1293 | if (ret < 0) | ||
1294 | goto bail; | ||
1309 | 1295 | ||
1310 | for (chnl = 0; chnl < 4; ++chnl) { | ||
1311 | ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF); | ||
1312 | if (ret < 0) | ||
1313 | goto bail; | ||
1314 | } | ||
1315 | for (chnl = 0; chnl < 4; ++chnl) { | ||
1316 | ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF); | ||
1317 | if (ret < 0) | ||
1318 | goto bail; | ||
1319 | } | ||
1320 | for (chnl = 0; chnl < 4; ++chnl) { | ||
1321 | ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF); | ||
1322 | if (ret < 0) | ||
1323 | goto bail; | ||
1324 | } | ||
1325 | /* | 1296 | /* |
1326 | * delay for max possible number of steps, with slop. | 1297 | * Delay for max possible number of steps, with slop. |
1327 | * Each step is about 4usec. | 1298 | * Each step is about 4usec. |
1328 | */ | 1299 | */ |
1329 | udelay(415); | 1300 | udelay(415); |
1330 | for (chnl = 0; chnl < 4; ++chnl) { | 1301 | |
1331 | ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF); | 1302 | ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF); |
1332 | if (ret < 0) | 1303 | |
1333 | goto bail; | ||
1334 | } | ||
1335 | bail: | 1304 | bail: |
1336 | return ret; | 1305 | return ret; |
1337 | } | 1306 | } |
1338 | 1307 | ||
1339 | #define RELOCK_FIRST_MS 3 | 1308 | #define RELOCK_FIRST_MS 3 |
1340 | #define RXLSPPM(chan) EPB_LOC(chan, 0, 2) | 1309 | #define RXLSPPM(chan) EPB_LOC(chan, 0, 2) |
1341 | void ipath_toggle_rclkrls(struct ipath_devdata *dd) | 1310 | void toggle_7220_rclkrls(struct qib_devdata *dd) |
1342 | { | 1311 | { |
1343 | int loc = RXLSPPM(0) | EPB_GLOBAL_WR; | 1312 | int loc = RXLSPPM(0) | EPB_GLOBAL_WR; |
1344 | int ret; | 1313 | int ret; |
1345 | 1314 | ||
1346 | ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); | 1315 | ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); |
1347 | if (ret < 0) | 1316 | if (ret < 0) |
1348 | ipath_dev_err(dd, "RCLKRLS failed to clear D7\n"); | 1317 | qib_dev_err(dd, "RCLKRLS failed to clear D7\n"); |
1349 | else { | 1318 | else { |
1350 | udelay(1); | 1319 | udelay(1); |
1351 | ibsd_mod_allchnls(dd, loc, 0x80, 0x80); | 1320 | ibsd_mod_allchnls(dd, loc, 0x80, 0x80); |
@@ -1354,109 +1323,91 @@ void ipath_toggle_rclkrls(struct ipath_devdata *dd) | |||
1354 | udelay(1); | 1323 | udelay(1); |
1355 | ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); | 1324 | ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); |
1356 | if (ret < 0) | 1325 | if (ret < 0) |
1357 | ipath_dev_err(dd, "RCLKRLS failed to clear D7\n"); | 1326 | qib_dev_err(dd, "RCLKRLS failed to clear D7\n"); |
1358 | else { | 1327 | else { |
1359 | udelay(1); | 1328 | udelay(1); |
1360 | ibsd_mod_allchnls(dd, loc, 0x80, 0x80); | 1329 | ibsd_mod_allchnls(dd, loc, 0x80, 0x80); |
1361 | } | 1330 | } |
1362 | /* Now reset xgxs and IBC to complete the recovery */ | 1331 | /* Now reset xgxs and IBC to complete the recovery */ |
1363 | dd->ipath_f_xgxs_reset(dd); | 1332 | dd->f_xgxs_reset(dd->pport); |
1364 | } | 1333 | } |
1365 | 1334 | ||
1366 | /* | 1335 | /* |
1367 | * Shut down the timer that polls for relock occasions, if needed. | 1336 | * Shut down the timer that polls for relock occasions, if needed. |
1368 | * This is "hooked" from ipath_7220_quiet_serdes(), which is called | 1337 | * This is "hooked" from qib_7220_quiet_serdes(), which is called |
1369 | * just before ipath_shutdown_device() in ipath_driver.c shuts down all | 1338 | * just before qib_shutdown_device() in qib_driver.c shuts down all |
1370 | * the other timers. | 1339 | * the other timers. |
1371 | */ | 1340 | */ |
1372 | void ipath_shutdown_relock_poll(struct ipath_devdata *dd) | 1341 | void shutdown_7220_relock_poll(struct qib_devdata *dd) |
1373 | { | 1342 | { |
1374 | struct ipath_relock *irp = &dd->ipath_relock_singleton; | 1343 | if (dd->cspec->relock_timer_active) |
1375 | if (atomic_read(&irp->ipath_relock_timer_active)) { | 1344 | del_timer_sync(&dd->cspec->relock_timer); |
1376 | del_timer_sync(&irp->ipath_relock_timer); | ||
1377 | atomic_set(&irp->ipath_relock_timer_active, 0); | ||
1378 | } | ||
1379 | } | 1345 | } |
1380 | 1346 | ||
1381 | static unsigned ipath_relock_by_timer = 1; | 1347 | static unsigned qib_relock_by_timer = 1; |
1382 | module_param_named(relock_by_timer, ipath_relock_by_timer, uint, | 1348 | module_param_named(relock_by_timer, qib_relock_by_timer, uint, |
1383 | S_IWUSR | S_IRUGO); | 1349 | S_IWUSR | S_IRUGO); |
1384 | MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up"); | 1350 | MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up"); |
1385 | 1351 | ||
1386 | static void ipath_run_relock(unsigned long opaque) | 1352 | static void qib_run_relock(unsigned long opaque) |
1387 | { | 1353 | { |
1388 | struct ipath_devdata *dd = (struct ipath_devdata *)opaque; | 1354 | struct qib_devdata *dd = (struct qib_devdata *)opaque; |
1389 | struct ipath_relock *irp = &dd->ipath_relock_singleton; | 1355 | struct qib_pportdata *ppd = dd->pport; |
1390 | u64 val, ltstate; | 1356 | struct qib_chip_specific *cs = dd->cspec; |
1391 | 1357 | int timeoff; | |
1392 | if (!(dd->ipath_flags & IPATH_INITTED)) { | ||
1393 | /* Not yet up, just reenable the timer for later */ | ||
1394 | irp->ipath_relock_interval = HZ; | ||
1395 | mod_timer(&irp->ipath_relock_timer, jiffies + HZ); | ||
1396 | return; | ||
1397 | } | ||
1398 | 1358 | ||
1399 | /* | 1359 | /* |
1400 | * Check link-training state for "stuck" state. | 1360 | * Check link-training state for "stuck" state, when down. |
1401 | * if found, try relock and schedule another try at | 1361 | * if found, try relock and schedule another try at |
1402 | * exponentially growing delay, maxed at one second. | 1362 | * exponentially growing delay, maxed at one second. |
1403 | * if not stuck, our work is done. | 1363 | * if not stuck, our work is done. |
1404 | */ | 1364 | */ |
1405 | val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); | 1365 | if ((dd->flags & QIB_INITTED) && !(ppd->lflags & |
1406 | ltstate = ipath_ib_linktrstate(dd, val); | 1366 | (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED | |
1407 | 1367 | QIBL_LINKACTIVE))) { | |
1408 | if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT | 1368 | if (qib_relock_by_timer) { |
1409 | && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) { | 1369 | if (!(ppd->lflags & QIBL_IB_LINK_DISABLED)) |
1410 | int timeoff; | 1370 | toggle_7220_rclkrls(dd); |
1411 | /* Not up yet. Try again, if allowed by module-param */ | ||
1412 | if (ipath_relock_by_timer) { | ||
1413 | if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) | ||
1414 | ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n"); | ||
1415 | else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) { | ||
1416 | ipath_cdbg(VERBOSE, "RELOCK\n"); | ||
1417 | ipath_toggle_rclkrls(dd); | ||
1418 | } | ||
1419 | } | 1371 | } |
1420 | /* re-set timer for next check */ | 1372 | /* re-set timer for next check */ |
1421 | timeoff = irp->ipath_relock_interval << 1; | 1373 | timeoff = cs->relock_interval << 1; |
1422 | if (timeoff > HZ) | 1374 | if (timeoff > HZ) |
1423 | timeoff = HZ; | 1375 | timeoff = HZ; |
1424 | irp->ipath_relock_interval = timeoff; | 1376 | cs->relock_interval = timeoff; |
1425 | 1377 | } else | |
1426 | mod_timer(&irp->ipath_relock_timer, jiffies + timeoff); | 1378 | timeoff = HZ; |
1427 | } else { | 1379 | mod_timer(&cs->relock_timer, jiffies + timeoff); |
1428 | /* Up, so no more need to check so often */ | ||
1429 | mod_timer(&irp->ipath_relock_timer, jiffies + HZ); | ||
1430 | } | ||
1431 | } | 1380 | } |
1432 | 1381 | ||
1433 | void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup) | 1382 | void set_7220_relock_poll(struct qib_devdata *dd, int ibup) |
1434 | { | 1383 | { |
1435 | struct ipath_relock *irp = &dd->ipath_relock_singleton; | 1384 | struct qib_chip_specific *cs = dd->cspec; |
1436 | 1385 | ||
1437 | if (ibup > 0) { | 1386 | if (ibup) { |
1438 | /* we are now up, so relax timer to 1 second interval */ | 1387 | /* We are now up, relax timer to 1 second interval */ |
1439 | if (atomic_read(&irp->ipath_relock_timer_active)) | 1388 | if (cs->relock_timer_active) { |
1440 | mod_timer(&irp->ipath_relock_timer, jiffies + HZ); | 1389 | cs->relock_interval = HZ; |
1390 | mod_timer(&cs->relock_timer, jiffies + HZ); | ||
1391 | } | ||
1441 | } else { | 1392 | } else { |
1442 | /* Transition to down, (re-)set timer to short interval. */ | 1393 | /* Transition to down, (re-)set timer to short interval. */ |
1443 | int timeout; | 1394 | unsigned int timeout; |
1444 | timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000; | 1395 | |
1396 | timeout = msecs_to_jiffies(RELOCK_FIRST_MS); | ||
1445 | if (timeout == 0) | 1397 | if (timeout == 0) |
1446 | timeout = 1; | 1398 | timeout = 1; |
1447 | /* If timer has not yet been started, do so. */ | 1399 | /* If timer has not yet been started, do so. */ |
1448 | if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) { | 1400 | if (!cs->relock_timer_active) { |
1449 | init_timer(&irp->ipath_relock_timer); | 1401 | cs->relock_timer_active = 1; |
1450 | irp->ipath_relock_timer.function = ipath_run_relock; | 1402 | init_timer(&cs->relock_timer); |
1451 | irp->ipath_relock_timer.data = (unsigned long) dd; | 1403 | cs->relock_timer.function = qib_run_relock; |
1452 | irp->ipath_relock_interval = timeout; | 1404 | cs->relock_timer.data = (unsigned long) dd; |
1453 | irp->ipath_relock_timer.expires = jiffies + timeout; | 1405 | cs->relock_interval = timeout; |
1454 | add_timer(&irp->ipath_relock_timer); | 1406 | cs->relock_timer.expires = jiffies + timeout; |
1407 | add_timer(&cs->relock_timer); | ||
1455 | } else { | 1408 | } else { |
1456 | irp->ipath_relock_interval = timeout; | 1409 | cs->relock_interval = timeout; |
1457 | mod_timer(&irp->ipath_relock_timer, jiffies + timeout); | 1410 | mod_timer(&cs->relock_timer, jiffies + timeout); |
1458 | atomic_dec(&irp->ipath_relock_timer_active); | ||
1459 | } | 1411 | } |
1460 | } | 1412 | } |
1461 | } | 1413 | } |
1462 | |||
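The relock machinery above backs off exponentially: each timer tick while the link stays down doubles relock_interval, capped at one second, and a fresh link-down transition restarts from RELOCK_FIRST_MS. A self-contained sketch of that schedule (HZ is assumed to be 250 and msecs_to_jiffies() is approximated here purely so the sketch runs outside the kernel):

/*
 * Minimal sketch of the relock-poll backoff: the interval doubles on
 * every tick while the link is down, capped at one second (HZ jiffies),
 * starting from RELOCK_FIRST_MS after a link-down transition.
 */
#include <stdio.h>

#define HZ              250               /* assumed tick rate for the sketch */
#define RELOCK_FIRST_MS 3

static unsigned msecs_to_jiffies(unsigned ms)
{
	return (ms * HZ + 999) / 1000;    /* round up, as the kernel does */
}

int main(void)
{
	unsigned interval = msecs_to_jiffies(RELOCK_FIRST_MS);

	if (interval == 0)
		interval = 1;

	/* Show how quickly the poll interval grows toward the 1 s cap. */
	for (int tick = 0; tick < 12; ++tick) {
		printf("tick %2d: next check in %u jiffies\n", tick, interval);
		interval <<= 1;
		if (interval > HZ)
			interval = HZ;
	}
	return 0;
}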
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/qib/qib_sd7220_img.c index 5ef59da9270a..a1118fbd2370 100644 --- a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c +++ b/drivers/infiniband/hw/qib/qib_sd7220_img.c | |||
@@ -38,11 +38,10 @@ | |||
38 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
39 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
40 | 40 | ||
41 | #include "ipath_kernel.h" | 41 | #include "qib.h" |
42 | #include "ipath_registers.h" | 42 | #include "qib_7220.h" |
43 | #include "ipath_7220.h" | ||
44 | 43 | ||
45 | static unsigned char ipath_sd7220_ib_img[] = { | 44 | static unsigned char qib_sd7220_ib_img[] = { |
46 | /*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6, | 45 | /*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6, |
47 | 0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, | 46 | 0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, |
48 | /*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01, | 47 | /*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01, |
@@ -1069,14 +1068,14 @@ static unsigned char ipath_sd7220_ib_img[] = { | |||
1069 | 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81 | 1068 | 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81 |
1070 | }; | 1069 | }; |
1071 | 1070 | ||
1072 | int ipath_sd7220_ib_load(struct ipath_devdata *dd) | 1071 | int qib_sd7220_ib_load(struct qib_devdata *dd) |
1073 | { | 1072 | { |
1074 | return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img, | 1073 | return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img, |
1075 | sizeof(ipath_sd7220_ib_img), 0); | 1074 | sizeof(qib_sd7220_ib_img), 0); |
1076 | } | 1075 | } |
1077 | 1076 | ||
1078 | int ipath_sd7220_ib_vfy(struct ipath_devdata *dd) | 1077 | int qib_sd7220_ib_vfy(struct qib_devdata *dd) |
1079 | { | 1078 | { |
1080 | return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img, | 1079 | return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img, |
1081 | sizeof(ipath_sd7220_ib_img), 0); | 1080 | sizeof(qib_sd7220_ib_img), 0); |
1082 | } | 1081 | } |
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c new file mode 100644 index 000000000000..b8456881f7f6 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_sdma.c | |||
@@ -0,0 +1,973 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/netdevice.h> | ||
35 | |||
36 | #include "qib.h" | ||
37 | #include "qib_common.h" | ||
38 | |||
39 | /* default pio off, sdma on */ | ||
40 | static ushort sdma_descq_cnt = 256; | ||
41 | module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO); | ||
42 | MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries"); | ||
43 | |||
44 | /* | ||
45 | * Bits defined in the send DMA descriptor. | ||
46 | */ | ||
47 | #define SDMA_DESC_LAST (1ULL << 11) | ||
48 | #define SDMA_DESC_FIRST (1ULL << 12) | ||
49 | #define SDMA_DESC_DMA_HEAD (1ULL << 13) | ||
50 | #define SDMA_DESC_USE_LARGE_BUF (1ULL << 14) | ||
51 | #define SDMA_DESC_INTR (1ULL << 15) | ||
52 | #define SDMA_DESC_COUNT_LSB 16 | ||
53 | #define SDMA_DESC_GEN_LSB 30 | ||
54 | |||
55 | char *qib_sdma_state_names[] = { | ||
56 | [qib_sdma_state_s00_hw_down] = "s00_HwDown", | ||
57 | [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait", | ||
58 | [qib_sdma_state_s20_idle] = "s20_Idle", | ||
59 | [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait", | ||
60 | [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait", | ||
61 | [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait", | ||
62 | [qib_sdma_state_s99_running] = "s99_Running", | ||
63 | }; | ||
64 | |||
65 | char *qib_sdma_event_names[] = { | ||
66 | [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown", | ||
67 | [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart", | ||
68 | [qib_sdma_event_e20_hw_started] = "e20_HwStarted", | ||
69 | [qib_sdma_event_e30_go_running] = "e30_GoRunning", | ||
70 | [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned", | ||
71 | [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned", | ||
72 | [qib_sdma_event_e60_hw_halted] = "e60_HwHalted", | ||
73 | [qib_sdma_event_e70_go_idle] = "e70_GoIdle", | ||
74 | [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted", | ||
75 | [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted", | ||
76 | [qib_sdma_event_e90_timer_tick] = "e90_TimerTick", | ||
77 | }; | ||
78 | |||
79 | /* declare all statics here rather than keep sorting */ | ||
80 | static int alloc_sdma(struct qib_pportdata *); | ||
81 | static void sdma_complete(struct kref *); | ||
82 | static void sdma_finalput(struct qib_sdma_state *); | ||
83 | static void sdma_get(struct qib_sdma_state *); | ||
84 | static void sdma_put(struct qib_sdma_state *); | ||
85 | static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states); | ||
86 | static void sdma_start_sw_clean_up(struct qib_pportdata *); | ||
87 | static void sdma_sw_clean_up_task(unsigned long); | ||
88 | static void unmap_desc(struct qib_pportdata *, unsigned); | ||
89 | |||
90 | static void sdma_get(struct qib_sdma_state *ss) | ||
91 | { | ||
92 | kref_get(&ss->kref); | ||
93 | } | ||
94 | |||
95 | static void sdma_complete(struct kref *kref) | ||
96 | { | ||
97 | struct qib_sdma_state *ss = | ||
98 | container_of(kref, struct qib_sdma_state, kref); | ||
99 | |||
100 | complete(&ss->comp); | ||
101 | } | ||
102 | |||
103 | static void sdma_put(struct qib_sdma_state *ss) | ||
104 | { | ||
105 | kref_put(&ss->kref, sdma_complete); | ||
106 | } | ||
107 | |||
108 | static void sdma_finalput(struct qib_sdma_state *ss) | ||
109 | { | ||
110 | sdma_put(ss); | ||
111 | wait_for_completion(&ss->comp); | ||
112 | } | ||
113 | |||
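sdma_get()/sdma_put()/sdma_finalput() above implement a small kref-based lifetime for the state machine: one reference is taken when the machine starts, dropped when it tears down, and teardown code drops its own reference and then waits on the completion. A user-space sketch of the same pattern, with a plain counter and a flag standing in for struct kref and the completion:

/*
 * Sketch of the reference-counting pattern: the state machine holds a
 * reference while started, drops it on tear-down, and the final put by
 * the teardown path signals (and then waits for) completion.
 */
#include <assert.h>
#include <stdio.h>

struct sdma_state_sketch {
	int refcount;
	int completed;	/* stands in for the completion being signalled */
};

static void state_get(struct sdma_state_sketch *ss)
{
	ss->refcount++;
}

static void state_put(struct sdma_state_sketch *ss)
{
	if (--ss->refcount == 0)
		ss->completed = 1;	/* complete(&ss->comp) in the driver */
}

static void state_finalput(struct sdma_state_sketch *ss)
{
	state_put(ss);
	/* wait_for_completion() in the driver; here the flag must be set */
	assert(ss->completed);
}

int main(void)
{
	struct sdma_state_sketch ss = { .refcount = 1, .completed = 0 };

	state_get(&ss);      /* state machine started (e10_go_hw_start) */
	state_put(&ss);      /* state machine torn down (sdma_sw_tear_down) */
	state_finalput(&ss); /* qib_teardown_sdma waits for the machine to exit */
	printf("state machine fully stopped\n");
	return 0;
}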
114 | /* | ||
115 | * Complete all the sdma requests on the active list, in the correct | ||
116 | * order, and with appropriate processing. Called when cleaning up | ||
117 | * after sdma shutdown, and when new sdma requests are submitted for | ||
118 | * a link that is down. This matches what is done for requests | ||
119 | * that complete normally, it's just the full list. | ||
120 | * | ||
121 | * Must be called with sdma_lock held | ||
122 | */ | ||
123 | static void clear_sdma_activelist(struct qib_pportdata *ppd) | ||
124 | { | ||
125 | struct qib_sdma_txreq *txp, *txp_next; | ||
126 | |||
127 | list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) { | ||
128 | list_del_init(&txp->list); | ||
129 | if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) { | ||
130 | unsigned idx; | ||
131 | |||
132 | idx = txp->start_idx; | ||
133 | while (idx != txp->next_descq_idx) { | ||
134 | unmap_desc(ppd, idx); | ||
135 | if (++idx == ppd->sdma_descq_cnt) | ||
136 | idx = 0; | ||
137 | } | ||
138 | } | ||
139 | if (txp->callback) | ||
140 | (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | static void sdma_sw_clean_up_task(unsigned long opaque) | ||
145 | { | ||
146 | struct qib_pportdata *ppd = (struct qib_pportdata *) opaque; | ||
147 | unsigned long flags; | ||
148 | |||
149 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
150 | |||
151 | /* | ||
152 | * At this point, the following should always be true: | ||
153 | * - We are halted, so no more descriptors are getting retired. | ||
154 | * - We are not running, so no one is submitting new work. | ||
155 | * - Only we can send the e40_sw_cleaned, so we can't start | ||
156 | * running again until we say so. So, the active list and | ||
157 | * descq are ours to play with. | ||
158 | */ | ||
159 | |||
160 | /* Process all retired requests. */ | ||
161 | qib_sdma_make_progress(ppd); | ||
162 | |||
163 | clear_sdma_activelist(ppd); | ||
164 | |||
165 | /* | ||
166 | * Resync count of added and removed. It is VERY important that | ||
167 | * sdma_descq_removed NEVER decrement - user_sdma depends on it. | ||
168 | */ | ||
169 | ppd->sdma_descq_removed = ppd->sdma_descq_added; | ||
170 | |||
171 | /* | ||
172 | * Reset our notion of head and tail. | ||
173 | * Note that the HW registers will be reset when switching states | ||
174 | * due to calling __qib_sdma_process_event() below. | ||
175 | */ | ||
176 | ppd->sdma_descq_tail = 0; | ||
177 | ppd->sdma_descq_head = 0; | ||
178 | ppd->sdma_head_dma[0] = 0; | ||
179 | ppd->sdma_generation = 0; | ||
180 | |||
181 | __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned); | ||
182 | |||
183 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait | ||
188 | * as a result of send buffer errors or send DMA descriptor errors. | ||
189 | * We want to disarm the buffers in these cases. | ||
190 | */ | ||
191 | static void sdma_hw_start_up(struct qib_pportdata *ppd) | ||
192 | { | ||
193 | struct qib_sdma_state *ss = &ppd->sdma_state; | ||
194 | unsigned bufno; | ||
195 | |||
196 | for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno) | ||
197 | ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno)); | ||
198 | |||
199 | ppd->dd->f_sdma_hw_start_up(ppd); | ||
200 | } | ||
201 | |||
202 | static void sdma_sw_tear_down(struct qib_pportdata *ppd) | ||
203 | { | ||
204 | struct qib_sdma_state *ss = &ppd->sdma_state; | ||
205 | |||
206 | /* Releasing this reference means the state machine has stopped. */ | ||
207 | sdma_put(ss); | ||
208 | } | ||
209 | |||
210 | static void sdma_start_sw_clean_up(struct qib_pportdata *ppd) | ||
211 | { | ||
212 | tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task); | ||
213 | } | ||
214 | |||
215 | static void sdma_set_state(struct qib_pportdata *ppd, | ||
216 | enum qib_sdma_states next_state) | ||
217 | { | ||
218 | struct qib_sdma_state *ss = &ppd->sdma_state; | ||
219 | struct sdma_set_state_action *action = ss->set_state_action; | ||
220 | unsigned op = 0; | ||
221 | |||
222 | /* debugging bookkeeping */ | ||
223 | ss->previous_state = ss->current_state; | ||
224 | ss->previous_op = ss->current_op; | ||
225 | |||
226 | ss->current_state = next_state; | ||
227 | |||
228 | if (action[next_state].op_enable) | ||
229 | op |= QIB_SDMA_SENDCTRL_OP_ENABLE; | ||
230 | |||
231 | if (action[next_state].op_intenable) | ||
232 | op |= QIB_SDMA_SENDCTRL_OP_INTENABLE; | ||
233 | |||
234 | if (action[next_state].op_halt) | ||
235 | op |= QIB_SDMA_SENDCTRL_OP_HALT; | ||
236 | |||
237 | if (action[next_state].op_drain) | ||
238 | op |= QIB_SDMA_SENDCTRL_OP_DRAIN; | ||
239 | |||
240 | if (action[next_state].go_s99_running_tofalse) | ||
241 | ss->go_s99_running = 0; | ||
242 | |||
243 | if (action[next_state].go_s99_running_totrue) | ||
244 | ss->go_s99_running = 1; | ||
245 | |||
246 | ss->current_op = op; | ||
247 | |||
248 | ppd->dd->f_sdma_sendctrl(ppd, ss->current_op); | ||
249 | } | ||
250 | |||
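sdma_set_state() is table-driven: the target state indexes set_state_action and the per-state booleans are OR'ed into one op word handed to the chip-specific f_sdma_sendctrl hook. A condensed sketch of that idea (the states, bit values, and table contents below are illustrative, not the driver's real ones):

/*
 * Sketch of an action table keyed by target state; entering a state
 * simply ORs the corresponding control bits into one op word.
 */
#include <stdio.h>

enum state { S_HW_DOWN, S_IDLE, S_RUNNING, S_NUM };

#define OP_ENABLE     0x1
#define OP_INTENABLE  0x2
#define OP_HALT       0x4

struct action {
	unsigned op_enable:1;
	unsigned op_intenable:1;
	unsigned op_halt:1;
};

static const struct action actions[S_NUM] = {
	[S_HW_DOWN] = { .op_halt = 1 },
	[S_IDLE]    = { .op_intenable = 1 },
	[S_RUNNING] = { .op_enable = 1, .op_intenable = 1 },
};

static unsigned op_for_state(enum state next)
{
	unsigned op = 0;

	if (actions[next].op_enable)
		op |= OP_ENABLE;
	if (actions[next].op_intenable)
		op |= OP_INTENABLE;
	if (actions[next].op_halt)
		op |= OP_HALT;
	return op;
}

int main(void)
{
	for (enum state s = S_HW_DOWN; s < S_NUM; ++s)
		printf("state %d -> sendctrl op 0x%X\n", s, op_for_state(s));
	return 0;
}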
251 | static void unmap_desc(struct qib_pportdata *ppd, unsigned head) | ||
252 | { | ||
253 | __le64 *descqp = &ppd->sdma_descq[head].qw[0]; | ||
254 | u64 desc[2]; | ||
255 | dma_addr_t addr; | ||
256 | size_t len; | ||
257 | |||
258 | desc[0] = le64_to_cpu(descqp[0]); | ||
259 | desc[1] = le64_to_cpu(descqp[1]); | ||
260 | |||
261 | addr = (desc[1] << 32) | (desc[0] >> 32); | ||
262 | len = (desc[0] >> 14) & (0x7ffULL << 2); | ||
263 | dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE); | ||
264 | } | ||
265 | |||
266 | static int alloc_sdma(struct qib_pportdata *ppd) | ||
267 | { | ||
268 | ppd->sdma_descq_cnt = sdma_descq_cnt; | ||
269 | if (!ppd->sdma_descq_cnt) | ||
270 | ppd->sdma_descq_cnt = 256; | ||
271 | |||
272 | /* Allocate memory for SendDMA descriptor FIFO */ | ||
273 | ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev, | ||
274 | ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys, | ||
275 | GFP_KERNEL); | ||
276 | |||
277 | if (!ppd->sdma_descq) { | ||
278 | qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor " | ||
279 | "FIFO memory\n"); | ||
280 | goto bail; | ||
281 | } | ||
282 | |||
283 | /* Allocate memory for DMA of head register to memory */ | ||
284 | ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev, | ||
285 | PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL); | ||
286 | if (!ppd->sdma_head_dma) { | ||
287 | qib_dev_err(ppd->dd, "failed to allocate SendDMA " | ||
288 | "head memory\n"); | ||
289 | goto cleanup_descq; | ||
290 | } | ||
291 | ppd->sdma_head_dma[0] = 0; | ||
292 | return 0; | ||
293 | |||
294 | cleanup_descq: | ||
295 | dma_free_coherent(&ppd->dd->pcidev->dev, | ||
296 | ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq, | ||
297 | ppd->sdma_descq_phys); | ||
298 | ppd->sdma_descq = NULL; | ||
299 | ppd->sdma_descq_phys = 0; | ||
300 | bail: | ||
301 | ppd->sdma_descq_cnt = 0; | ||
302 | return -ENOMEM; | ||
303 | } | ||
304 | |||
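alloc_sdma() allocates the descriptor FIFO and the DMA'd head page in order and unwinds through goto labels on failure, so a partial allocation never leaks and the counters are left in a clean state. A sketch of the same unwind shape with malloc() standing in for dma_alloc_coherent():

/*
 * Sketch of the allocate-or-unwind pattern: a failure of the second
 * allocation frees the first via a goto ladder.
 */
#include <stdio.h>
#include <stdlib.h>

struct sdma_bufs {
	void     *descq;
	void     *head_dma;
	unsigned  descq_cnt;
};

static int alloc_bufs(struct sdma_bufs *b, unsigned cnt)
{
	b->descq_cnt = cnt ? cnt : 256;	/* fall back to the default size */

	b->descq = malloc(b->descq_cnt * sizeof(unsigned long long [2]));
	if (!b->descq)
		goto bail;

	b->head_dma = malloc(4096);	/* one page for the DMA'd head */
	if (!b->head_dma)
		goto cleanup_descq;

	return 0;

cleanup_descq:
	free(b->descq);
	b->descq = NULL;
bail:
	b->descq_cnt = 0;
	return -1;
}

int main(void)
{
	struct sdma_bufs b = { 0 };

	if (alloc_bufs(&b, 0) == 0)
		printf("allocated ring of %u descriptors\n", b.descq_cnt);
	free(b.head_dma);
	free(b.descq);
	return 0;
}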
305 | static void free_sdma(struct qib_pportdata *ppd) | ||
306 | { | ||
307 | struct qib_devdata *dd = ppd->dd; | ||
308 | |||
309 | if (ppd->sdma_head_dma) { | ||
310 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, | ||
311 | (void *)ppd->sdma_head_dma, | ||
312 | ppd->sdma_head_phys); | ||
313 | ppd->sdma_head_dma = NULL; | ||
314 | ppd->sdma_head_phys = 0; | ||
315 | } | ||
316 | |||
317 | if (ppd->sdma_descq) { | ||
318 | dma_free_coherent(&dd->pcidev->dev, | ||
319 | ppd->sdma_descq_cnt * sizeof(u64[2]), | ||
320 | ppd->sdma_descq, ppd->sdma_descq_phys); | ||
321 | ppd->sdma_descq = NULL; | ||
322 | ppd->sdma_descq_phys = 0; | ||
323 | } | ||
324 | } | ||
325 | |||
326 | static inline void make_sdma_desc(struct qib_pportdata *ppd, | ||
327 | u64 *sdmadesc, u64 addr, u64 dwlen, | ||
328 | u64 dwoffset) | ||
329 | { | ||
330 | |||
331 | WARN_ON(addr & 3); | ||
332 | /* SDmaPhyAddr[47:32] */ | ||
333 | sdmadesc[1] = addr >> 32; | ||
334 | /* SDmaPhyAddr[31:0] */ | ||
335 | sdmadesc[0] = (addr & 0xfffffffcULL) << 32; | ||
336 | /* SDmaGeneration[1:0] */ | ||
337 | sdmadesc[0] |= (ppd->sdma_generation & 3ULL) << | ||
338 | SDMA_DESC_GEN_LSB; | ||
339 | /* SDmaDwordCount[10:0] */ | ||
340 | sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB; | ||
341 | /* SDmaBufOffset[12:2] */ | ||
342 | sdmadesc[0] |= dwoffset & 0x7ffULL; | ||
343 | } | ||
344 | |||
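make_sdma_desc() packs a 48-bit bus address, a 2-bit generation, an 11-bit dword count, and an 11-bit buffer offset into the two descriptor quadwords; unmap_desc() earlier in this file recovers the address and byte length from the same layout. A user-space sketch of that encode/decode round trip (the test values are arbitrary):

/*
 * Sketch of the send DMA descriptor layout built by make_sdma_desc()
 * and decoded by unmap_desc().  Field positions follow the SDMA_DESC_*
 * constants defined at the top of this file.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SDMA_DESC_COUNT_LSB 16
#define SDMA_DESC_GEN_LSB   30

static void encode(uint64_t desc[2], uint64_t addr, uint64_t dwlen,
		   uint64_t dwoffset, uint64_t generation)
{
	desc[1] = addr >> 32;				/* SDmaPhyAddr[47:32] */
	desc[0] = (addr & 0xfffffffcULL) << 32;		/* SDmaPhyAddr[31:0] */
	desc[0] |= (generation & 3ULL) << SDMA_DESC_GEN_LSB;
	desc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	desc[0] |= dwoffset & 0x7ffULL;			/* SDmaBufOffset[12:2] */
}

int main(void)
{
	uint64_t desc[2];
	uint64_t addr = 0x123456789abcULL & ~3ULL;	/* dword aligned */

	encode(desc, addr, 24 /* dwords */, 5 /* dword offset */, 2);

	/* Decode exactly as unmap_desc() does. */
	uint64_t daddr = (desc[1] << 32) | (desc[0] >> 32);
	size_t   len   = (desc[0] >> 14) & (0x7ffULL << 2);

	printf("addr back: 0x%" PRIx64 ", len %zu bytes\n", daddr, len);
	return 0;
}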
345 | /* sdma_lock must be held */ | ||
346 | int qib_sdma_make_progress(struct qib_pportdata *ppd) | ||
347 | { | ||
348 | struct list_head *lp = NULL; | ||
349 | struct qib_sdma_txreq *txp = NULL; | ||
350 | struct qib_devdata *dd = ppd->dd; | ||
351 | int progress = 0; | ||
352 | u16 hwhead; | ||
353 | u16 idx = 0; | ||
354 | |||
355 | hwhead = dd->f_sdma_gethead(ppd); | ||
356 | |||
357 | /* The reason for some of the complexity of this code is that | ||
358 | * not all descriptors have corresponding txps. So, we have to | ||
359 | * be able to skip over descs until we wander into the range of | ||
360 | * the next txp on the list. | ||
361 | */ | ||
362 | |||
363 | if (!list_empty(&ppd->sdma_activelist)) { | ||
364 | lp = ppd->sdma_activelist.next; | ||
365 | txp = list_entry(lp, struct qib_sdma_txreq, list); | ||
366 | idx = txp->start_idx; | ||
367 | } | ||
368 | |||
369 | while (ppd->sdma_descq_head != hwhead) { | ||
370 | /* if desc is part of this txp, unmap if needed */ | ||
371 | if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) && | ||
372 | (idx == ppd->sdma_descq_head)) { | ||
373 | unmap_desc(ppd, ppd->sdma_descq_head); | ||
374 | if (++idx == ppd->sdma_descq_cnt) | ||
375 | idx = 0; | ||
376 | } | ||
377 | |||
378 | /* increment dequed desc count */ | ||
379 | ppd->sdma_descq_removed++; | ||
380 | |||
381 | /* advance head, wrap if needed */ | ||
382 | if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt) | ||
383 | ppd->sdma_descq_head = 0; | ||
384 | |||
385 | /* if now past this txp's descs, do the callback */ | ||
386 | if (txp && txp->next_descq_idx == ppd->sdma_descq_head) { | ||
387 | /* remove from active list */ | ||
388 | list_del_init(&txp->list); | ||
389 | if (txp->callback) | ||
390 | (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK); | ||
391 | /* see if there is another txp */ | ||
392 | if (list_empty(&ppd->sdma_activelist)) | ||
393 | txp = NULL; | ||
394 | else { | ||
395 | lp = ppd->sdma_activelist.next; | ||
396 | txp = list_entry(lp, struct qib_sdma_txreq, | ||
397 | list); | ||
398 | idx = txp->start_idx; | ||
399 | } | ||
400 | } | ||
401 | progress = 1; | ||
402 | } | ||
403 | if (progress) | ||
404 | qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd)); | ||
405 | return progress; | ||
406 | } | ||
407 | |||
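qib_sdma_make_progress() retires descriptors by chasing the hardware head: the software head advances one slot at a time, wrapping at sdma_descq_cnt, and sdma_descq_removed only ever increases. A sketch of that ring bookkeeping (the free-count formula is an assumption standing in for qib_sdma_descq_freecnt(), which is defined elsewhere in the driver):

/*
 * Sketch of the descriptor-ring bookkeeping: retire entries up to the
 * hardware head, wrap at the ring size, and keep monotonic added/removed
 * counters.
 */
#include <stdio.h>

#define DESCQ_CNT 256u

struct ring {
	unsigned head;			/* next descriptor to retire     */
	unsigned tail;			/* next descriptor to fill       */
	unsigned long long added;	/* total descriptors ever queued */
	unsigned long long removed;
};

static unsigned freecnt(const struct ring *r)
{
	/* assumed formula: one slot kept empty so head == tail means "empty" */
	return DESCQ_CNT - (unsigned)(r->added - r->removed) - 1;
}

static void retire_up_to(struct ring *r, unsigned hwhead)
{
	while (r->head != hwhead) {
		r->removed++;
		if (++r->head == DESCQ_CNT)
			r->head = 0;
	}
}

int main(void)
{
	struct ring r = { 0, 0, 0, 0 };

	r.added += 10;		/* pretend 10 descriptors were posted */
	r.tail = 10;
	retire_up_to(&r, 7);	/* hardware has consumed 7 of them */
	printf("head=%u removed=%llu free=%u\n", r.head, r.removed, freecnt(&r));
	return 0;
}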
408 | /* | ||
409 | * This is called from interrupt context. | ||
410 | */ | ||
411 | void qib_sdma_intr(struct qib_pportdata *ppd) | ||
412 | { | ||
413 | unsigned long flags; | ||
414 | |||
415 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
416 | |||
417 | __qib_sdma_intr(ppd); | ||
418 | |||
419 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
420 | } | ||
421 | |||
422 | void __qib_sdma_intr(struct qib_pportdata *ppd) | ||
423 | { | ||
424 | if (__qib_sdma_running(ppd)) | ||
425 | qib_sdma_make_progress(ppd); | ||
426 | } | ||
427 | |||
428 | int qib_setup_sdma(struct qib_pportdata *ppd) | ||
429 | { | ||
430 | struct qib_devdata *dd = ppd->dd; | ||
431 | unsigned long flags; | ||
432 | int ret = 0; | ||
433 | |||
434 | ret = alloc_sdma(ppd); | ||
435 | if (ret) | ||
436 | goto bail; | ||
437 | |||
438 | /* set consistent sdma state */ | ||
439 | ppd->dd->f_sdma_init_early(ppd); | ||
440 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
441 | sdma_set_state(ppd, qib_sdma_state_s00_hw_down); | ||
442 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
443 | |||
444 | /* set up reference counting */ | ||
445 | kref_init(&ppd->sdma_state.kref); | ||
446 | init_completion(&ppd->sdma_state.comp); | ||
447 | |||
448 | ppd->sdma_generation = 0; | ||
449 | ppd->sdma_descq_head = 0; | ||
450 | ppd->sdma_descq_removed = 0; | ||
451 | ppd->sdma_descq_added = 0; | ||
452 | |||
453 | INIT_LIST_HEAD(&ppd->sdma_activelist); | ||
454 | |||
455 | tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task, | ||
456 | (unsigned long)ppd); | ||
457 | |||
458 | ret = dd->f_init_sdma_regs(ppd); | ||
459 | if (ret) | ||
460 | goto bail_alloc; | ||
461 | |||
462 | qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start); | ||
463 | |||
464 | return 0; | ||
465 | |||
466 | bail_alloc: | ||
467 | qib_teardown_sdma(ppd); | ||
468 | bail: | ||
469 | return ret; | ||
470 | } | ||
471 | |||
472 | void qib_teardown_sdma(struct qib_pportdata *ppd) | ||
473 | { | ||
474 | qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down); | ||
475 | |||
476 | /* | ||
477 | * This waits for the state machine to exit so it is not | ||
478 | * necessary to kill the sdma_sw_clean_up_task to make sure | ||
479 | * it is not running. | ||
480 | */ | ||
481 | sdma_finalput(&ppd->sdma_state); | ||
482 | |||
483 | free_sdma(ppd); | ||
484 | } | ||
485 | |||
486 | int qib_sdma_running(struct qib_pportdata *ppd) | ||
487 | { | ||
488 | unsigned long flags; | ||
489 | int ret; | ||
490 | |||
491 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
492 | ret = __qib_sdma_running(ppd); | ||
493 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
494 | |||
495 | return ret; | ||
496 | } | ||
497 | |||
498 | /* | ||
499 | * Complete a request when sdma is not running; this is likely the only | ||
500 | * request, but to simplify the code, always queue it, then process the | ||
501 | * full activelist. We process the entire list to ensure that this | ||
502 | * particular request gets its callback, but in the correct order. | ||
503 | * Must be called with sdma_lock held. | ||
504 | */ | ||
505 | static void complete_sdma_err_req(struct qib_pportdata *ppd, | ||
506 | struct qib_verbs_txreq *tx) | ||
507 | { | ||
508 | atomic_inc(&tx->qp->s_dma_busy); | ||
509 | /* no sdma descriptors, so no unmap_desc */ | ||
510 | tx->txreq.start_idx = 0; | ||
511 | tx->txreq.next_descq_idx = 0; | ||
512 | list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); | ||
513 | clear_sdma_activelist(ppd); | ||
514 | } | ||
515 | |||
516 | /* | ||
517 | * This function queues one IB packet onto the send DMA queue per call. | ||
518 | * The caller is responsible for checking: | ||
519 | * 1) The number of send DMA descriptor entries is less than the size of | ||
520 | * the descriptor queue. | ||
521 | * 2) The IB SGE addresses and lengths are 32-bit aligned | ||
522 | * (except possibly the last SGE's length) | ||
523 | * 3) The SGE addresses are suitable for passing to dma_map_single(). | ||
524 | */ | ||
525 | int qib_sdma_verbs_send(struct qib_pportdata *ppd, | ||
526 | struct qib_sge_state *ss, u32 dwords, | ||
527 | struct qib_verbs_txreq *tx) | ||
528 | { | ||
529 | unsigned long flags; | ||
530 | struct qib_sge *sge; | ||
531 | struct qib_qp *qp; | ||
532 | int ret = 0; | ||
533 | u16 tail; | ||
534 | __le64 *descqp; | ||
535 | u64 sdmadesc[2]; | ||
536 | u32 dwoffset; | ||
537 | dma_addr_t addr; | ||
538 | |||
539 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
540 | |||
541 | retry: | ||
542 | if (unlikely(!__qib_sdma_running(ppd))) { | ||
543 | complete_sdma_err_req(ppd, tx); | ||
544 | goto unlock; | ||
545 | } | ||
546 | |||
547 | if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) { | ||
548 | if (qib_sdma_make_progress(ppd)) | ||
549 | goto retry; | ||
550 | if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT) | ||
551 | ppd->dd->f_sdma_set_desc_cnt(ppd, | ||
552 | ppd->sdma_descq_cnt / 2); | ||
553 | goto busy; | ||
554 | } | ||
555 | |||
556 | dwoffset = tx->hdr_dwords; | ||
557 | make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0); | ||
558 | |||
559 | sdmadesc[0] |= SDMA_DESC_FIRST; | ||
560 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) | ||
561 | sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF; | ||
562 | |||
563 | /* write to the descq */ | ||
564 | tail = ppd->sdma_descq_tail; | ||
565 | descqp = &ppd->sdma_descq[tail].qw[0]; | ||
566 | *descqp++ = cpu_to_le64(sdmadesc[0]); | ||
567 | *descqp++ = cpu_to_le64(sdmadesc[1]); | ||
568 | |||
569 | /* increment the tail */ | ||
570 | if (++tail == ppd->sdma_descq_cnt) { | ||
571 | tail = 0; | ||
572 | descqp = &ppd->sdma_descq[0].qw[0]; | ||
573 | ++ppd->sdma_generation; | ||
574 | } | ||
575 | |||
576 | tx->txreq.start_idx = tail; | ||
577 | |||
578 | sge = &ss->sge; | ||
579 | while (dwords) { | ||
580 | u32 dw; | ||
581 | u32 len; | ||
582 | |||
583 | len = dwords << 2; | ||
584 | if (len > sge->length) | ||
585 | len = sge->length; | ||
586 | if (len > sge->sge_length) | ||
587 | len = sge->sge_length; | ||
588 | BUG_ON(len == 0); | ||
589 | dw = (len + 3) >> 2; | ||
590 | addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr, | ||
591 | dw << 2, DMA_TO_DEVICE); | ||
592 | if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) | ||
593 | goto unmap; | ||
594 | sdmadesc[0] = 0; | ||
595 | make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset); | ||
596 | /* SDmaUseLargeBuf has to be set in every descriptor */ | ||
597 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) | ||
598 | sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF; | ||
599 | /* write to the descq */ | ||
600 | *descqp++ = cpu_to_le64(sdmadesc[0]); | ||
601 | *descqp++ = cpu_to_le64(sdmadesc[1]); | ||
602 | |||
603 | /* increment the tail */ | ||
604 | if (++tail == ppd->sdma_descq_cnt) { | ||
605 | tail = 0; | ||
606 | descqp = &ppd->sdma_descq[0].qw[0]; | ||
607 | ++ppd->sdma_generation; | ||
608 | } | ||
609 | sge->vaddr += len; | ||
610 | sge->length -= len; | ||
611 | sge->sge_length -= len; | ||
612 | if (sge->sge_length == 0) { | ||
613 | if (--ss->num_sge) | ||
614 | *sge = *ss->sg_list++; | ||
615 | } else if (sge->length == 0 && sge->mr->lkey) { | ||
616 | if (++sge->n >= QIB_SEGSZ) { | ||
617 | if (++sge->m >= sge->mr->mapsz) | ||
618 | break; | ||
619 | sge->n = 0; | ||
620 | } | ||
621 | sge->vaddr = | ||
622 | sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
623 | sge->length = | ||
624 | sge->mr->map[sge->m]->segs[sge->n].length; | ||
625 | } | ||
626 | |||
627 | dwoffset += dw; | ||
628 | dwords -= dw; | ||
629 | } | ||
630 | |||
631 | if (!tail) | ||
632 | descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0]; | ||
633 | descqp -= 2; | ||
634 | descqp[0] |= cpu_to_le64(SDMA_DESC_LAST); | ||
635 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST) | ||
636 | descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD); | ||
637 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ) | ||
638 | descqp[0] |= cpu_to_le64(SDMA_DESC_INTR); | ||
639 | |||
640 | atomic_inc(&tx->qp->s_dma_busy); | ||
641 | tx->txreq.next_descq_idx = tail; | ||
642 | ppd->dd->f_sdma_update_tail(ppd, tail); | ||
643 | ppd->sdma_descq_added += tx->txreq.sg_count; | ||
644 | list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); | ||
645 | goto unlock; | ||
646 | |||
647 | unmap: | ||
648 | for (;;) { | ||
649 | if (!tail) | ||
650 | tail = ppd->sdma_descq_cnt - 1; | ||
651 | else | ||
652 | tail--; | ||
653 | if (tail == ppd->sdma_descq_tail) | ||
654 | break; | ||
655 | unmap_desc(ppd, tail); | ||
656 | } | ||
657 | qp = tx->qp; | ||
658 | qib_put_txreq(tx); | ||
659 | spin_lock(&qp->s_lock); | ||
660 | if (qp->ibqp.qp_type == IB_QPT_RC) { | ||
661 | /* XXX what about error sending RDMA read responses? */ | ||
662 | if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) | ||
663 | qib_error_qp(qp, IB_WC_GENERAL_ERR); | ||
664 | } else if (qp->s_wqe) | ||
665 | qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); | ||
666 | spin_unlock(&qp->s_lock); | ||
667 | /* return zero to process the next send work request */ | ||
668 | goto unlock; | ||
669 | |||
670 | busy: | ||
671 | qp = tx->qp; | ||
672 | spin_lock(&qp->s_lock); | ||
673 | if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { | ||
674 | struct qib_ibdev *dev; | ||
675 | |||
676 | /* | ||
677 | * If we couldn't queue the DMA request, save the info | ||
678 | * and try again later rather than destroying the | ||
679 | * buffer and undoing the side effects of the copy. | ||
680 | */ | ||
681 | tx->ss = ss; | ||
682 | tx->dwords = dwords; | ||
683 | qp->s_tx = tx; | ||
684 | dev = &ppd->dd->verbs_dev; | ||
685 | spin_lock(&dev->pending_lock); | ||
686 | if (list_empty(&qp->iowait)) { | ||
687 | struct qib_ibport *ibp; | ||
688 | |||
689 | ibp = &ppd->ibport_data; | ||
690 | ibp->n_dmawait++; | ||
691 | qp->s_flags |= QIB_S_WAIT_DMA_DESC; | ||
692 | list_add_tail(&qp->iowait, &dev->dmawait); | ||
693 | } | ||
694 | spin_unlock(&dev->pending_lock); | ||
695 | qp->s_flags &= ~QIB_S_BUSY; | ||
696 | spin_unlock(&qp->s_lock); | ||
697 | ret = -EBUSY; | ||
698 | } else { | ||
699 | spin_unlock(&qp->s_lock); | ||
700 | qib_put_txreq(tx); | ||
701 | } | ||
702 | unlock: | ||
703 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
704 | return ret; | ||
705 | } | ||
706 | |||
707 | void qib_sdma_process_event(struct qib_pportdata *ppd, | ||
708 | enum qib_sdma_events event) | ||
709 | { | ||
710 | unsigned long flags; | ||
711 | |||
712 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
713 | |||
714 | __qib_sdma_process_event(ppd, event); | ||
715 | |||
716 | if (ppd->sdma_state.current_state == qib_sdma_state_s99_running) | ||
717 | qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd)); | ||
718 | |||
719 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
720 | } | ||
721 | |||
722 | void __qib_sdma_process_event(struct qib_pportdata *ppd, | ||
723 | enum qib_sdma_events event) | ||
724 | { | ||
725 | struct qib_sdma_state *ss = &ppd->sdma_state; | ||
726 | |||
727 | switch (ss->current_state) { | ||
728 | case qib_sdma_state_s00_hw_down: | ||
729 | switch (event) { | ||
730 | case qib_sdma_event_e00_go_hw_down: | ||
731 | break; | ||
732 | case qib_sdma_event_e30_go_running: | ||
733 | /* | ||
734 | * If down, but running requested (usually the result | ||
735 | * of a link coming up), we need to start up. | ||
736 | * This can happen when hw down is requested while | ||
737 | * bringing the link up with traffic active on | ||
738 | * the 7220, for example. */ | ||
739 | ss->go_s99_running = 1; | ||
740 | /* fall through and start dma engine */ | ||
741 | case qib_sdma_event_e10_go_hw_start: | ||
742 | /* This reference means the state machine is started */ | ||
743 | sdma_get(&ppd->sdma_state); | ||
744 | sdma_set_state(ppd, | ||
745 | qib_sdma_state_s10_hw_start_up_wait); | ||
746 | break; | ||
747 | case qib_sdma_event_e20_hw_started: | ||
748 | break; | ||
749 | case qib_sdma_event_e40_sw_cleaned: | ||
750 | sdma_sw_tear_down(ppd); | ||
751 | break; | ||
752 | case qib_sdma_event_e50_hw_cleaned: | ||
753 | break; | ||
754 | case qib_sdma_event_e60_hw_halted: | ||
755 | break; | ||
756 | case qib_sdma_event_e70_go_idle: | ||
757 | break; | ||
758 | case qib_sdma_event_e7220_err_halted: | ||
759 | break; | ||
760 | case qib_sdma_event_e7322_err_halted: | ||
761 | break; | ||
762 | case qib_sdma_event_e90_timer_tick: | ||
763 | break; | ||
764 | } | ||
765 | break; | ||
766 | |||
767 | case qib_sdma_state_s10_hw_start_up_wait: | ||
768 | switch (event) { | ||
769 | case qib_sdma_event_e00_go_hw_down: | ||
770 | sdma_set_state(ppd, qib_sdma_state_s00_hw_down); | ||
771 | sdma_sw_tear_down(ppd); | ||
772 | break; | ||
773 | case qib_sdma_event_e10_go_hw_start: | ||
774 | break; | ||
775 | case qib_sdma_event_e20_hw_started: | ||
776 | sdma_set_state(ppd, ss->go_s99_running ? | ||
777 | qib_sdma_state_s99_running : | ||
778 | qib_sdma_state_s20_idle); | ||
779 | break; | ||
780 | case qib_sdma_event_e30_go_running: | ||
781 | ss->go_s99_running = 1; | ||
782 | break; | ||
783 | case qib_sdma_event_e40_sw_cleaned: | ||
784 | break; | ||
785 | case qib_sdma_event_e50_hw_cleaned: | ||
786 | break; | ||
787 | case qib_sdma_event_e60_hw_halted: | ||
788 | break; | ||
789 | case qib_sdma_event_e70_go_idle: | ||
790 | ss->go_s99_running = 0; | ||
791 | break; | ||
792 | case qib_sdma_event_e7220_err_halted: | ||
793 | break; | ||
794 | case qib_sdma_event_e7322_err_halted: | ||
795 | break; | ||
796 | case qib_sdma_event_e90_timer_tick: | ||
797 | break; | ||
798 | } | ||
799 | break; | ||
800 | |||
801 | case qib_sdma_state_s20_idle: | ||
802 | switch (event) { | ||
803 | case qib_sdma_event_e00_go_hw_down: | ||
804 | sdma_set_state(ppd, qib_sdma_state_s00_hw_down); | ||
805 | sdma_sw_tear_down(ppd); | ||
806 | break; | ||
807 | case qib_sdma_event_e10_go_hw_start: | ||
808 | break; | ||
809 | case qib_sdma_event_e20_hw_started: | ||
810 | break; | ||
811 | case qib_sdma_event_e30_go_running: | ||
812 | sdma_set_state(ppd, qib_sdma_state_s99_running); | ||
813 | ss->go_s99_running = 1; | ||
814 | break; | ||
815 | case qib_sdma_event_e40_sw_cleaned: | ||
816 | break; | ||
817 | case qib_sdma_event_e50_hw_cleaned: | ||
818 | break; | ||
819 | case qib_sdma_event_e60_hw_halted: | ||
820 | break; | ||
821 | case qib_sdma_event_e70_go_idle: | ||
822 | break; | ||
823 | case qib_sdma_event_e7220_err_halted: | ||
824 | break; | ||
825 | case qib_sdma_event_e7322_err_halted: | ||
826 | break; | ||
827 | case qib_sdma_event_e90_timer_tick: | ||
828 | break; | ||
829 | } | ||
830 | break; | ||
831 | |||
832 | case qib_sdma_state_s30_sw_clean_up_wait: | ||
833 | switch (event) { | ||
834 | case qib_sdma_event_e00_go_hw_down: | ||
835 | sdma_set_state(ppd, qib_sdma_state_s00_hw_down); | ||
836 | break; | ||
837 | case qib_sdma_event_e10_go_hw_start: | ||
838 | break; | ||
839 | case qib_sdma_event_e20_hw_started: | ||
840 | break; | ||
841 | case qib_sdma_event_e30_go_running: | ||
842 | ss->go_s99_running = 1; | ||
843 | break; | ||
844 | case qib_sdma_event_e40_sw_cleaned: | ||
845 | sdma_set_state(ppd, | ||
846 | qib_sdma_state_s10_hw_start_up_wait); | ||
847 | sdma_hw_start_up(ppd); | ||
848 | break; | ||
849 | case qib_sdma_event_e50_hw_cleaned: | ||
850 | break; | ||
851 | case qib_sdma_event_e60_hw_halted: | ||
852 | break; | ||
853 | case qib_sdma_event_e70_go_idle: | ||
854 | ss->go_s99_running = 0; | ||
855 | break; | ||
856 | case qib_sdma_event_e7220_err_halted: | ||
857 | break; | ||
858 | case qib_sdma_event_e7322_err_halted: | ||
859 | break; | ||
860 | case qib_sdma_event_e90_timer_tick: | ||
861 | break; | ||
862 | } | ||
863 | break; | ||
864 | |||
865 | case qib_sdma_state_s40_hw_clean_up_wait: | ||
866 | switch (event) { | ||
867 | case qib_sdma_event_e00_go_hw_down: | ||
868 | sdma_set_state(ppd, qib_sdma_state_s00_hw_down); | ||
869 | sdma_start_sw_clean_up(ppd); | ||
870 | break; | ||
871 | case qib_sdma_event_e10_go_hw_start: | ||
872 | break; | ||
873 | case qib_sdma_event_e20_hw_started: | ||
874 | break; | ||
875 | case qib_sdma_event_e30_go_running: | ||
876 | ss->go_s99_running = 1; | ||
877 | break; | ||
878 | case qib_sdma_event_e40_sw_cleaned: | ||
879 | break; | ||
880 | case qib_sdma_event_e50_hw_cleaned: | ||
881 | sdma_set_state(ppd, | ||
882 | qib_sdma_state_s30_sw_clean_up_wait); | ||
883 | sdma_start_sw_clean_up(ppd); | ||
884 | break; | ||
885 | case qib_sdma_event_e60_hw_halted: | ||
886 | break; | ||
887 | case qib_sdma_event_e70_go_idle: | ||
888 | ss->go_s99_running = 0; | ||
889 | break; | ||
890 | case qib_sdma_event_e7220_err_halted: | ||
891 | break; | ||
892 | case qib_sdma_event_e7322_err_halted: | ||
893 | break; | ||
894 | case qib_sdma_event_e90_timer_tick: | ||
895 | break; | ||
896 | } | ||
897 | break; | ||
898 | |||
899 | case qib_sdma_state_s50_hw_halt_wait: | ||
900 | switch (event) { | ||
901 | case qib_sdma_event_e00_go_hw_down: | ||
902 | sdma_set_state(ppd, qib_sdma_state_s00_hw_down); | ||
903 | sdma_start_sw_clean_up(ppd); | ||
904 | break; | ||
905 | case qib_sdma_event_e10_go_hw_start: | ||
906 | break; | ||
907 | case qib_sdma_event_e20_hw_started: | ||
908 | break; | ||
909 | case qib_sdma_event_e30_go_running: | ||
910 | ss->go_s99_running = 1; | ||
911 | break; | ||
912 | case qib_sdma_event_e40_sw_cleaned: | ||
913 | break; | ||
914 | case qib_sdma_event_e50_hw_cleaned: | ||
915 | break; | ||
916 | case qib_sdma_event_e60_hw_halted: | ||
917 | sdma_set_state(ppd, | ||
918 | qib_sdma_state_s40_hw_clean_up_wait); | ||
919 | ppd->dd->f_sdma_hw_clean_up(ppd); | ||
920 | break; | ||
921 | case qib_sdma_event_e70_go_idle: | ||
922 | ss->go_s99_running = 0; | ||
923 | break; | ||
924 | case qib_sdma_event_e7220_err_halted: | ||
925 | break; | ||
926 | case qib_sdma_event_e7322_err_halted: | ||
927 | break; | ||
928 | case qib_sdma_event_e90_timer_tick: | ||
929 | break; | ||
930 | } | ||
931 | break; | ||
932 | |||
933 | case qib_sdma_state_s99_running: | ||
934 | switch (event) { | ||
935 | case qib_sdma_event_e00_go_hw_down: | ||
936 | sdma_set_state(ppd, qib_sdma_state_s00_hw_down); | ||
937 | sdma_start_sw_clean_up(ppd); | ||
938 | break; | ||
939 | case qib_sdma_event_e10_go_hw_start: | ||
940 | break; | ||
941 | case qib_sdma_event_e20_hw_started: | ||
942 | break; | ||
943 | case qib_sdma_event_e30_go_running: | ||
944 | break; | ||
945 | case qib_sdma_event_e40_sw_cleaned: | ||
946 | break; | ||
947 | case qib_sdma_event_e50_hw_cleaned: | ||
948 | break; | ||
949 | case qib_sdma_event_e60_hw_halted: | ||
950 | sdma_set_state(ppd, | ||
951 | qib_sdma_state_s30_sw_clean_up_wait); | ||
952 | sdma_start_sw_clean_up(ppd); | ||
953 | break; | ||
954 | case qib_sdma_event_e70_go_idle: | ||
955 | sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait); | ||
956 | ss->go_s99_running = 0; | ||
957 | break; | ||
958 | case qib_sdma_event_e7220_err_halted: | ||
959 | sdma_set_state(ppd, | ||
960 | qib_sdma_state_s30_sw_clean_up_wait); | ||
961 | sdma_start_sw_clean_up(ppd); | ||
962 | break; | ||
963 | case qib_sdma_event_e7322_err_halted: | ||
964 | sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait); | ||
965 | break; | ||
966 | case qib_sdma_event_e90_timer_tick: | ||
967 | break; | ||
968 | } | ||
969 | break; | ||
970 | } | ||
971 | |||
972 | ss->last_event = event; | ||
973 | } | ||
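__qib_sdma_process_event() is written as a flat state machine: an outer switch on the current state with an inner switch on the event, where unhandled events are explicit no-op cases. A condensed sketch of that shape, covering only three states and three events (the real transitions route halts through the clean-up states rather than jumping straight back to hw_down):

/*
 * Condensed nested-switch state machine: outer switch on state, inner
 * switch on event, unhandled events leave the state unchanged.
 */
#include <stdio.h>

enum state { S00_HW_DOWN, S10_START_WAIT, S99_RUNNING };
enum event { E10_GO_HW_START, E20_HW_STARTED, E60_HW_HALTED };

static enum state process_event(enum state cur, enum event ev)
{
	switch (cur) {
	case S00_HW_DOWN:
		switch (ev) {
		case E10_GO_HW_START:
			return S10_START_WAIT;
		default:
			return cur;
		}
	case S10_START_WAIT:
		switch (ev) {
		case E20_HW_STARTED:
			return S99_RUNNING;
		default:
			return cur;
		}
	case S99_RUNNING:
		switch (ev) {
		case E60_HW_HALTED:
			return S00_HW_DOWN;	/* driver goes via clean-up states */
		default:
			return cur;
		}
	}
	return cur;
}

int main(void)
{
	enum state s = S00_HW_DOWN;

	s = process_event(s, E10_GO_HW_START);
	s = process_event(s, E20_HW_STARTED);
	printf("state after start sequence: %d (2 == running)\n", s);
	return 0;
}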
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c new file mode 100644 index 000000000000..c3ec8efc2ed8 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_srq.c | |||
@@ -0,0 +1,375 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/err.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | |||
38 | #include "qib_verbs.h" | ||
39 | |||
40 | /** | ||
41 | * qib_post_srq_receive - post a receive on a shared receive queue | ||
42 | * @ibsrq: the SRQ to post the receive on | ||
43 | * @wr: the list of work requests to post | ||
44 | * @bad_wr: A pointer to the first WR to cause a problem is put here | ||
45 | * | ||
46 | * This may be called from interrupt context. | ||
47 | */ | ||
48 | int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||
49 | struct ib_recv_wr **bad_wr) | ||
50 | { | ||
51 | struct qib_srq *srq = to_isrq(ibsrq); | ||
52 | struct qib_rwq *wq; | ||
53 | unsigned long flags; | ||
54 | int ret; | ||
55 | |||
56 | for (; wr; wr = wr->next) { | ||
57 | struct qib_rwqe *wqe; | ||
58 | u32 next; | ||
59 | int i; | ||
60 | |||
61 | if ((unsigned) wr->num_sge > srq->rq.max_sge) { | ||
62 | *bad_wr = wr; | ||
63 | ret = -EINVAL; | ||
64 | goto bail; | ||
65 | } | ||
66 | |||
67 | spin_lock_irqsave(&srq->rq.lock, flags); | ||
68 | wq = srq->rq.wq; | ||
69 | next = wq->head + 1; | ||
70 | if (next >= srq->rq.size) | ||
71 | next = 0; | ||
72 | if (next == wq->tail) { | ||
73 | spin_unlock_irqrestore(&srq->rq.lock, flags); | ||
74 | *bad_wr = wr; | ||
75 | ret = -ENOMEM; | ||
76 | goto bail; | ||
77 | } | ||
78 | |||
79 | wqe = get_rwqe_ptr(&srq->rq, wq->head); | ||
80 | wqe->wr_id = wr->wr_id; | ||
81 | wqe->num_sge = wr->num_sge; | ||
82 | for (i = 0; i < wr->num_sge; i++) | ||
83 | wqe->sg_list[i] = wr->sg_list[i]; | ||
84 | /* Make sure queue entry is written before the head index. */ | ||
85 | smp_wmb(); | ||
86 | wq->head = next; | ||
87 | spin_unlock_irqrestore(&srq->rq.lock, flags); | ||
88 | } | ||
89 | ret = 0; | ||
90 | |||
91 | bail: | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * qib_create_srq - create a shared receive queue | ||
97 | * @ibpd: the protection domain of the SRQ to create | ||
98 | * @srq_init_attr: the attributes of the SRQ | ||
99 | * @udata: data from libibverbs when creating a user SRQ | ||
100 | */ | ||
101 | struct ib_srq *qib_create_srq(struct ib_pd *ibpd, | ||
102 | struct ib_srq_init_attr *srq_init_attr, | ||
103 | struct ib_udata *udata) | ||
104 | { | ||
105 | struct qib_ibdev *dev = to_idev(ibpd->device); | ||
106 | struct qib_srq *srq; | ||
107 | u32 sz; | ||
108 | struct ib_srq *ret; | ||
109 | |||
110 | if (srq_init_attr->attr.max_sge == 0 || | ||
111 | srq_init_attr->attr.max_sge > ib_qib_max_srq_sges || | ||
112 | srq_init_attr->attr.max_wr == 0 || | ||
113 | srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) { | ||
114 | ret = ERR_PTR(-EINVAL); | ||
115 | goto done; | ||
116 | } | ||
117 | |||
118 | srq = kmalloc(sizeof(*srq), GFP_KERNEL); | ||
119 | if (!srq) { | ||
120 | ret = ERR_PTR(-ENOMEM); | ||
121 | goto done; | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Need to use vmalloc() if we want to support large #s of entries. | ||
126 | */ | ||
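| 	/* One extra entry so a full ring can be told apart from an empty one */ | ||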
127 | srq->rq.size = srq_init_attr->attr.max_wr + 1; | ||
128 | srq->rq.max_sge = srq_init_attr->attr.max_sge; | ||
129 | sz = sizeof(struct ib_sge) * srq->rq.max_sge + | ||
130 | sizeof(struct qib_rwqe); | ||
131 | srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz); | ||
132 | if (!srq->rq.wq) { | ||
133 | ret = ERR_PTR(-ENOMEM); | ||
134 | goto bail_srq; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * Return the address of the RWQ as the offset to mmap. | ||
139 | * See qib_mmap() for details. | ||
140 | */ | ||
141 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
142 | int err; | ||
143 | u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz; | ||
144 | |||
145 | srq->ip = | ||
146 | qib_create_mmap_info(dev, s, ibpd->uobject->context, | ||
147 | srq->rq.wq); | ||
148 | if (!srq->ip) { | ||
149 | ret = ERR_PTR(-ENOMEM); | ||
150 | goto bail_wq; | ||
151 | } | ||
152 | |||
153 | err = ib_copy_to_udata(udata, &srq->ip->offset, | ||
154 | sizeof(srq->ip->offset)); | ||
155 | if (err) { | ||
156 | ret = ERR_PTR(err); | ||
157 | goto bail_ip; | ||
158 | } | ||
159 | } else | ||
160 | srq->ip = NULL; | ||
161 | |||
162 | /* | ||
163 | * ib_create_srq() will initialize srq->ibsrq. | ||
164 | */ | ||
165 | spin_lock_init(&srq->rq.lock); | ||
166 | srq->rq.wq->head = 0; | ||
167 | srq->rq.wq->tail = 0; | ||
168 | srq->limit = srq_init_attr->attr.srq_limit; | ||
169 | |||
170 | spin_lock(&dev->n_srqs_lock); | ||
171 | if (dev->n_srqs_allocated == ib_qib_max_srqs) { | ||
172 | spin_unlock(&dev->n_srqs_lock); | ||
173 | ret = ERR_PTR(-ENOMEM); | ||
174 | goto bail_ip; | ||
175 | } | ||
176 | |||
177 | dev->n_srqs_allocated++; | ||
178 | spin_unlock(&dev->n_srqs_lock); | ||
179 | |||
180 | if (srq->ip) { | ||
181 | spin_lock_irq(&dev->pending_lock); | ||
182 | list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); | ||
183 | spin_unlock_irq(&dev->pending_lock); | ||
184 | } | ||
185 | |||
186 | ret = &srq->ibsrq; | ||
187 | goto done; | ||
188 | |||
189 | bail_ip: | ||
190 | kfree(srq->ip); | ||
191 | bail_wq: | ||
192 | vfree(srq->rq.wq); | ||
193 | bail_srq: | ||
194 | kfree(srq); | ||
195 | done: | ||
196 | return ret; | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * qib_modify_srq - modify a shared receive queue | ||
201 | * @ibsrq: the SRQ to modify | ||
202 | * @attr: the new attributes of the SRQ | ||
203 | * @attr_mask: indicates which attributes to modify | ||
204 | * @udata: user data for libibverbs.so | ||
205 | */ | ||
206 | int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | ||
207 | enum ib_srq_attr_mask attr_mask, | ||
208 | struct ib_udata *udata) | ||
209 | { | ||
210 | struct qib_srq *srq = to_isrq(ibsrq); | ||
211 | struct qib_rwq *wq; | ||
212 | int ret = 0; | ||
213 | |||
214 | if (attr_mask & IB_SRQ_MAX_WR) { | ||
215 | struct qib_rwq *owq; | ||
216 | struct qib_rwqe *p; | ||
217 | u32 sz, size, n, head, tail; | ||
218 | |||
219 | /* Check that the requested sizes are below the limits. */ | ||
220 | if ((attr->max_wr > ib_qib_max_srq_wrs) || | ||
221 | ((attr_mask & IB_SRQ_LIMIT) ? | ||
222 | attr->srq_limit : srq->limit) > attr->max_wr) { | ||
223 | ret = -EINVAL; | ||
224 | goto bail; | ||
225 | } | ||
226 | |||
227 | sz = sizeof(struct qib_rwqe) + | ||
228 | srq->rq.max_sge * sizeof(struct ib_sge); | ||
229 | size = attr->max_wr + 1; | ||
230 | wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz); | ||
231 | if (!wq) { | ||
232 | ret = -ENOMEM; | ||
233 | goto bail; | ||
234 | } | ||
235 | |||
236 | /* Check that we can write the offset to mmap. */ | ||
237 | if (udata && udata->inlen >= sizeof(__u64)) { | ||
238 | __u64 offset_addr; | ||
239 | __u64 offset = 0; | ||
240 | |||
241 | ret = ib_copy_from_udata(&offset_addr, udata, | ||
242 | sizeof(offset_addr)); | ||
243 | if (ret) | ||
244 | goto bail_free; | ||
245 | udata->outbuf = | ||
246 | (void __user *) (unsigned long) offset_addr; | ||
247 | ret = ib_copy_to_udata(udata, &offset, | ||
248 | sizeof(offset)); | ||
249 | if (ret) | ||
250 | goto bail_free; | ||
251 | } | ||
252 | |||
253 | spin_lock_irq(&srq->rq.lock); | ||
254 | /* | ||
255 | * validate head and tail pointer values and compute | ||
256 | * the number of remaining WQEs. | ||
257 | */ | ||
258 | owq = srq->rq.wq; | ||
259 | head = owq->head; | ||
260 | tail = owq->tail; | ||
261 | if (head >= srq->rq.size || tail >= srq->rq.size) { | ||
262 | ret = -EINVAL; | ||
263 | goto bail_unlock; | ||
264 | } | ||
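| 		/* Count the WQEs currently queued in the old (circular) ring */ | ||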
265 | n = head; | ||
266 | if (n < tail) | ||
267 | n += srq->rq.size - tail; | ||
268 | else | ||
269 | n -= tail; | ||
270 | if (size <= n) { | ||
271 | ret = -EINVAL; | ||
272 | goto bail_unlock; | ||
273 | } | ||
274 | n = 0; | ||
275 | p = wq->wq; | ||
276 | while (tail != head) { | ||
277 | struct qib_rwqe *wqe; | ||
278 | int i; | ||
279 | |||
280 | wqe = get_rwqe_ptr(&srq->rq, tail); | ||
281 | p->wr_id = wqe->wr_id; | ||
282 | p->num_sge = wqe->num_sge; | ||
283 | for (i = 0; i < wqe->num_sge; i++) | ||
284 | p->sg_list[i] = wqe->sg_list[i]; | ||
285 | n++; | ||
286 | p = (struct qib_rwqe *)((char *) p + sz); | ||
287 | if (++tail >= srq->rq.size) | ||
288 | tail = 0; | ||
289 | } | ||
290 | srq->rq.wq = wq; | ||
291 | srq->rq.size = size; | ||
292 | wq->head = n; | ||
293 | wq->tail = 0; | ||
294 | if (attr_mask & IB_SRQ_LIMIT) | ||
295 | srq->limit = attr->srq_limit; | ||
296 | spin_unlock_irq(&srq->rq.lock); | ||
297 | |||
298 | vfree(owq); | ||
299 | |||
300 | if (srq->ip) { | ||
301 | struct qib_mmap_info *ip = srq->ip; | ||
302 | struct qib_ibdev *dev = to_idev(srq->ibsrq.device); | ||
303 | u32 s = sizeof(struct qib_rwq) + size * sz; | ||
304 | |||
305 | qib_update_mmap_info(dev, ip, s, wq); | ||
306 | |||
307 | /* | ||
308 | * Return the offset to mmap. | ||
309 | * See qib_mmap() for details. | ||
310 | */ | ||
311 | if (udata && udata->inlen >= sizeof(__u64)) { | ||
312 | ret = ib_copy_to_udata(udata, &ip->offset, | ||
313 | sizeof(ip->offset)); | ||
314 | if (ret) | ||
315 | goto bail; | ||
316 | } | ||
317 | |||
318 | /* | ||
319 | * Put user mapping info onto the pending list | ||
320 | * unless it already is on the list. | ||
321 | */ | ||
322 | spin_lock_irq(&dev->pending_lock); | ||
323 | if (list_empty(&ip->pending_mmaps)) | ||
324 | list_add(&ip->pending_mmaps, | ||
325 | &dev->pending_mmaps); | ||
326 | spin_unlock_irq(&dev->pending_lock); | ||
327 | } | ||
328 | } else if (attr_mask & IB_SRQ_LIMIT) { | ||
329 | spin_lock_irq(&srq->rq.lock); | ||
330 | if (attr->srq_limit >= srq->rq.size) | ||
331 | ret = -EINVAL; | ||
332 | else | ||
333 | srq->limit = attr->srq_limit; | ||
334 | spin_unlock_irq(&srq->rq.lock); | ||
335 | } | ||
336 | goto bail; | ||
337 | |||
338 | bail_unlock: | ||
339 | spin_unlock_irq(&srq->rq.lock); | ||
340 | bail_free: | ||
341 | vfree(wq); | ||
342 | bail: | ||
343 | return ret; | ||
344 | } | ||
345 | |||
346 | int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) | ||
347 | { | ||
348 | struct qib_srq *srq = to_isrq(ibsrq); | ||
349 | |||
350 | attr->max_wr = srq->rq.size - 1; | ||
351 | attr->max_sge = srq->rq.max_sge; | ||
352 | attr->srq_limit = srq->limit; | ||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * qib_destroy_srq - destroy a shared receive queue | ||
358 | * @ibsrq: the SRQ to destroy | ||
359 | */ | ||
360 | int qib_destroy_srq(struct ib_srq *ibsrq) | ||
361 | { | ||
362 | struct qib_srq *srq = to_isrq(ibsrq); | ||
363 | struct qib_ibdev *dev = to_idev(ibsrq->device); | ||
364 | |||
365 | spin_lock(&dev->n_srqs_lock); | ||
366 | dev->n_srqs_allocated--; | ||
367 | spin_unlock(&dev->n_srqs_lock); | ||
368 | if (srq->ip) | ||
369 | kref_put(&srq->ip->ref, qib_release_mmap_info); | ||
370 | else | ||
371 | vfree(srq->rq.wq); | ||
372 | kfree(srq); | ||
373 | |||
374 | return 0; | ||
375 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c new file mode 100644 index 000000000000..dab4d9f4a2cc --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_sysfs.c | |||
@@ -0,0 +1,691 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | #include <linux/ctype.h> | ||
34 | |||
35 | #include "qib.h" | ||
36 | |||
37 | /** | ||
38 | * qib_parse_ushort - parse an unsigned short value in an arbitrary base | ||
39 | * @str: the string containing the number | ||
40 | * @valp: where to put the result | ||
41 | * | ||
42 | * Returns the number of bytes consumed, or negative value on error. | ||
43 | */ | ||
44 | static int qib_parse_ushort(const char *str, unsigned short *valp) | ||
45 | { | ||
46 | unsigned long val; | ||
47 | char *end; | ||
48 | int ret; | ||
49 | |||
50 | if (!isdigit(str[0])) { | ||
51 | ret = -EINVAL; | ||
52 | goto bail; | ||
53 | } | ||
54 | |||
55 | val = simple_strtoul(str, &end, 0); | ||
56 | |||
57 | if (val > 0xffff) { | ||
58 | ret = -EINVAL; | ||
59 | goto bail; | ||
60 | } | ||
61 | |||
62 | *valp = val; | ||
63 | |||
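| 	/* end points at the first non-digit character; include it in the count of consumed bytes */ | ||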
64 | ret = end + 1 - str; | ||
65 | if (ret == 0) | ||
66 | ret = -EINVAL; | ||
67 | |||
68 | bail: | ||
69 | return ret; | ||
70 | } | ||
71 | |||
72 | /* start of per-port functions */ | ||
73 | /* | ||
74 | * Get/Set heartbeat enable. OR of 1=enabled, 2=auto | ||
75 | */ | ||
76 | static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf) | ||
77 | { | ||
78 | struct qib_devdata *dd = ppd->dd; | ||
79 | int ret; | ||
80 | |||
81 | ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT); | ||
82 | ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); | ||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf, | ||
87 | size_t count) | ||
88 | { | ||
89 | struct qib_devdata *dd = ppd->dd; | ||
90 | int ret; | ||
91 | u16 val; | ||
92 | |||
93 | ret = qib_parse_ushort(buf, &val); | ||
94 | |||
95 | /* | ||
96 | * Set the "intentional" heartbeat enable per either of | ||
97 | * "Enable" and "Auto", as these are normally set together. | ||
98 | * This bit is consulted when leaving loopback mode, | ||
99 | * because entering loopback mode overrides it and automatically | ||
100 | * disables heartbeat. | ||
101 | */ | ||
102 | if (ret >= 0) | ||
103 | ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); | ||
104 | if (ret < 0) | ||
105 | qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); | ||
106 | return ret < 0 ? ret : count; | ||
107 | } | ||
108 | |||
109 | static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf, | ||
110 | size_t count) | ||
111 | { | ||
112 | struct qib_devdata *dd = ppd->dd; | ||
113 | int ret = count, r; | ||
114 | |||
115 | r = dd->f_set_ib_loopback(ppd, buf); | ||
116 | if (r < 0) | ||
117 | ret = r; | ||
118 | |||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf, | ||
123 | size_t count) | ||
124 | { | ||
125 | struct qib_devdata *dd = ppd->dd; | ||
126 | int ret; | ||
127 | u16 val; | ||
128 | |||
129 | ret = qib_parse_ushort(buf, &val); | ||
130 | if (ret > 0) | ||
131 | qib_set_led_override(ppd, val); | ||
132 | else | ||
133 | qib_dev_err(dd, "attempt to set invalid LED override\n"); | ||
134 | return ret < 0 ? ret : count; | ||
135 | } | ||
136 | |||
137 | static ssize_t show_status(struct qib_pportdata *ppd, char *buf) | ||
138 | { | ||
139 | ssize_t ret; | ||
140 | |||
141 | if (!ppd->statusp) | ||
142 | ret = -EINVAL; | ||
143 | else | ||
144 | ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n", | ||
145 | (unsigned long long) *(ppd->statusp)); | ||
146 | return ret; | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * For userland compatibility, these offsets must remain fixed. | ||
151 | * They are strings for QIB_STATUS_* | ||
152 | */ | ||
153 | static const char *qib_status_str[] = { | ||
154 | "Initted", | ||
155 | "", | ||
156 | "", | ||
157 | "", | ||
158 | "", | ||
159 | "Present", | ||
160 | "IB_link_up", | ||
161 | "IB_configured", | ||
162 | "", | ||
163 | "Fatal_Hardware_Error", | ||
164 | NULL, | ||
165 | }; | ||
166 | |||
167 | static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf) | ||
168 | { | ||
169 | int i, any; | ||
170 | u64 s; | ||
171 | ssize_t ret; | ||
172 | |||
173 | if (!ppd->statusp) { | ||
174 | ret = -EINVAL; | ||
175 | goto bail; | ||
176 | } | ||
177 | |||
178 | s = *(ppd->statusp); | ||
179 | *buf = '\0'; | ||
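| 	/* Walk the status bits from the LSB up, appending the name of each set bit */ | ||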
180 | for (any = i = 0; s && qib_status_str[i]; i++) { | ||
181 | if (s & 1) { | ||
182 | /* if overflow */ | ||
183 | if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) | ||
184 | break; | ||
185 | if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >= | ||
186 | PAGE_SIZE) | ||
187 | break; | ||
188 | any = 1; | ||
189 | } | ||
190 | s >>= 1; | ||
191 | } | ||
192 | if (any) | ||
193 | strlcat(buf, "\n", PAGE_SIZE); | ||
194 | |||
195 | ret = strlen(buf); | ||
196 | |||
197 | bail: | ||
198 | return ret; | ||
199 | } | ||
200 | |||
201 | /* end of per-port functions */ | ||
202 | |||
203 | /* | ||
204 | * Start of per-port file structures and support code | ||
205 | * Because we are fitting into other infrastructure, we have to supply the | ||
206 | * full set of kobject/sysfs_ops structures and routines. | ||
207 | */ | ||
208 | #define QIB_PORT_ATTR(name, mode, show, store) \ | ||
209 | static struct qib_port_attr qib_port_attr_##name = \ | ||
210 | __ATTR(name, mode, show, store) | ||
211 | |||
212 | struct qib_port_attr { | ||
213 | struct attribute attr; | ||
214 | ssize_t (*show)(struct qib_pportdata *, char *); | ||
215 | ssize_t (*store)(struct qib_pportdata *, const char *, size_t); | ||
216 | }; | ||
217 | |||
218 | QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback); | ||
219 | QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override); | ||
220 | QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb, | ||
221 | store_hrtbt_enb); | ||
222 | QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL); | ||
223 | QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL); | ||
224 | |||
225 | static struct attribute *port_default_attributes[] = { | ||
226 | &qib_port_attr_loopback.attr, | ||
227 | &qib_port_attr_led_override.attr, | ||
228 | &qib_port_attr_hrtbt_enable.attr, | ||
229 | &qib_port_attr_status.attr, | ||
230 | &qib_port_attr_status_str.attr, | ||
231 | NULL | ||
232 | }; | ||
233 | |||
234 | static ssize_t qib_portattr_show(struct kobject *kobj, | ||
235 | struct attribute *attr, char *buf) | ||
236 | { | ||
237 | struct qib_port_attr *pattr = | ||
238 | container_of(attr, struct qib_port_attr, attr); | ||
239 | struct qib_pportdata *ppd = | ||
240 | container_of(kobj, struct qib_pportdata, pport_kobj); | ||
241 | |||
242 | return pattr->show(ppd, buf); | ||
243 | } | ||
244 | |||
245 | static ssize_t qib_portattr_store(struct kobject *kobj, | ||
246 | struct attribute *attr, const char *buf, size_t len) | ||
247 | { | ||
248 | struct qib_port_attr *pattr = | ||
249 | container_of(attr, struct qib_port_attr, attr); | ||
250 | struct qib_pportdata *ppd = | ||
251 | container_of(kobj, struct qib_pportdata, pport_kobj); | ||
252 | |||
253 | return pattr->store(ppd, buf, len); | ||
254 | } | ||
255 | |||
256 | static void qib_port_release(struct kobject *kobj) | ||
257 | { | ||
258 | /* nothing to do since memory is freed by qib_free_devdata() */ | ||
259 | } | ||
260 | |||
261 | static const struct sysfs_ops qib_port_ops = { | ||
262 | .show = qib_portattr_show, | ||
263 | .store = qib_portattr_store, | ||
264 | }; | ||
265 | |||
266 | static struct kobj_type qib_port_ktype = { | ||
267 | .release = qib_port_release, | ||
268 | .sysfs_ops = &qib_port_ops, | ||
269 | .default_attrs = port_default_attributes | ||
270 | }; | ||
271 | |||
272 | /* Start sl2vl */ | ||
273 | |||
274 | #define QIB_SL2VL_ATTR(N) \ | ||
275 | static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \ | ||
276 | .attr = { .name = __stringify(N), .mode = 0444 }, \ | ||
277 | .sl = N \ | ||
278 | } | ||
279 | |||
280 | struct qib_sl2vl_attr { | ||
281 | struct attribute attr; | ||
282 | int sl; | ||
283 | }; | ||
284 | |||
285 | QIB_SL2VL_ATTR(0); | ||
286 | QIB_SL2VL_ATTR(1); | ||
287 | QIB_SL2VL_ATTR(2); | ||
288 | QIB_SL2VL_ATTR(3); | ||
289 | QIB_SL2VL_ATTR(4); | ||
290 | QIB_SL2VL_ATTR(5); | ||
291 | QIB_SL2VL_ATTR(6); | ||
292 | QIB_SL2VL_ATTR(7); | ||
293 | QIB_SL2VL_ATTR(8); | ||
294 | QIB_SL2VL_ATTR(9); | ||
295 | QIB_SL2VL_ATTR(10); | ||
296 | QIB_SL2VL_ATTR(11); | ||
297 | QIB_SL2VL_ATTR(12); | ||
298 | QIB_SL2VL_ATTR(13); | ||
299 | QIB_SL2VL_ATTR(14); | ||
300 | QIB_SL2VL_ATTR(15); | ||
301 | |||
302 | static struct attribute *sl2vl_default_attributes[] = { | ||
303 | &qib_sl2vl_attr_0.attr, | ||
304 | &qib_sl2vl_attr_1.attr, | ||
305 | &qib_sl2vl_attr_2.attr, | ||
306 | &qib_sl2vl_attr_3.attr, | ||
307 | &qib_sl2vl_attr_4.attr, | ||
308 | &qib_sl2vl_attr_5.attr, | ||
309 | &qib_sl2vl_attr_6.attr, | ||
310 | &qib_sl2vl_attr_7.attr, | ||
311 | &qib_sl2vl_attr_8.attr, | ||
312 | &qib_sl2vl_attr_9.attr, | ||
313 | &qib_sl2vl_attr_10.attr, | ||
314 | &qib_sl2vl_attr_11.attr, | ||
315 | &qib_sl2vl_attr_12.attr, | ||
316 | &qib_sl2vl_attr_13.attr, | ||
317 | &qib_sl2vl_attr_14.attr, | ||
318 | &qib_sl2vl_attr_15.attr, | ||
319 | NULL | ||
320 | }; | ||
321 | |||
322 | static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr, | ||
323 | char *buf) | ||
324 | { | ||
325 | struct qib_sl2vl_attr *sattr = | ||
326 | container_of(attr, struct qib_sl2vl_attr, attr); | ||
327 | struct qib_pportdata *ppd = | ||
328 | container_of(kobj, struct qib_pportdata, sl2vl_kobj); | ||
329 | struct qib_ibport *qibp = &ppd->ibport_data; | ||
330 | |||
331 | return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]); | ||
332 | } | ||
333 | |||
334 | static const struct sysfs_ops qib_sl2vl_ops = { | ||
335 | .show = sl2vl_attr_show, | ||
336 | }; | ||
337 | |||
338 | static struct kobj_type qib_sl2vl_ktype = { | ||
339 | .release = qib_port_release, | ||
340 | .sysfs_ops = &qib_sl2vl_ops, | ||
341 | .default_attrs = sl2vl_default_attributes | ||
342 | }; | ||
343 | |||
344 | /* End sl2vl */ | ||
345 | |||
346 | /* Start diag_counters */ | ||
347 | |||
348 | #define QIB_DIAGC_ATTR(N) \ | ||
349 | static struct qib_diagc_attr qib_diagc_attr_##N = { \ | ||
350 | .attr = { .name = __stringify(N), .mode = 0444 }, \ | ||
351 | .counter = offsetof(struct qib_ibport, n_##N) \ | ||
352 | } | ||
353 | |||
354 | struct qib_diagc_attr { | ||
355 | struct attribute attr; | ||
356 | size_t counter; | ||
357 | }; | ||
358 | |||
359 | QIB_DIAGC_ATTR(rc_resends); | ||
360 | QIB_DIAGC_ATTR(rc_acks); | ||
361 | QIB_DIAGC_ATTR(rc_qacks); | ||
362 | QIB_DIAGC_ATTR(rc_delayed_comp); | ||
363 | QIB_DIAGC_ATTR(seq_naks); | ||
364 | QIB_DIAGC_ATTR(rdma_seq); | ||
365 | QIB_DIAGC_ATTR(rnr_naks); | ||
366 | QIB_DIAGC_ATTR(other_naks); | ||
367 | QIB_DIAGC_ATTR(rc_timeouts); | ||
368 | QIB_DIAGC_ATTR(loop_pkts); | ||
369 | QIB_DIAGC_ATTR(pkt_drops); | ||
370 | QIB_DIAGC_ATTR(dmawait); | ||
371 | QIB_DIAGC_ATTR(unaligned); | ||
372 | QIB_DIAGC_ATTR(rc_dupreq); | ||
373 | QIB_DIAGC_ATTR(rc_seqnak); | ||
374 | |||
375 | static struct attribute *diagc_default_attributes[] = { | ||
376 | &qib_diagc_attr_rc_resends.attr, | ||
377 | &qib_diagc_attr_rc_acks.attr, | ||
378 | &qib_diagc_attr_rc_qacks.attr, | ||
379 | &qib_diagc_attr_rc_delayed_comp.attr, | ||
380 | &qib_diagc_attr_seq_naks.attr, | ||
381 | &qib_diagc_attr_rdma_seq.attr, | ||
382 | &qib_diagc_attr_rnr_naks.attr, | ||
383 | &qib_diagc_attr_other_naks.attr, | ||
384 | &qib_diagc_attr_rc_timeouts.attr, | ||
385 | &qib_diagc_attr_loop_pkts.attr, | ||
386 | &qib_diagc_attr_pkt_drops.attr, | ||
387 | &qib_diagc_attr_dmawait.attr, | ||
388 | &qib_diagc_attr_unaligned.attr, | ||
389 | &qib_diagc_attr_rc_dupreq.attr, | ||
390 | &qib_diagc_attr_rc_seqnak.attr, | ||
391 | NULL | ||
392 | }; | ||
393 | |||
394 | static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, | ||
395 | char *buf) | ||
396 | { | ||
397 | struct qib_diagc_attr *dattr = | ||
398 | container_of(attr, struct qib_diagc_attr, attr); | ||
399 | struct qib_pportdata *ppd = | ||
400 | container_of(kobj, struct qib_pportdata, diagc_kobj); | ||
401 | struct qib_ibport *qibp = &ppd->ibport_data; | ||
402 | |||
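| 	/* dattr->counter is the byte offset of the u32 counter within struct qib_ibport */ | ||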
403 | return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); | ||
404 | } | ||
405 | |||
406 | static const struct sysfs_ops qib_diagc_ops = { | ||
407 | .show = diagc_attr_show, | ||
408 | }; | ||
409 | |||
410 | static struct kobj_type qib_diagc_ktype = { | ||
411 | .release = qib_port_release, | ||
412 | .sysfs_ops = &qib_diagc_ops, | ||
413 | .default_attrs = diagc_default_attributes | ||
414 | }; | ||
415 | |||
416 | /* End diag_counters */ | ||
417 | |||
418 | /* end of per-port file structures and support code */ | ||
419 | |||
420 | /* | ||
421 | * Start of per-unit (or driver, in some cases, but replicated | ||
422 | * per unit) functions (these get a device *) | ||
423 | */ | ||
424 | static ssize_t show_rev(struct device *device, struct device_attribute *attr, | ||
425 | char *buf) | ||
426 | { | ||
427 | struct qib_ibdev *dev = | ||
428 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
429 | |||
430 | return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev); | ||
431 | } | ||
432 | |||
433 | static ssize_t show_hca(struct device *device, struct device_attribute *attr, | ||
434 | char *buf) | ||
435 | { | ||
436 | struct qib_ibdev *dev = | ||
437 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
438 | struct qib_devdata *dd = dd_from_dev(dev); | ||
439 | int ret; | ||
440 | |||
441 | if (!dd->boardname) | ||
442 | ret = -EINVAL; | ||
443 | else | ||
444 | ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname); | ||
445 | return ret; | ||
446 | } | ||
447 | |||
448 | static ssize_t show_version(struct device *device, | ||
449 | struct device_attribute *attr, char *buf) | ||
450 | { | ||
451 | /* The string printed here is already newline-terminated. */ | ||
452 | return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version); | ||
453 | } | ||
454 | |||
455 | static ssize_t show_boardversion(struct device *device, | ||
456 | struct device_attribute *attr, char *buf) | ||
457 | { | ||
458 | struct qib_ibdev *dev = | ||
459 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
460 | struct qib_devdata *dd = dd_from_dev(dev); | ||
461 | |||
462 | /* The string printed here is already newline-terminated. */ | ||
463 | return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion); | ||
464 | } | ||
465 | |||
466 | |||
467 | static ssize_t show_localbus_info(struct device *device, | ||
468 | struct device_attribute *attr, char *buf) | ||
469 | { | ||
470 | struct qib_ibdev *dev = | ||
471 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
472 | struct qib_devdata *dd = dd_from_dev(dev); | ||
473 | |||
474 | /* The string printed here is already newline-terminated. */ | ||
475 | return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info); | ||
476 | } | ||
477 | |||
478 | |||
479 | static ssize_t show_nctxts(struct device *device, | ||
480 | struct device_attribute *attr, char *buf) | ||
481 | { | ||
482 | struct qib_ibdev *dev = | ||
483 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
484 | struct qib_devdata *dd = dd_from_dev(dev); | ||
485 | |||
486 | /* Return the number of user ports (contexts) available. */ | ||
487 | return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts - | ||
488 | dd->first_user_ctxt); | ||
489 | } | ||
490 | |||
491 | static ssize_t show_serial(struct device *device, | ||
492 | struct device_attribute *attr, char *buf) | ||
493 | { | ||
494 | struct qib_ibdev *dev = | ||
495 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
496 | struct qib_devdata *dd = dd_from_dev(dev); | ||
497 | |||
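| 	/* dd->serial is a fixed-size field and may not be NUL-terminated */ | ||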
498 | buf[sizeof dd->serial] = '\0'; | ||
499 | memcpy(buf, dd->serial, sizeof dd->serial); | ||
500 | strcat(buf, "\n"); | ||
501 | return strlen(buf); | ||
502 | } | ||
503 | |||
504 | static ssize_t store_chip_reset(struct device *device, | ||
505 | struct device_attribute *attr, const char *buf, | ||
506 | size_t count) | ||
507 | { | ||
508 | struct qib_ibdev *dev = | ||
509 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
510 | struct qib_devdata *dd = dd_from_dev(dev); | ||
511 | int ret; | ||
512 | |||
513 | if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) { | ||
514 | ret = -EINVAL; | ||
515 | goto bail; | ||
516 | } | ||
517 | |||
518 | ret = qib_reset_device(dd->unit); | ||
519 | bail: | ||
520 | return ret < 0 ? ret : count; | ||
521 | } | ||
522 | |||
523 | static ssize_t show_logged_errs(struct device *device, | ||
524 | struct device_attribute *attr, char *buf) | ||
525 | { | ||
526 | struct qib_ibdev *dev = | ||
527 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
528 | struct qib_devdata *dd = dd_from_dev(dev); | ||
529 | int idx, count; | ||
530 | |||
531 | /* force consistency with actual EEPROM */ | ||
532 | if (qib_update_eeprom_log(dd) != 0) | ||
533 | return -ENXIO; | ||
534 | |||
535 | count = 0; | ||
536 | for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { | ||
537 | count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", | ||
538 | dd->eep_st_errs[idx], | ||
539 | idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); | ||
540 | } | ||
541 | |||
542 | return count; | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * Dump tempsense regs in decimal, to ease shell scripts. | ||
547 | */ | ||
548 | static ssize_t show_tempsense(struct device *device, | ||
549 | struct device_attribute *attr, char *buf) | ||
550 | { | ||
551 | struct qib_ibdev *dev = | ||
552 | container_of(device, struct qib_ibdev, ibdev.dev); | ||
553 | struct qib_devdata *dd = dd_from_dev(dev); | ||
554 | int ret; | ||
555 | int idx; | ||
556 | u8 regvals[8]; | ||
557 | |||
558 | ret = -ENXIO; | ||
559 | for (idx = 0; idx < 8; ++idx) { | ||
560 | if (idx == 6) | ||
561 | continue; | ||
562 | ret = dd->f_tempsense_rd(dd, idx); | ||
563 | if (ret < 0) | ||
564 | break; | ||
565 | regvals[idx] = ret; | ||
566 | } | ||
567 | if (idx == 8) | ||
568 | ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n", | ||
569 | *(signed char *)(regvals), | ||
570 | *(signed char *)(regvals + 1), | ||
571 | regvals[2], regvals[3], | ||
572 | *(signed char *)(regvals + 5), | ||
573 | *(signed char *)(regvals + 7)); | ||
574 | return ret; | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * end of per-unit (or driver, in some cases, but replicated | ||
579 | * per unit) functions | ||
580 | */ | ||
581 | |||
582 | /* start of per-unit file structures and support code */ | ||
583 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | ||
584 | static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); | ||
585 | static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL); | ||
586 | static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); | ||
587 | static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL); | ||
588 | static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); | ||
589 | static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); | ||
590 | static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); | ||
591 | static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); | ||
592 | static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); | ||
593 | static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); | ||
594 | |||
595 | static struct device_attribute *qib_attributes[] = { | ||
596 | &dev_attr_hw_rev, | ||
597 | &dev_attr_hca_type, | ||
598 | &dev_attr_board_id, | ||
599 | &dev_attr_version, | ||
600 | &dev_attr_nctxts, | ||
601 | &dev_attr_serial, | ||
602 | &dev_attr_boardversion, | ||
603 | &dev_attr_logged_errors, | ||
604 | &dev_attr_tempsense, | ||
605 | &dev_attr_localbus_info, | ||
606 | &dev_attr_chip_reset, | ||
607 | }; | ||
608 | |||
609 | int qib_create_port_files(struct ib_device *ibdev, u8 port_num, | ||
610 | struct kobject *kobj) | ||
611 | { | ||
612 | struct qib_pportdata *ppd; | ||
613 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
614 | int ret; | ||
615 | |||
616 | if (!port_num || port_num > dd->num_pports) { | ||
617 | qib_dev_err(dd, "Skipping infiniband class with " | ||
618 | "invalid port %u\n", port_num); | ||
619 | ret = -ENODEV; | ||
620 | goto bail; | ||
621 | } | ||
622 | ppd = &dd->pport[port_num - 1]; | ||
623 | |||
624 | ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj, | ||
625 | "linkcontrol"); | ||
626 | if (ret) { | ||
627 | qib_dev_err(dd, "Skipping linkcontrol sysfs info, " | ||
628 | "(err %d) port %u\n", ret, port_num); | ||
629 | goto bail; | ||
630 | } | ||
631 | kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); | ||
632 | |||
633 | ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj, | ||
634 | "sl2vl"); | ||
635 | if (ret) { | ||
636 | qib_dev_err(dd, "Skipping sl2vl sysfs info, " | ||
637 | "(err %d) port %u\n", ret, port_num); | ||
638 | goto bail_sl; | ||
639 | } | ||
640 | kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); | ||
641 | |||
642 | ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj, | ||
643 | "diag_counters"); | ||
644 | if (ret) { | ||
645 | qib_dev_err(dd, "Skipping diag_counters sysfs info, " | ||
646 | "(err %d) port %u\n", ret, port_num); | ||
647 | goto bail_diagc; | ||
648 | } | ||
649 | kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); | ||
650 | |||
651 | return 0; | ||
652 | |||
653 | bail_diagc: | ||
654 | kobject_put(&ppd->sl2vl_kobj); | ||
655 | bail_sl: | ||
656 | kobject_put(&ppd->pport_kobj); | ||
657 | bail: | ||
658 | return ret; | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * Register and create our files in /sys/class/infiniband. | ||
663 | */ | ||
664 | int qib_verbs_register_sysfs(struct qib_devdata *dd) | ||
665 | { | ||
666 | struct ib_device *dev = &dd->verbs_dev.ibdev; | ||
667 | int i, ret; | ||
668 | |||
669 | for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) { | ||
670 | ret = device_create_file(&dev->dev, qib_attributes[i]); | ||
671 | if (ret) | ||
672 | return ret; | ||
673 | } | ||
674 | |||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | /* | ||
679 | * Unregister and remove our files in /sys/class/infiniband. | ||
680 | */ | ||
681 | void qib_verbs_unregister_sysfs(struct qib_devdata *dd) | ||
682 | { | ||
683 | struct qib_pportdata *ppd; | ||
684 | int i; | ||
685 | |||
686 | for (i = 0; i < dd->num_pports; i++) { | ||
687 | ppd = &dd->pport[i]; | ||
688 | kobject_put(&ppd->pport_kobj); | ||
689 | 		kobject_put(&ppd->sl2vl_kobj); | ||
| 		kobject_put(&ppd->diagc_kobj); | ||
690 | } | ||
691 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c new file mode 100644 index 000000000000..6f31ca5039db --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_twsi.c | |||
@@ -0,0 +1,498 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/delay.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | |||
38 | #include "qib.h" | ||
39 | |||
40 | /* | ||
41 | * QLogic_IB "Two Wire Serial Interface" driver. | ||
42 | * Originally written for a not-quite-i2c serial eeprom, which is | ||
43 | * still used on some supported boards. Later boards have added a | ||
44 | * variety of other uses, most board-specific, so the bit-boffing | ||
45 | * part has been split off to this file, while the other parts | ||
46 | * have been moved to chip-specific files. | ||
47 | * | ||
48 | * We have also dropped all pretense of fully generic (e.g. pretend | ||
49 | * we don't know whether '1' is the higher voltage) interface, as | ||
50 | * the restrictions of the generic i2c interface (e.g. no access from | ||
51 | * driver itself) make it unsuitable for this use. | ||
52 | */ | ||
53 | |||
54 | #define READ_CMD 1 | ||
55 | #define WRITE_CMD 0 | ||
56 | |||
57 | /** | ||
58 | * i2c_wait_for_writes - wait for a write | ||
59 | * @dd: the qlogic_ib device | ||
60 | * | ||
61 | * We use this instead of udelay directly, so we can make sure | ||
62 | * that previous register writes have been flushed all the way | ||
63 | * to the chip. Since we are delaying anyway, the cost doesn't | ||
64 | * hurt, and makes the bit twiddling more regular | ||
65 | */ | ||
66 | static void i2c_wait_for_writes(struct qib_devdata *dd) | ||
67 | { | ||
68 | /* | ||
69 | * implicit read of EXTStatus is as good as explicit | ||
70 | * read of scratch, if all we want to do is flush | ||
71 | * writes. | ||
72 | */ | ||
73 | dd->f_gpio_mod(dd, 0, 0, 0); | ||
74 | rmb(); /* inlined, so prevent compiler reordering */ | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that | ||
79 | * for "almost compliant" modules | ||
80 | */ | ||
81 | #define SCL_WAIT_USEC 1000 | ||
82 | |||
83 | /* BUF_WAIT is time bus must be free between STOP or ACK and to next START. | ||
84 | * Should be 20, but some chips need more. | ||
85 | */ | ||
86 | #define TWSI_BUF_WAIT_USEC 60 | ||
87 | |||
88 | static void scl_out(struct qib_devdata *dd, u8 bit) | ||
89 | { | ||
90 | u32 mask; | ||
91 | |||
92 | udelay(1); | ||
93 | |||
94 | mask = 1UL << dd->gpio_scl_num; | ||
95 | |||
96 | /* SCL is meant to be bare-drain, so never set "OUT", just DIR */ | ||
97 | dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask); | ||
98 | |||
99 | /* | ||
100 | * Allow for slow slaves by using a simple | ||
101 | * delay on the falling edge, and sampling on the rise. | ||
102 | */ | ||
103 | if (!bit) | ||
104 | udelay(2); | ||
105 | else { | ||
106 | int rise_usec; | ||
107 | for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { | ||
108 | if (mask & dd->f_gpio_mod(dd, 0, 0, 0)) | ||
109 | break; | ||
110 | udelay(2); | ||
111 | } | ||
112 | if (rise_usec <= 0) | ||
113 | qib_dev_err(dd, "SCL interface stuck low > %d uSec\n", | ||
114 | SCL_WAIT_USEC); | ||
115 | } | ||
116 | i2c_wait_for_writes(dd); | ||
117 | } | ||
118 | |||
119 | static void sda_out(struct qib_devdata *dd, u8 bit) | ||
120 | { | ||
121 | u32 mask; | ||
122 | |||
123 | mask = 1UL << dd->gpio_sda_num; | ||
124 | |||
125 | /* SDA is meant to be bare-drain, so never set "OUT", just DIR */ | ||
126 | dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask); | ||
127 | |||
128 | i2c_wait_for_writes(dd); | ||
129 | udelay(2); | ||
130 | } | ||
131 | |||
132 | static u8 sda_in(struct qib_devdata *dd, int wait) | ||
133 | { | ||
134 | int bnum; | ||
135 | u32 read_val, mask; | ||
136 | |||
137 | bnum = dd->gpio_sda_num; | ||
138 | mask = (1UL << bnum); | ||
139 | /* SDA is meant to be bare-drain, so never set "OUT", just DIR */ | ||
140 | dd->f_gpio_mod(dd, 0, 0, mask); | ||
141 | read_val = dd->f_gpio_mod(dd, 0, 0, 0); | ||
142 | if (wait) | ||
143 | i2c_wait_for_writes(dd); | ||
144 | return (read_val & mask) >> bnum; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * i2c_ackrcv - see if ack following write is true | ||
149 | * @dd: the qlogic_ib device | ||
150 | */ | ||
151 | static int i2c_ackrcv(struct qib_devdata *dd) | ||
152 | { | ||
153 | u8 ack_received; | ||
154 | |||
155 | /* AT ENTRY SCL = LOW */ | ||
156 | /* change direction, ignore data */ | ||
157 | ack_received = sda_in(dd, 1); | ||
158 | scl_out(dd, 1); | ||
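| 	/* ACK is the slave holding SDA low while SCL is high */ | ||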
159 | ack_received = sda_in(dd, 1) == 0; | ||
160 | scl_out(dd, 0); | ||
161 | return ack_received; | ||
162 | } | ||
163 | |||
164 | static void stop_cmd(struct qib_devdata *dd); | ||
165 | |||
166 | /** | ||
167 | * rd_byte - read a byte, sending STOP on last, else ACK | ||
168 | * @dd: the qlogic_ib device | ||
169 | * | ||
170 | * Returns byte shifted out of device | ||
171 | */ | ||
172 | static int rd_byte(struct qib_devdata *dd, int last) | ||
173 | { | ||
174 | int bit_cntr, data; | ||
175 | |||
176 | data = 0; | ||
177 | |||
178 | for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) { | ||
179 | data <<= 1; | ||
180 | scl_out(dd, 1); | ||
181 | data |= sda_in(dd, 0); | ||
182 | scl_out(dd, 0); | ||
183 | } | ||
184 | if (last) { | ||
185 | scl_out(dd, 1); | ||
186 | stop_cmd(dd); | ||
187 | } else { | ||
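| 		/* ACK the byte: drive SDA low for one clock, then release it */ | ||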
188 | sda_out(dd, 0); | ||
189 | scl_out(dd, 1); | ||
190 | scl_out(dd, 0); | ||
191 | sda_out(dd, 1); | ||
192 | } | ||
193 | return data; | ||
194 | } | ||
195 | |||
196 | /** | ||
197 | * wr_byte - write a byte, one bit at a time | ||
198 | * @dd: the qlogic_ib device | ||
199 | * @data: the byte to write | ||
200 | * | ||
201 | * Returns 0 if we got the following ack, otherwise 1 | ||
202 | */ | ||
203 | static int wr_byte(struct qib_devdata *dd, u8 data) | ||
204 | { | ||
205 | int bit_cntr; | ||
206 | u8 bit; | ||
207 | |||
208 | for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) { | ||
209 | bit = (data >> bit_cntr) & 1; | ||
210 | sda_out(dd, bit); | ||
211 | scl_out(dd, 1); | ||
212 | scl_out(dd, 0); | ||
213 | } | ||
214 | return (!i2c_ackrcv(dd)) ? 1 : 0; | ||
215 | } | ||
216 | |||
217 | /* | ||
218 | * issue TWSI start sequence: | ||
219 | * (both clock/data high, clock high, data low while clock is high) | ||
220 | */ | ||
221 | static void start_seq(struct qib_devdata *dd) | ||
222 | { | ||
223 | sda_out(dd, 1); | ||
224 | scl_out(dd, 1); | ||
225 | sda_out(dd, 0); | ||
226 | udelay(1); | ||
227 | scl_out(dd, 0); | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * stop_seq - transmit the stop sequence | ||
232 | * @dd: the qlogic_ib device | ||
233 | * | ||
234 | * (both clock/data low, clock high, data high while clock is high) | ||
235 | */ | ||
236 | static void stop_seq(struct qib_devdata *dd) | ||
237 | { | ||
238 | scl_out(dd, 0); | ||
239 | sda_out(dd, 0); | ||
240 | scl_out(dd, 1); | ||
241 | sda_out(dd, 1); | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * stop_cmd - transmit the stop condition | ||
246 | * @dd: the qlogic_ib device | ||
247 | * | ||
248 | * (both clock/data low, clock high, data high while clock is high) | ||
249 | */ | ||
250 | static void stop_cmd(struct qib_devdata *dd) | ||
251 | { | ||
252 | stop_seq(dd); | ||
253 | udelay(TWSI_BUF_WAIT_USEC); | ||
254 | } | ||
255 | |||
256 | /** | ||
257 | * qib_twsi_reset - reset I2C communication | ||
258 | * @dd: the qlogic_ib device | ||
259 | */ | ||
260 | |||
261 | int qib_twsi_reset(struct qib_devdata *dd) | ||
262 | { | ||
263 | int clock_cycles_left = 9; | ||
264 | int was_high = 0; | ||
265 | u32 pins, mask; | ||
266 | |||
267 | /* Both SCL and SDA should be high. If not, there | ||
268 | * is something wrong. | ||
269 | */ | ||
270 | mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num); | ||
271 | |||
272 | /* | ||
273 | * Force pins to desired innocuous state. | ||
274 | * This is the default power-on state with out=0 and dir=0, | ||
275 | * so it is tri-stated and should be floating high (barring HW problems). | ||
276 | */ | ||
277 | dd->f_gpio_mod(dd, 0, 0, mask); | ||
278 | |||
279 | /* | ||
280 | * Clock nine times to get all listeners into a sane state. | ||
281 | * If SDA does not go high at any point, we are wedged. | ||
282 | * One vendor recommends then issuing START followed by STOP. | ||
283 | * we cannot use our "normal" functions to do that, because | ||
284 | * if SCL drops between them, another vendor's part will | ||
285 | * wedge, dropping SDA and keeping it low forever, at the end of | ||
286 | * the next transaction (even if it was not the device addressed). | ||
287 | * So our START and STOP take place with SCL held high. | ||
288 | */ | ||
289 | while (clock_cycles_left--) { | ||
290 | scl_out(dd, 0); | ||
291 | scl_out(dd, 1); | ||
292 | /* Note if SDA is high, but keep clocking to sync slave */ | ||
293 | was_high |= sda_in(dd, 0); | ||
294 | } | ||
295 | |||
296 | if (was_high) { | ||
297 | /* | ||
298 | * We saw a high, which we hope means the slave is sync'd. | ||
299 | * Issue START, STOP, pause for T_BUF. | ||
300 | */ | ||
301 | |||
302 | pins = dd->f_gpio_mod(dd, 0, 0, 0); | ||
303 | if ((pins & mask) != mask) | ||
304 | qib_dev_err(dd, "GPIO pins not at rest: %d\n", | ||
305 | pins & mask); | ||
306 | /* Drop SDA to issue START */ | ||
307 | udelay(1); /* Guarantee .6 uSec setup */ | ||
308 | sda_out(dd, 0); | ||
309 | udelay(1); /* Guarantee .6 uSec hold */ | ||
310 | /* At this point, SCL is high, SDA low. Raise SDA for STOP */ | ||
311 | sda_out(dd, 1); | ||
312 | udelay(TWSI_BUF_WAIT_USEC); | ||
313 | } | ||
314 | |||
315 | return !was_high; | ||
316 | } | ||
317 | |||
318 | #define QIB_TWSI_START 0x100 | ||
319 | #define QIB_TWSI_STOP 0x200 | ||
320 | |||
321 | /* Write byte to TWSI, optionally prefixed with START or suffixed with | ||
322 | * STOP. | ||
323 | * returns 0 if OK (ACK received), else != 0 | ||
324 | */ | ||
325 | static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags) | ||
326 | { | ||
327 | int ret = 1; | ||
328 | if (flags & QIB_TWSI_START) | ||
329 | start_seq(dd); | ||
330 | |||
331 | ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */ | ||
332 | |||
333 | if (flags & QIB_TWSI_STOP) | ||
334 | stop_cmd(dd); | ||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | /* Added functionality for IBA7220-based cards */ | ||
339 | #define QIB_TEMP_DEV 0x98 | ||
340 | |||
341 | /* | ||
342 | * qib_twsi_blk_rd | ||
343 | * Formerly called qib_eeprom_internal_read, and only used for eeprom, | ||
344 | * but now the general interface for data transfer from twsi devices. | ||
345 | * One vestige of its former role is that it recognizes a device | ||
346 | * QIB_TWSI_NO_DEV and does the correct operation for the legacy part, | ||
347 | * which responded to all TWSI device codes, interpreting them as | ||
348 | * an address within the device. On all other devices found on boards handled by | ||
349 | * this driver, the device is followed by a one-byte "address" which selects | ||
350 | * the "register" or "offset" within the device from which data should | ||
351 | * be read. | ||
352 | */ | ||
353 | int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, | ||
354 | void *buffer, int len) | ||
355 | { | ||
356 | int ret; | ||
357 | u8 *bp = buffer; | ||
358 | |||
359 | ret = 1; | ||
360 | |||
361 | if (dev == QIB_TWSI_NO_DEV) { | ||
362 | /* legacy not-really-I2C */ | ||
363 | addr = (addr << 1) | READ_CMD; | ||
364 | ret = qib_twsi_wr(dd, addr, QIB_TWSI_START); | ||
365 | } else { | ||
366 | /* Actual I2C */ | ||
367 | ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START); | ||
368 | if (ret) { | ||
369 | stop_cmd(dd); | ||
370 | ret = 1; | ||
371 | goto bail; | ||
372 | } | ||
373 | /* | ||
374 | * SFF spec claims we do _not_ stop after the addr | ||
375 | * but simply issue a start with the "read" dev-addr. | ||
376 | * Since we are implicitly waiting for ACK here, | ||
377 | * we need t_buf (nominally 20uSec) before that start, | ||
378 | * and cannot rely on the delay built in to the STOP | ||
379 | */ | ||
380 | ret = qib_twsi_wr(dd, addr, 0); | ||
381 | udelay(TWSI_BUF_WAIT_USEC); | ||
382 | |||
383 | if (ret) { | ||
384 | qib_dev_err(dd, | ||
385 | "Failed to write interface read addr %02X\n", | ||
386 | addr); | ||
387 | ret = 1; | ||
388 | goto bail; | ||
389 | } | ||
390 | ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START); | ||
391 | } | ||
392 | if (ret) { | ||
393 | stop_cmd(dd); | ||
394 | ret = 1; | ||
395 | goto bail; | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * block devices keep clocking data out as long as we ACK, | ||
400 | * automatically incrementing the address. Some have "pages" | ||
401 | * whose boundaries will not be crossed, but the handling | ||
402 | * of these is left to the caller, who is in a better | ||
403 | * position to know. | ||
404 | */ | ||
405 | while (len-- > 0) { | ||
406 | /* | ||
407 | * Get and store data, sending ACK if length remaining, | ||
408 | * else STOP | ||
409 | */ | ||
410 | *bp++ = rd_byte(dd, !len); | ||
411 | } | ||
412 | |||
413 | ret = 0; | ||
414 | |||
415 | bail: | ||
416 | return ret; | ||
417 | } | ||
418 | |||
419 | /* | ||
420 | * qib_twsi_blk_wr | ||
421 | * Formerly called qib_eeprom_internal_write, and only used for eeprom, | ||
422 | * but now the general interface for data transfer to twsi devices. | ||
423 | * One vestige of its former role is that it recognizes a device | ||
424 | * QIB_TWSI_NO_DEV and does the correct operation for the legacy part, | ||
425 | * which responded to all TWSI device codes, interpreting them as | ||
426 | * an address within the device. On all other devices found on boards handled by | ||
427 | * this driver, the device is followed by a one-byte "address" which selects | ||
428 | * the "register" or "offset" within the device to which data should | ||
429 | * be written. | ||
430 | */ | ||
431 | int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, | ||
432 | const void *buffer, int len) | ||
433 | { | ||
434 | int sub_len; | ||
435 | const u8 *bp = buffer; | ||
436 | int max_wait_time, i; | ||
437 | int ret; | ||
438 | ret = 1; | ||
439 | |||
440 | while (len > 0) { | ||
441 | if (dev == QIB_TWSI_NO_DEV) { | ||
442 | if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD, | ||
443 | QIB_TWSI_START)) { | ||
444 | goto failed_write; | ||
445 | } | ||
446 | } else { | ||
447 | /* Real I2C */ | ||
448 | if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START)) | ||
449 | goto failed_write; | ||
450 | ret = qib_twsi_wr(dd, addr, 0); | ||
451 | if (ret) { | ||
452 | qib_dev_err(dd, "Failed to write interface" | ||
453 | " write addr %02X\n", addr); | ||
454 | goto failed_write; | ||
455 | } | ||
456 | } | ||
457 | |||
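| 		/* Write in chunks of at most 4 bytes, then wait for the device to commit */ | ||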
458 | sub_len = min(len, 4); | ||
459 | addr += sub_len; | ||
460 | len -= sub_len; | ||
461 | |||
462 | for (i = 0; i < sub_len; i++) | ||
463 | if (qib_twsi_wr(dd, *bp++, 0)) | ||
464 | goto failed_write; | ||
465 | |||
466 | stop_cmd(dd); | ||
467 | |||
468 | /* | ||
469 | * Wait for write complete by waiting for a successful | ||
470 | * read (the chip replies with a zero after the write | ||
471 | * cmd completes, and before it writes to the eeprom). | ||
472 | * The startcmd for the read will fail the ack until | ||
473 | * the writes have completed. We do this inline to avoid | ||
474 | * the debug prints that are in the real read routine | ||
475 | * if the startcmd fails. | ||
476 | * We also use the proper device address, so it doesn't matter | ||
477 | * whether we have real eeprom_dev. Legacy likes any address. | ||
478 | */ | ||
479 | max_wait_time = 100; | ||
480 | while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) { | ||
481 | stop_cmd(dd); | ||
482 | if (!--max_wait_time) | ||
483 | goto failed_write; | ||
484 | } | ||
485 | /* now read (and ignore) the resulting byte */ | ||
486 | rd_byte(dd, 1); | ||
487 | } | ||
488 | |||
489 | ret = 0; | ||
490 | goto bail; | ||
491 | |||
492 | failed_write: | ||
493 | stop_cmd(dd); | ||
494 | ret = 1; | ||
495 | |||
496 | bail: | ||
497 | return ret; | ||
498 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c new file mode 100644 index 000000000000..f7eb1ddff5f3 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_tx.c | |||
@@ -0,0 +1,557 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/pci.h> | ||
35 | #include <linux/io.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/netdevice.h> | ||
38 | #include <linux/vmalloc.h> | ||
39 | |||
40 | #include "qib.h" | ||
41 | |||
42 | static unsigned qib_hol_timeout_ms = 3000; | ||
43 | module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO); | ||
44 | MODULE_PARM_DESC(hol_timeout_ms, | ||
45 | "duration of user app suspension after link failure"); | ||
46 | |||
47 | unsigned qib_sdma_fetch_arb = 1; | ||
48 | module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO); | ||
49 | MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration"); | ||
50 | |||
51 | /** | ||
52 | * qib_disarm_piobufs - cancel a range of PIO buffers | ||
53 | * @dd: the qlogic_ib device | ||
54 | * @first: the first PIO buffer to cancel | ||
55 | * @cnt: the number of PIO buffers to cancel | ||
56 | * | ||
57 | * Cancel a range of PIO buffers. Used at user process close, | ||
58 | * in case it died while writing to a PIO buffer. | ||
59 | */ | ||
60 | void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt) | ||
61 | { | ||
62 | unsigned long flags; | ||
63 | unsigned i; | ||
64 | unsigned last; | ||
65 | |||
66 | last = first + cnt; | ||
67 | spin_lock_irqsave(&dd->pioavail_lock, flags); | ||
68 | for (i = first; i < last; i++) { | ||
69 | __clear_bit(i, dd->pio_need_disarm); | ||
70 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); | ||
71 | } | ||
72 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * This is called by a user process when it sees the DISARM_BUFS event | ||
77 | * bit is set. | ||
78 | */ | ||
79 | int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd) | ||
80 | { | ||
81 | struct qib_devdata *dd = rcd->dd; | ||
82 | unsigned i; | ||
83 | unsigned last; | ||
84 | unsigned n = 0; | ||
85 | |||
86 | last = rcd->pio_base + rcd->piocnt; | ||
87 | /* | ||
88 | * Don't need uctxt_lock here, since user has called in to us. | ||
89 | * Clear at start in case more interrupts set bits while we | ||
90 | * are disarming | ||
91 | */ | ||
92 | if (rcd->user_event_mask) { | ||
93 | /* | ||
94 | * subctxt_cnt is 0 if not shared, so do base | ||
95 | * separately, first, then remaining subctxt, if any | ||
96 | */ | ||
97 | clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]); | ||
98 | for (i = 1; i < rcd->subctxt_cnt; i++) | ||
99 | clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, | ||
100 | &rcd->user_event_mask[i]); | ||
101 | } | ||
102 | spin_lock_irq(&dd->pioavail_lock); | ||
103 | for (i = rcd->pio_base; i < last; i++) { | ||
104 | if (__test_and_clear_bit(i, dd->pio_need_disarm)) { | ||
105 | n++; | ||
106 | dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i)); | ||
107 | } | ||
108 | } | ||
109 | spin_unlock_irq(&dd->pioavail_lock); | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i) | ||
114 | { | ||
115 | struct qib_pportdata *ppd; | ||
116 | unsigned pidx; | ||
117 | |||
118 | for (pidx = 0; pidx < dd->num_pports; pidx++) { | ||
119 | ppd = dd->pport + pidx; | ||
120 | if (i >= ppd->sdma_state.first_sendbuf && | ||
121 | i < ppd->sdma_state.last_sendbuf) | ||
122 | return ppd; | ||
123 | } | ||
124 | return NULL; | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Return true if send buffer is being used by a user context. | ||
129 | * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect | ||
130 | */ | ||
131 | static int find_ctxt(struct qib_devdata *dd, unsigned bufn) | ||
132 | { | ||
133 | struct qib_ctxtdata *rcd; | ||
134 | unsigned ctxt; | ||
135 | int ret = 0; | ||
136 | |||
137 | spin_lock(&dd->uctxt_lock); | ||
138 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { | ||
139 | rcd = dd->rcd[ctxt]; | ||
140 | if (!rcd || bufn < rcd->pio_base || | ||
141 | bufn >= rcd->pio_base + rcd->piocnt) | ||
142 | continue; | ||
143 | if (rcd->user_event_mask) { | ||
144 | int i; | ||
145 | /* | ||
146 | * subctxt_cnt is 0 if not shared, so do base | ||
147 | * separately, first, then remaining subctxt, if any | ||
148 | */ | ||
149 | set_bit(_QIB_EVENT_DISARM_BUFS_BIT, | ||
150 | &rcd->user_event_mask[0]); | ||
151 | for (i = 1; i < rcd->subctxt_cnt; i++) | ||
152 | set_bit(_QIB_EVENT_DISARM_BUFS_BIT, | ||
153 | &rcd->user_event_mask[i]); | ||
154 | } | ||
155 | ret = 1; | ||
156 | break; | ||
157 | } | ||
158 | spin_unlock(&dd->uctxt_lock); | ||
159 | |||
160 | return ret; | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * Disarm a set of send buffers. If a buffer might be actively being | ||
165 | * written to, mark it to be disarmed later, once the write has | ||
166 | * completed. | ||
167 | * | ||
168 | * This should only be called from the IRQ error handler. | ||
169 | */ | ||
170 | void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, | ||
171 | unsigned cnt) | ||
172 | { | ||
173 | struct qib_pportdata *ppd, *pppd[dd->num_pports]; | ||
174 | unsigned i; | ||
175 | unsigned long flags; | ||
176 | |||
177 | for (i = 0; i < dd->num_pports; i++) | ||
178 | pppd[i] = NULL; | ||
179 | |||
180 | for (i = 0; i < cnt; i++) { | ||
181 | int which; | ||
182 | if (!test_bit(i, mask)) | ||
183 | continue; | ||
184 | /* | ||
185 | * If the buffer is owned by the DMA hardware, | ||
186 | * reset the DMA engine. | ||
187 | */ | ||
188 | ppd = is_sdma_buf(dd, i); | ||
189 | if (ppd) { | ||
190 | pppd[ppd->port] = ppd; | ||
191 | continue; | ||
192 | } | ||
193 | /* | ||
194 | * If the kernel is writing the buffer or the buffer is | ||
195 | * owned by a user process, we can't clear it yet. | ||
196 | */ | ||
197 | spin_lock_irqsave(&dd->pioavail_lock, flags); | ||
198 | if (test_bit(i, dd->pio_writing) || | ||
199 | (!test_bit(i << 1, dd->pioavailkernel) && | ||
200 | find_ctxt(dd, i))) { | ||
201 | __set_bit(i, dd->pio_need_disarm); | ||
202 | which = 0; | ||
203 | } else { | ||
204 | which = 1; | ||
205 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); | ||
206 | } | ||
207 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | ||
208 | } | ||
209 | |||
210 | /* do cancel_sends once per port that had sdma piobufs in error */ | ||
211 | for (i = 0; i < dd->num_pports; i++) | ||
212 | if (pppd[i]) | ||
213 | qib_cancel_sends(pppd[i]); | ||
214 | } | ||
215 | |||
216 | /** | ||
217 | * update_send_bufs - update shadow copy of the PIO availability map | ||
218 | * @dd: the qlogic_ib device | ||
219 | * | ||
220 | * called whenever our local copy indicates we have run out of send buffers | ||
221 | */ | ||
222 | static void update_send_bufs(struct qib_devdata *dd) | ||
223 | { | ||
224 | unsigned long flags; | ||
225 | unsigned i; | ||
226 | const unsigned piobregs = dd->pioavregs; | ||
227 | |||
228 | /* | ||
229 | * If the generation (check) bits have changed, then we update the | ||
230 | * busy bit for the corresponding PIO buffer. This algorithm will | ||
231 | * modify positions to the value they already have in some cases | ||
232 | * (i.e., no change), but it's faster than changing only the bits | ||
233 | * that have changed. | ||
234 | * | ||
235 | * We would like to do this atomically, to avoid spinlocks in the | ||
236 | * critical send path, but that's not really possible, given the | ||
237 | * type of changes, and that this routine could be called on | ||
238 | * multiple CPUs simultaneously, so we lock in this routine only, | ||
239 | * to avoid conflicting updates; all we change is the shadow, and | ||
240 | * it's a single 64-bit memory location, so by definition the update | ||
241 | * is atomic in terms of what other CPUs can see when testing the | ||
242 | * bits. The spin_lock overhead isn't too bad, since it only | ||
243 | * happens when all buffers are in use, so only CPU overhead, not | ||
244 | * latency or bandwidth, is affected. | ||
245 | */ | ||
246 | if (!dd->pioavailregs_dma) | ||
247 | return; | ||
248 | spin_lock_irqsave(&dd->pioavail_lock, flags); | ||
249 | for (i = 0; i < piobregs; i++) { | ||
250 | u64 pchbusy, pchg, piov, pnew; | ||
251 | |||
252 | piov = le64_to_cpu(dd->pioavailregs_dma[i]); | ||
253 | pchg = dd->pioavailkernel[i] & | ||
254 | ~(dd->pioavailshadow[i] ^ piov); | ||
255 | pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT; | ||
256 | if (pchg && (pchbusy & dd->pioavailshadow[i])) { | ||
257 | pnew = dd->pioavailshadow[i] & ~pchbusy; | ||
258 | pnew |= piov & pchbusy; | ||
259 | dd->pioavailshadow[i] = pnew; | ||
260 | } | ||
261 | } | ||
262 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * Debugging code and stats updates if no pio buffers available. | ||
267 | */ | ||
268 | static noinline void no_send_bufs(struct qib_devdata *dd) | ||
269 | { | ||
270 | dd->upd_pio_shadow = 1; | ||
271 | |||
272 | /* not atomic, but if we lose a stat count in a while, that's OK */ | ||
273 | qib_stats.sps_nopiobufs++; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * Common code for normal driver send buffer allocation, and reserved | ||
278 | * allocation. | ||
279 | * | ||
280 | * Do appropriate marking as busy, etc. | ||
281 | * Returns buffer pointer if one is found, otherwise NULL. | ||
282 | */ | ||
283 | u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum, | ||
284 | u32 first, u32 last) | ||
285 | { | ||
286 | unsigned i, j, updated = 0; | ||
287 | unsigned nbufs; | ||
288 | unsigned long flags; | ||
289 | unsigned long *shadow = dd->pioavailshadow; | ||
290 | u32 __iomem *buf; | ||
291 | |||
292 | if (!(dd->flags & QIB_PRESENT)) | ||
293 | return NULL; | ||
294 | |||
295 | nbufs = last - first + 1; /* number in range to check */ | ||
296 | if (dd->upd_pio_shadow) { | ||
297 | /* | ||
298 | * Minor optimization. If we had no buffers on last call, | ||
299 | * start out by doing the update; continue and do scan even | ||
300 | * if no buffers were updated, to be paranoid. | ||
301 | */ | ||
302 | update_send_bufs(dd); | ||
303 | updated++; | ||
304 | } | ||
305 | i = first; | ||
306 | rescan: | ||
307 | /* | ||
308 | * While test_and_set_bit() is atomic, we do that and then the | ||
309 | * change_bit(), and the pair is not. See if this is the cause | ||
310 | * of the remaining armlaunch errors. | ||
311 | */ | ||
312 | spin_lock_irqsave(&dd->pioavail_lock, flags); | ||
313 | for (j = 0; j < nbufs; j++, i++) { | ||
314 | if (i > last) | ||
315 | i = first; | ||
316 | if (__test_and_set_bit((2 * i) + 1, shadow)) | ||
317 | continue; | ||
318 | /* flip generation bit */ | ||
319 | __change_bit(2 * i, shadow); | ||
320 | /* remember that the buffer can be written to now */ | ||
321 | __set_bit(i, dd->pio_writing); | ||
322 | break; | ||
323 | } | ||
324 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | ||
325 | |||
326 | if (j == nbufs) { | ||
327 | if (!updated) { | ||
328 | /* | ||
329 | * First time through; shadow exhausted, but there may be | ||
330 | * buffers available, so try an update and then rescan. | ||
331 | */ | ||
332 | update_send_bufs(dd); | ||
333 | updated++; | ||
334 | i = first; | ||
335 | goto rescan; | ||
336 | } | ||
337 | no_send_bufs(dd); | ||
338 | buf = NULL; | ||
339 | } else { | ||
340 | if (i < dd->piobcnt2k) | ||
341 | buf = (u32 __iomem *)(dd->pio2kbase + | ||
342 | i * dd->palign); | ||
343 | else | ||
344 | buf = (u32 __iomem *)(dd->pio4kbase + | ||
345 | (i - dd->piobcnt2k) * dd->align4k); | ||
346 | if (pbufnum) | ||
347 | *pbufnum = i; | ||
348 | dd->upd_pio_shadow = 0; | ||
349 | } | ||
350 | |||
351 | return buf; | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * Record that the caller is finished writing to the buffer so we don't | ||
356 | * disarm it while it is being written and disarm it now if needed. | ||
357 | */ | ||
358 | void qib_sendbuf_done(struct qib_devdata *dd, unsigned n) | ||
359 | { | ||
360 | unsigned long flags; | ||
361 | |||
362 | spin_lock_irqsave(&dd->pioavail_lock, flags); | ||
363 | __clear_bit(n, dd->pio_writing); | ||
364 | if (__test_and_clear_bit(n, dd->pio_need_disarm)) | ||
365 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n)); | ||
366 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | ||
367 | } | ||
368 | |||
369 | /** | ||
370 | * qib_chg_pioavailkernel - change which send buffers are available for kernel | ||
371 | * @dd: the qlogic_ib device | ||
372 | * @start: the starting send buffer number | ||
373 | * @len: the number of send buffers | ||
374 | * @avail: true if the buffers are available for kernel use, false otherwise | ||
    | * @rcd: the context data, passed through to the chip-specific txchk_change handler | ||
375 | */ | ||
376 | void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start, | ||
377 | unsigned len, u32 avail, struct qib_ctxtdata *rcd) | ||
378 | { | ||
379 | unsigned long flags; | ||
380 | unsigned end; | ||
381 | unsigned ostart = start; | ||
382 | |||
383 | /* There are two bits per send buffer (busy and generation) */ | ||
384 | start *= 2; | ||
385 | end = start + len * 2; | ||
386 | |||
387 | spin_lock_irqsave(&dd->pioavail_lock, flags); | ||
388 | /* Set or clear the busy bit in the shadow. */ | ||
389 | while (start < end) { | ||
390 | if (avail) { | ||
391 | unsigned long dma; | ||
392 | int i; | ||
393 | |||
394 | /* | ||
395 | * The BUSY bit will never be set, because we disarm | ||
396 | * the user buffers before we hand them back to the | ||
397 | * kernel. We do have to make sure the generation | ||
398 | * bit is set correctly in shadow, since it could | ||
399 | * have changed many times while allocated to user. | ||
400 | * We can't use the bitmap functions on the full | ||
401 | * dma array because it is always little-endian, so | ||
402 | * we have to flip to host-order first. | ||
403 | * BITS_PER_LONG is slightly wrong, since it's | ||
404 | * always 64 bits per register in the chip... | ||
405 | * We only run on 64-bit kernels, so that's OK. | ||
406 | */ | ||
407 | i = start / BITS_PER_LONG; | ||
408 | __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start, | ||
409 | dd->pioavailshadow); | ||
410 | dma = (unsigned long) | ||
411 | le64_to_cpu(dd->pioavailregs_dma[i]); | ||
412 | if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT + | ||
413 | start) % BITS_PER_LONG, &dma)) | ||
414 | __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT + | ||
415 | start, dd->pioavailshadow); | ||
416 | else | ||
417 | __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT | ||
418 | + start, dd->pioavailshadow); | ||
419 | __set_bit(start, dd->pioavailkernel); | ||
420 | } else { | ||
421 | __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT, | ||
422 | dd->pioavailshadow); | ||
423 | __clear_bit(start, dd->pioavailkernel); | ||
424 | } | ||
425 | start += 2; | ||
426 | } | ||
427 | |||
428 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | ||
429 | |||
430 | dd->f_txchk_change(dd, ostart, len, avail, rcd); | ||
431 | } | ||
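/*
 * Editorial sketch, not part of the patch: the shadow bookkeeping above
 * packs two bits per send buffer.  The hypothetical helpers below only
 * illustrate the layout the code implies -- generation ("check") bit at
 * 2*n and busy bit at 2*n + 1, matching the 2*i / 2*i + 1 usage in
 * qib_getsendbuf_range() and the *_CHECK_SHIFT / *_BUSY_SHIFT offsets
 * applied to start = 2*n in qib_chg_pioavailkernel().
 */
static inline unsigned pio_check_bit_sketch(unsigned n)
{
	return 2 * n;		/* flipped by the chip on every send */
}

static inline unsigned pio_busy_bit_sketch(unsigned n)
{
	return 2 * n + 1;	/* held set while the buffer is allocated */
}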
432 | |||
433 | /* | ||
434 | * Flush all sends that might be in the ready to send state, as well as any | ||
435 | * that are in the process of being sent. Used whenever we need to be | ||
436 | * sure the send side is idle. Cleans up all buffer state by canceling | ||
437 | * all pio buffers, and issuing an abort, which cleans up anything in the | ||
438 | * launch fifo. The cancel is superfluous on some chip versions, but | ||
439 | * it's safer to always do it. | ||
440 | * PIOAvail bits are updated by the chip as if a normal send had happened. | ||
441 | */ | ||
442 | void qib_cancel_sends(struct qib_pportdata *ppd) | ||
443 | { | ||
444 | struct qib_devdata *dd = ppd->dd; | ||
445 | struct qib_ctxtdata *rcd; | ||
446 | unsigned long flags; | ||
447 | unsigned ctxt; | ||
448 | unsigned i; | ||
449 | unsigned last; | ||
450 | |||
451 | /* | ||
452 | * Tell PSM to disarm buffers again before trying to reuse them. | ||
453 | * We need to be sure the rcd doesn't change out from under us | ||
454 | * while we do so. We hold the two locks sequentially. We might | ||
455 | * needlessly set some need_disarm bits as a result, if the | ||
456 | * context is closed after we release the uctxt_lock, but that's | ||
457 | * fairly benign, and safer than nesting the locks. | ||
458 | */ | ||
459 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { | ||
460 | spin_lock_irqsave(&dd->uctxt_lock, flags); | ||
461 | rcd = dd->rcd[ctxt]; | ||
462 | if (rcd && rcd->ppd == ppd) { | ||
463 | last = rcd->pio_base + rcd->piocnt; | ||
464 | if (rcd->user_event_mask) { | ||
465 | /* | ||
466 | * subctxt_cnt is 0 if not shared, so do base | ||
467 | * separately, first, then remaining subctxt, | ||
468 | * if any | ||
469 | */ | ||
470 | set_bit(_QIB_EVENT_DISARM_BUFS_BIT, | ||
471 | &rcd->user_event_mask[0]); | ||
472 | for (i = 1; i < rcd->subctxt_cnt; i++) | ||
473 | set_bit(_QIB_EVENT_DISARM_BUFS_BIT, | ||
474 | &rcd->user_event_mask[i]); | ||
475 | } | ||
476 | i = rcd->pio_base; | ||
477 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
478 | spin_lock_irqsave(&dd->pioavail_lock, flags); | ||
479 | for (; i < last; i++) | ||
480 | __set_bit(i, dd->pio_need_disarm); | ||
481 | spin_unlock_irqrestore(&dd->pioavail_lock, flags); | ||
482 | } else | ||
483 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | ||
484 | } | ||
485 | |||
486 | if (!(dd->flags & QIB_HAS_SEND_DMA)) | ||
487 | dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL | | ||
488 | QIB_SENDCTRL_FLUSH); | ||
489 | } | ||
490 | |||
491 | /* | ||
492 | * Force an update of the in-memory copy of the pioavail registers, when | ||
493 | * needed for any of a variety of reasons. | ||
494 | * If already off, this routine is a nop, on the assumption that the | ||
495 | * caller (or set of callers) will "do the right thing". | ||
496 | * This is a per-device operation, so just the first port. | ||
497 | */ | ||
498 | void qib_force_pio_avail_update(struct qib_devdata *dd) | ||
499 | { | ||
500 | dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | ||
501 | } | ||
502 | |||
503 | void qib_hol_down(struct qib_pportdata *ppd) | ||
504 | { | ||
505 | /* | ||
506 | * Cancel sends when the link goes DOWN so that we aren't doing it | ||
507 | * at INIT when we might be trying to send SMI packets. | ||
508 | */ | ||
509 | if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) | ||
510 | qib_cancel_sends(ppd); | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * Link is at INIT. | ||
515 | * We start the HoL timer so we can detect stuck packets blocking SMP replies. | ||
516 | * Timer may already be running, so use mod_timer, not add_timer. | ||
517 | */ | ||
518 | void qib_hol_init(struct qib_pportdata *ppd) | ||
519 | { | ||
520 | if (ppd->hol_state != QIB_HOL_INIT) { | ||
521 | ppd->hol_state = QIB_HOL_INIT; | ||
522 | mod_timer(&ppd->hol_timer, | ||
523 | jiffies + msecs_to_jiffies(qib_hol_timeout_ms)); | ||
524 | } | ||
525 | } | ||
526 | |||
527 | /* | ||
528 | * Link is up: continue any user processes. Leave the HoL timer | ||
529 | * running if it is already set; it becomes a no-op once it sees | ||
530 | * that the link is up. | ||
531 | */ | ||
532 | void qib_hol_up(struct qib_pportdata *ppd) | ||
533 | { | ||
534 | ppd->hol_state = QIB_HOL_UP; | ||
535 | } | ||
536 | |||
537 | /* | ||
538 | * This is only called via the timer. | ||
539 | */ | ||
540 | void qib_hol_event(unsigned long opaque) | ||
541 | { | ||
542 | struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; | ||
543 | |||
544 | /* If hardware error, etc, skip. */ | ||
545 | if (!(ppd->dd->flags & QIB_INITTED)) | ||
546 | return; | ||
547 | |||
548 | if (ppd->hol_state != QIB_HOL_UP) { | ||
549 | /* | ||
550 | * Try to flush sends in case a stuck packet is blocking | ||
551 | * SMP replies. | ||
552 | */ | ||
553 | qib_hol_down(ppd); | ||
554 | mod_timer(&ppd->hol_timer, | ||
555 | jiffies + msecs_to_jiffies(qib_hol_timeout_ms)); | ||
556 | } | ||
557 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c new file mode 100644 index 000000000000..6c7fe78cca64 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -0,0 +1,555 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include "qib.h" | ||
36 | |||
37 | /* cut down ridiculously long IB macro names */ | ||
38 | #define OP(x) IB_OPCODE_UC_##x | ||
39 | |||
40 | /** | ||
41 | * qib_make_uc_req - construct a request packet (SEND, RDMA write) | ||
42 | * @qp: a pointer to the QP | ||
43 | * | ||
44 | * Return 1 if constructed; otherwise, return 0. | ||
45 | */ | ||
46 | int qib_make_uc_req(struct qib_qp *qp) | ||
47 | { | ||
48 | struct qib_other_headers *ohdr; | ||
49 | struct qib_swqe *wqe; | ||
50 | unsigned long flags; | ||
51 | u32 hwords; | ||
52 | u32 bth0; | ||
53 | u32 len; | ||
54 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | ||
55 | int ret = 0; | ||
56 | |||
57 | spin_lock_irqsave(&qp->s_lock, flags); | ||
58 | |||
59 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { | ||
60 | if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) | ||
61 | goto bail; | ||
62 | /* We are in the error state, flush the work request. */ | ||
63 | if (qp->s_last == qp->s_head) | ||
64 | goto bail; | ||
65 | /* If DMAs are in progress, we can't flush immediately. */ | ||
66 | if (atomic_read(&qp->s_dma_busy)) { | ||
67 | qp->s_flags |= QIB_S_WAIT_DMA; | ||
68 | goto bail; | ||
69 | } | ||
70 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
71 | qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); | ||
72 | goto done; | ||
73 | } | ||
74 | |||
75 | ohdr = &qp->s_hdr.u.oth; | ||
76 | if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) | ||
77 | ohdr = &qp->s_hdr.u.l.oth; | ||
78 | |||
79 | /* header size in 32-bit words LRH+BTH = (8+12)/4. */ | ||
80 | hwords = 5; | ||
81 | bth0 = 0; | ||
82 | |||
83 | /* Get the next send request. */ | ||
84 | wqe = get_swqe_ptr(qp, qp->s_cur); | ||
85 | qp->s_wqe = NULL; | ||
86 | switch (qp->s_state) { | ||
87 | default: | ||
88 | if (!(ib_qib_state_ops[qp->state] & | ||
89 | QIB_PROCESS_NEXT_SEND_OK)) | ||
90 | goto bail; | ||
91 | /* Check if send work queue is empty. */ | ||
92 | if (qp->s_cur == qp->s_head) | ||
93 | goto bail; | ||
94 | /* | ||
95 | * Start a new request. | ||
96 | */ | ||
97 | wqe->psn = qp->s_next_psn; | ||
98 | qp->s_psn = qp->s_next_psn; | ||
99 | qp->s_sge.sge = wqe->sg_list[0]; | ||
100 | qp->s_sge.sg_list = wqe->sg_list + 1; | ||
101 | qp->s_sge.num_sge = wqe->wr.num_sge; | ||
102 | qp->s_sge.total_len = wqe->length; | ||
103 | len = wqe->length; | ||
104 | qp->s_len = len; | ||
105 | switch (wqe->wr.opcode) { | ||
106 | case IB_WR_SEND: | ||
107 | case IB_WR_SEND_WITH_IMM: | ||
108 | if (len > pmtu) { | ||
109 | qp->s_state = OP(SEND_FIRST); | ||
110 | len = pmtu; | ||
111 | break; | ||
112 | } | ||
113 | if (wqe->wr.opcode == IB_WR_SEND) | ||
114 | qp->s_state = OP(SEND_ONLY); | ||
115 | else { | ||
116 | qp->s_state = | ||
117 | OP(SEND_ONLY_WITH_IMMEDIATE); | ||
118 | /* Immediate data comes after the BTH */ | ||
119 | ohdr->u.imm_data = wqe->wr.ex.imm_data; | ||
120 | hwords += 1; | ||
121 | } | ||
122 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
123 | bth0 |= IB_BTH_SOLICITED; | ||
124 | qp->s_wqe = wqe; | ||
125 | if (++qp->s_cur >= qp->s_size) | ||
126 | qp->s_cur = 0; | ||
127 | break; | ||
128 | |||
129 | case IB_WR_RDMA_WRITE: | ||
130 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
131 | ohdr->u.rc.reth.vaddr = | ||
132 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | ||
133 | ohdr->u.rc.reth.rkey = | ||
134 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | ||
135 | ohdr->u.rc.reth.length = cpu_to_be32(len); | ||
136 | hwords += sizeof(struct ib_reth) / 4; | ||
137 | if (len > pmtu) { | ||
138 | qp->s_state = OP(RDMA_WRITE_FIRST); | ||
139 | len = pmtu; | ||
140 | break; | ||
141 | } | ||
142 | if (wqe->wr.opcode == IB_WR_RDMA_WRITE) | ||
143 | qp->s_state = OP(RDMA_WRITE_ONLY); | ||
144 | else { | ||
145 | qp->s_state = | ||
146 | OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); | ||
147 | /* Immediate data comes after the RETH */ | ||
148 | ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; | ||
149 | hwords += 1; | ||
150 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
151 | bth0 |= IB_BTH_SOLICITED; | ||
152 | } | ||
153 | qp->s_wqe = wqe; | ||
154 | if (++qp->s_cur >= qp->s_size) | ||
155 | qp->s_cur = 0; | ||
156 | break; | ||
157 | |||
158 | default: | ||
159 | goto bail; | ||
160 | } | ||
161 | break; | ||
162 | |||
163 | case OP(SEND_FIRST): | ||
164 | qp->s_state = OP(SEND_MIDDLE); | ||
165 | /* FALLTHROUGH */ | ||
166 | case OP(SEND_MIDDLE): | ||
167 | len = qp->s_len; | ||
168 | if (len > pmtu) { | ||
169 | len = pmtu; | ||
170 | break; | ||
171 | } | ||
172 | if (wqe->wr.opcode == IB_WR_SEND) | ||
173 | qp->s_state = OP(SEND_LAST); | ||
174 | else { | ||
175 | qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); | ||
176 | /* Immediate data comes after the BTH */ | ||
177 | ohdr->u.imm_data = wqe->wr.ex.imm_data; | ||
178 | hwords += 1; | ||
179 | } | ||
180 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
181 | bth0 |= IB_BTH_SOLICITED; | ||
182 | qp->s_wqe = wqe; | ||
183 | if (++qp->s_cur >= qp->s_size) | ||
184 | qp->s_cur = 0; | ||
185 | break; | ||
186 | |||
187 | case OP(RDMA_WRITE_FIRST): | ||
188 | qp->s_state = OP(RDMA_WRITE_MIDDLE); | ||
189 | /* FALLTHROUGH */ | ||
190 | case OP(RDMA_WRITE_MIDDLE): | ||
191 | len = qp->s_len; | ||
192 | if (len > pmtu) { | ||
193 | len = pmtu; | ||
194 | break; | ||
195 | } | ||
196 | if (wqe->wr.opcode == IB_WR_RDMA_WRITE) | ||
197 | qp->s_state = OP(RDMA_WRITE_LAST); | ||
198 | else { | ||
199 | qp->s_state = | ||
200 | OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); | ||
201 | /* Immediate data comes after the BTH */ | ||
202 | ohdr->u.imm_data = wqe->wr.ex.imm_data; | ||
203 | hwords += 1; | ||
204 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
205 | bth0 |= IB_BTH_SOLICITED; | ||
206 | } | ||
207 | qp->s_wqe = wqe; | ||
208 | if (++qp->s_cur >= qp->s_size) | ||
209 | qp->s_cur = 0; | ||
210 | break; | ||
211 | } | ||
212 | qp->s_len -= len; | ||
213 | qp->s_hdrwords = hwords; | ||
214 | qp->s_cur_sge = &qp->s_sge; | ||
215 | qp->s_cur_size = len; | ||
216 | qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), | ||
217 | qp->s_next_psn++ & QIB_PSN_MASK); | ||
218 | done: | ||
219 | ret = 1; | ||
220 | goto unlock; | ||
221 | |||
222 | bail: | ||
223 | qp->s_flags &= ~QIB_S_BUSY; | ||
224 | unlock: | ||
225 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
226 | return ret; | ||
227 | } | ||
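/*
 * Editorial sketch, not part of the patch: the state machine above
 * splits a UC SEND or RDMA WRITE into FIRST/MIDDLE/LAST packets of at
 * most one path MTU each.  Hypothetical helper showing the resulting
 * packet count; e.g. a 5000-byte SEND at a 2048-byte PMTU becomes
 * SEND_FIRST (2048) + SEND_MIDDLE (2048) + SEND_LAST (904) = 3 packets.
 */
static u32 uc_packet_count_sketch(u32 len, u32 pmtu)
{
	/* a zero-length message still consumes one *_ONLY packet */
	return len ? DIV_ROUND_UP(len, pmtu) : 1;
}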
228 | |||
229 | /** | ||
230 | * qib_uc_rcv - handle an incoming UC packet | ||
231 | * @ibp: the port the packet came in on | ||
232 | * @hdr: the header of the packet | ||
233 | * @has_grh: true if the packet has a GRH | ||
234 | * @data: the packet data | ||
235 | * @tlen: the length of the packet | ||
236 | * @qp: the QP for this packet. | ||
237 | * | ||
238 | * This is called from qib_qp_rcv() to process an incoming UC packet | ||
239 | * for the given QP. | ||
240 | * Called at interrupt level. | ||
241 | */ | ||
242 | void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | ||
243 | int has_grh, void *data, u32 tlen, struct qib_qp *qp) | ||
244 | { | ||
245 | struct qib_other_headers *ohdr; | ||
246 | unsigned long flags; | ||
247 | u32 opcode; | ||
248 | u32 hdrsize; | ||
249 | u32 psn; | ||
250 | u32 pad; | ||
251 | struct ib_wc wc; | ||
252 | u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); | ||
253 | struct ib_reth *reth; | ||
254 | int ret; | ||
255 | |||
256 | /* Check for GRH */ | ||
257 | if (!has_grh) { | ||
258 | ohdr = &hdr->u.oth; | ||
259 | hdrsize = 8 + 12; /* LRH + BTH */ | ||
260 | } else { | ||
261 | ohdr = &hdr->u.l.oth; | ||
262 | hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */ | ||
263 | } | ||
264 | |||
265 | opcode = be32_to_cpu(ohdr->bth[0]); | ||
266 | spin_lock_irqsave(&qp->s_lock, flags); | ||
267 | if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) | ||
268 | goto sunlock; | ||
269 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
270 | |||
271 | psn = be32_to_cpu(ohdr->bth[2]); | ||
272 | opcode >>= 24; | ||
273 | memset(&wc, 0, sizeof wc); | ||
274 | |||
275 | /* Prevent simultaneous processing after APM on different CPUs */ | ||
276 | spin_lock(&qp->r_lock); | ||
277 | |||
278 | /* Compare the PSN against the expected PSN. */ | ||
279 | if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { | ||
280 | /* | ||
281 | * Handle a sequence error. | ||
282 | * Silently drop any current message. | ||
283 | */ | ||
284 | qp->r_psn = psn; | ||
285 | inv: | ||
286 | if (qp->r_state == OP(SEND_FIRST) || | ||
287 | qp->r_state == OP(SEND_MIDDLE)) { | ||
288 | set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); | ||
289 | qp->r_sge.num_sge = 0; | ||
290 | } else | ||
291 | while (qp->r_sge.num_sge) { | ||
292 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
293 | if (--qp->r_sge.num_sge) | ||
294 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
295 | } | ||
296 | qp->r_state = OP(SEND_LAST); | ||
297 | switch (opcode) { | ||
298 | case OP(SEND_FIRST): | ||
299 | case OP(SEND_ONLY): | ||
300 | case OP(SEND_ONLY_WITH_IMMEDIATE): | ||
301 | goto send_first; | ||
302 | |||
303 | case OP(RDMA_WRITE_FIRST): | ||
304 | case OP(RDMA_WRITE_ONLY): | ||
305 | case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): | ||
306 | goto rdma_first; | ||
307 | |||
308 | default: | ||
309 | goto drop; | ||
310 | } | ||
311 | } | ||
312 | |||
313 | /* Check for opcode sequence errors. */ | ||
314 | switch (qp->r_state) { | ||
315 | case OP(SEND_FIRST): | ||
316 | case OP(SEND_MIDDLE): | ||
317 | if (opcode == OP(SEND_MIDDLE) || | ||
318 | opcode == OP(SEND_LAST) || | ||
319 | opcode == OP(SEND_LAST_WITH_IMMEDIATE)) | ||
320 | break; | ||
321 | goto inv; | ||
322 | |||
323 | case OP(RDMA_WRITE_FIRST): | ||
324 | case OP(RDMA_WRITE_MIDDLE): | ||
325 | if (opcode == OP(RDMA_WRITE_MIDDLE) || | ||
326 | opcode == OP(RDMA_WRITE_LAST) || | ||
327 | opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) | ||
328 | break; | ||
329 | goto inv; | ||
330 | |||
331 | default: | ||
332 | if (opcode == OP(SEND_FIRST) || | ||
333 | opcode == OP(SEND_ONLY) || | ||
334 | opcode == OP(SEND_ONLY_WITH_IMMEDIATE) || | ||
335 | opcode == OP(RDMA_WRITE_FIRST) || | ||
336 | opcode == OP(RDMA_WRITE_ONLY) || | ||
337 | opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) | ||
338 | break; | ||
339 | goto inv; | ||
340 | } | ||
341 | |||
342 | if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { | ||
343 | qp->r_flags |= QIB_R_COMM_EST; | ||
344 | if (qp->ibqp.event_handler) { | ||
345 | struct ib_event ev; | ||
346 | |||
347 | ev.device = qp->ibqp.device; | ||
348 | ev.element.qp = &qp->ibqp; | ||
349 | ev.event = IB_EVENT_COMM_EST; | ||
350 | qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); | ||
351 | } | ||
352 | } | ||
353 | |||
354 | /* OK, process the packet. */ | ||
355 | switch (opcode) { | ||
356 | case OP(SEND_FIRST): | ||
357 | case OP(SEND_ONLY): | ||
358 | case OP(SEND_ONLY_WITH_IMMEDIATE): | ||
359 | send_first: | ||
360 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) | ||
361 | qp->r_sge = qp->s_rdma_read_sge; | ||
362 | else { | ||
363 | ret = qib_get_rwqe(qp, 0); | ||
364 | if (ret < 0) | ||
365 | goto op_err; | ||
366 | if (!ret) | ||
367 | goto drop; | ||
368 | /* | ||
369 | * qp->s_rdma_read_sge will be the owner | ||
370 | * of the mr references. | ||
371 | */ | ||
372 | qp->s_rdma_read_sge = qp->r_sge; | ||
373 | } | ||
374 | qp->r_rcv_len = 0; | ||
375 | if (opcode == OP(SEND_ONLY)) | ||
376 | goto send_last; | ||
377 | else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) | ||
378 | goto send_last_imm; | ||
379 | /* FALLTHROUGH */ | ||
380 | case OP(SEND_MIDDLE): | ||
381 | /* Check for invalid length PMTU or posted rwqe len. */ | ||
382 | if (unlikely(tlen != (hdrsize + pmtu + 4))) | ||
383 | goto rewind; | ||
384 | qp->r_rcv_len += pmtu; | ||
385 | if (unlikely(qp->r_rcv_len > qp->r_len)) | ||
386 | goto rewind; | ||
387 | qib_copy_sge(&qp->r_sge, data, pmtu, 0); | ||
388 | break; | ||
389 | |||
390 | case OP(SEND_LAST_WITH_IMMEDIATE): | ||
391 | send_last_imm: | ||
392 | wc.ex.imm_data = ohdr->u.imm_data; | ||
393 | hdrsize += 4; | ||
394 | wc.wc_flags = IB_WC_WITH_IMM; | ||
395 | /* FALLTHROUGH */ | ||
396 | case OP(SEND_LAST): | ||
397 | send_last: | ||
398 | /* Get the number of bytes the message was padded by. */ | ||
399 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | ||
400 | /* Check for invalid length. */ | ||
401 | /* XXX LAST len should be >= 1 */ | ||
402 | if (unlikely(tlen < (hdrsize + pad + 4))) | ||
403 | goto rewind; | ||
404 | /* Don't count the CRC. */ | ||
405 | tlen -= (hdrsize + pad + 4); | ||
406 | wc.byte_len = tlen + qp->r_rcv_len; | ||
407 | if (unlikely(wc.byte_len > qp->r_len)) | ||
408 | goto rewind; | ||
409 | wc.opcode = IB_WC_RECV; | ||
410 | last_imm: | ||
411 | qib_copy_sge(&qp->r_sge, data, tlen, 0); | ||
412 | while (qp->s_rdma_read_sge.num_sge) { | ||
413 | atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); | ||
414 | if (--qp->s_rdma_read_sge.num_sge) | ||
415 | qp->s_rdma_read_sge.sge = | ||
416 | *qp->s_rdma_read_sge.sg_list++; | ||
417 | } | ||
418 | wc.wr_id = qp->r_wr_id; | ||
419 | wc.status = IB_WC_SUCCESS; | ||
420 | wc.qp = &qp->ibqp; | ||
421 | wc.src_qp = qp->remote_qpn; | ||
422 | wc.slid = qp->remote_ah_attr.dlid; | ||
423 | wc.sl = qp->remote_ah_attr.sl; | ||
424 | /* Signal completion event if the solicited bit is set. */ | ||
425 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | ||
426 | (ohdr->bth[0] & | ||
427 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); | ||
428 | break; | ||
429 | |||
430 | case OP(RDMA_WRITE_FIRST): | ||
431 | case OP(RDMA_WRITE_ONLY): | ||
432 | case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */ | ||
433 | rdma_first: | ||
434 | if (unlikely(!(qp->qp_access_flags & | ||
435 | IB_ACCESS_REMOTE_WRITE))) { | ||
436 | goto drop; | ||
437 | } | ||
438 | reth = &ohdr->u.rc.reth; | ||
439 | hdrsize += sizeof(*reth); | ||
440 | qp->r_len = be32_to_cpu(reth->length); | ||
441 | qp->r_rcv_len = 0; | ||
442 | qp->r_sge.sg_list = NULL; | ||
443 | if (qp->r_len != 0) { | ||
444 | u32 rkey = be32_to_cpu(reth->rkey); | ||
445 | u64 vaddr = be64_to_cpu(reth->vaddr); | ||
446 | int ok; | ||
447 | |||
448 | /* Check rkey */ | ||
449 | ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, | ||
450 | vaddr, rkey, IB_ACCESS_REMOTE_WRITE); | ||
451 | if (unlikely(!ok)) | ||
452 | goto drop; | ||
453 | qp->r_sge.num_sge = 1; | ||
454 | } else { | ||
455 | qp->r_sge.num_sge = 0; | ||
456 | qp->r_sge.sge.mr = NULL; | ||
457 | qp->r_sge.sge.vaddr = NULL; | ||
458 | qp->r_sge.sge.length = 0; | ||
459 | qp->r_sge.sge.sge_length = 0; | ||
460 | } | ||
461 | if (opcode == OP(RDMA_WRITE_ONLY)) | ||
462 | goto rdma_last; | ||
463 | else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) | ||
464 | goto rdma_last_imm; | ||
465 | /* FALLTHROUGH */ | ||
466 | case OP(RDMA_WRITE_MIDDLE): | ||
467 | /* Check for invalid length PMTU or posted rwqe len. */ | ||
468 | if (unlikely(tlen != (hdrsize + pmtu + 4))) | ||
469 | goto drop; | ||
470 | qp->r_rcv_len += pmtu; | ||
471 | if (unlikely(qp->r_rcv_len > qp->r_len)) | ||
472 | goto drop; | ||
473 | qib_copy_sge(&qp->r_sge, data, pmtu, 1); | ||
474 | break; | ||
475 | |||
476 | case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): | ||
477 | rdma_last_imm: | ||
478 | wc.ex.imm_data = ohdr->u.imm_data; | ||
479 | hdrsize += 4; | ||
480 | wc.wc_flags = IB_WC_WITH_IMM; | ||
481 | |||
482 | /* Get the number of bytes the message was padded by. */ | ||
483 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | ||
484 | /* Check for invalid length. */ | ||
485 | /* XXX LAST len should be >= 1 */ | ||
486 | if (unlikely(tlen < (hdrsize + pad + 4))) | ||
487 | goto drop; | ||
488 | /* Don't count the CRC. */ | ||
489 | tlen -= (hdrsize + pad + 4); | ||
490 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) | ||
491 | goto drop; | ||
492 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) | ||
493 | while (qp->s_rdma_read_sge.num_sge) { | ||
494 | atomic_dec(&qp->s_rdma_read_sge.sge.mr-> | ||
495 | refcount); | ||
496 | if (--qp->s_rdma_read_sge.num_sge) | ||
497 | qp->s_rdma_read_sge.sge = | ||
498 | *qp->s_rdma_read_sge.sg_list++; | ||
499 | } | ||
500 | else { | ||
501 | ret = qib_get_rwqe(qp, 1); | ||
502 | if (ret < 0) | ||
503 | goto op_err; | ||
504 | if (!ret) | ||
505 | goto drop; | ||
506 | } | ||
507 | wc.byte_len = qp->r_len; | ||
508 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; | ||
509 | goto last_imm; | ||
510 | |||
511 | case OP(RDMA_WRITE_LAST): | ||
512 | rdma_last: | ||
513 | /* Get the number of bytes the message was padded by. */ | ||
514 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | ||
515 | /* Check for invalid length. */ | ||
516 | /* XXX LAST len should be >= 1 */ | ||
517 | if (unlikely(tlen < (hdrsize + pad + 4))) | ||
518 | goto drop; | ||
519 | /* Don't count the CRC. */ | ||
520 | tlen -= (hdrsize + pad + 4); | ||
521 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) | ||
522 | goto drop; | ||
523 | qib_copy_sge(&qp->r_sge, data, tlen, 1); | ||
524 | while (qp->r_sge.num_sge) { | ||
525 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
526 | if (--qp->r_sge.num_sge) | ||
527 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
528 | } | ||
529 | break; | ||
530 | |||
531 | default: | ||
532 | /* Drop packet for unknown opcodes. */ | ||
533 | goto drop; | ||
534 | } | ||
535 | qp->r_psn++; | ||
536 | qp->r_state = opcode; | ||
537 | spin_unlock(&qp->r_lock); | ||
538 | return; | ||
539 | |||
540 | rewind: | ||
541 | set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); | ||
542 | qp->r_sge.num_sge = 0; | ||
543 | drop: | ||
544 | ibp->n_pkt_drops++; | ||
545 | spin_unlock(&qp->r_lock); | ||
546 | return; | ||
547 | |||
548 | op_err: | ||
549 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | ||
550 | spin_unlock(&qp->r_lock); | ||
551 | return; | ||
552 | |||
553 | sunlock: | ||
554 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
555 | } | ||
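/*
 * Editorial sketch, not part of the patch: qib_uc_rcv() above relies on
 * qib_cmp24() (defined elsewhere in the driver) to compare 24-bit PSNs.
 * One well-defined way to express such a circular comparison is shown
 * below; the driver's own implementation may differ.  Returns <0 if a
 * is behind b, 0 if equal, >0 if a is ahead, modulo 2^24.
 */
static int psn_cmp24_sketch(u32 a, u32 b)
{
	s32 diff = (a - b) & 0xffffff;	/* 24-bit modular difference */

	return diff >= 0x800000 ? diff - 0x1000000 : diff;
}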
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c new file mode 100644 index 000000000000..c838cda73347 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
@@ -0,0 +1,607 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <rdma/ib_smi.h> | ||
35 | |||
36 | #include "qib.h" | ||
37 | #include "qib_mad.h" | ||
38 | |||
39 | /** | ||
40 | * qib_ud_loopback - handle send on loopback QPs | ||
41 | * @sqp: the sending QP | ||
42 | * @swqe: the send work request | ||
43 | * | ||
44 | * This is called from qib_make_ud_req() to forward a WQE addressed | ||
45 | * to the same HCA. | ||
46 | * Note that the receive interrupt handler may be calling qib_ud_rcv() | ||
47 | * while this is being called. | ||
48 | */ | ||
49 | static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) | ||
50 | { | ||
51 | struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); | ||
52 | struct qib_pportdata *ppd; | ||
53 | struct qib_qp *qp; | ||
54 | struct ib_ah_attr *ah_attr; | ||
55 | unsigned long flags; | ||
56 | struct qib_sge_state ssge; | ||
57 | struct qib_sge *sge; | ||
58 | struct ib_wc wc; | ||
59 | u32 length; | ||
60 | |||
61 | qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn); | ||
62 | if (!qp) { | ||
63 | ibp->n_pkt_drops++; | ||
64 | return; | ||
65 | } | ||
66 | if (qp->ibqp.qp_type != sqp->ibqp.qp_type || | ||
67 | !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { | ||
68 | ibp->n_pkt_drops++; | ||
69 | goto drop; | ||
70 | } | ||
71 | |||
72 | ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr; | ||
73 | ppd = ppd_from_ibp(ibp); | ||
74 | |||
75 | if (qp->ibqp.qp_num > 1) { | ||
76 | u16 pkey1; | ||
77 | u16 pkey2; | ||
78 | u16 lid; | ||
79 | |||
80 | pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index); | ||
81 | pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); | ||
82 | if (unlikely(!qib_pkey_ok(pkey1, pkey2))) { | ||
83 | lid = ppd->lid | (ah_attr->src_path_bits & | ||
84 | ((1 << ppd->lmc) - 1)); | ||
85 | qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1, | ||
86 | ah_attr->sl, | ||
87 | sqp->ibqp.qp_num, qp->ibqp.qp_num, | ||
88 | cpu_to_be16(lid), | ||
89 | cpu_to_be16(ah_attr->dlid)); | ||
90 | goto drop; | ||
91 | } | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Check that the qkey matches (except for QP0, see 9.6.1.4.1). | ||
96 | * Qkeys with the high order bit set mean use the | ||
97 | * qkey from the QP context instead of the WR (see 10.2.5). | ||
98 | */ | ||
99 | if (qp->ibqp.qp_num) { | ||
100 | u32 qkey; | ||
101 | |||
102 | qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ? | ||
103 | sqp->qkey : swqe->wr.wr.ud.remote_qkey; | ||
104 | if (unlikely(qkey != qp->qkey)) { | ||
105 | u16 lid; | ||
106 | |||
107 | lid = ppd->lid | (ah_attr->src_path_bits & | ||
108 | ((1 << ppd->lmc) - 1)); | ||
109 | qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey, | ||
110 | ah_attr->sl, | ||
111 | sqp->ibqp.qp_num, qp->ibqp.qp_num, | ||
112 | cpu_to_be16(lid), | ||
113 | cpu_to_be16(ah_attr->dlid)); | ||
114 | goto drop; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * A GRH is expected to precede the data even if not | ||
120 | * present on the wire. | ||
121 | */ | ||
122 | length = swqe->length; | ||
123 | memset(&wc, 0, sizeof wc); | ||
124 | wc.byte_len = length + sizeof(struct ib_grh); | ||
125 | |||
126 | if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { | ||
127 | wc.wc_flags = IB_WC_WITH_IMM; | ||
128 | wc.ex.imm_data = swqe->wr.ex.imm_data; | ||
129 | } | ||
130 | |||
131 | spin_lock_irqsave(&qp->r_lock, flags); | ||
132 | |||
133 | /* | ||
134 | * Get the next work request entry to find where to put the data. | ||
135 | */ | ||
136 | if (qp->r_flags & QIB_R_REUSE_SGE) | ||
137 | qp->r_flags &= ~QIB_R_REUSE_SGE; | ||
138 | else { | ||
139 | int ret; | ||
140 | |||
141 | ret = qib_get_rwqe(qp, 0); | ||
142 | if (ret < 0) { | ||
143 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | ||
144 | goto bail_unlock; | ||
145 | } | ||
146 | if (!ret) { | ||
147 | if (qp->ibqp.qp_num == 0) | ||
148 | ibp->n_vl15_dropped++; | ||
149 | goto bail_unlock; | ||
150 | } | ||
151 | } | ||
152 | /* Silently drop packets which are too big. */ | ||
153 | if (unlikely(wc.byte_len > qp->r_len)) { | ||
154 | qp->r_flags |= QIB_R_REUSE_SGE; | ||
155 | ibp->n_pkt_drops++; | ||
156 | goto bail_unlock; | ||
157 | } | ||
158 | |||
159 | if (ah_attr->ah_flags & IB_AH_GRH) { | ||
160 | qib_copy_sge(&qp->r_sge, &ah_attr->grh, | ||
161 | sizeof(struct ib_grh), 1); | ||
162 | wc.wc_flags |= IB_WC_GRH; | ||
163 | } else | ||
164 | qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); | ||
165 | ssge.sg_list = swqe->sg_list + 1; | ||
166 | ssge.sge = *swqe->sg_list; | ||
167 | ssge.num_sge = swqe->wr.num_sge; | ||
168 | sge = &ssge.sge; | ||
169 | while (length) { | ||
170 | u32 len = sge->length; | ||
171 | |||
172 | if (len > length) | ||
173 | len = length; | ||
174 | if (len > sge->sge_length) | ||
175 | len = sge->sge_length; | ||
176 | BUG_ON(len == 0); | ||
177 | qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1); | ||
178 | sge->vaddr += len; | ||
179 | sge->length -= len; | ||
180 | sge->sge_length -= len; | ||
181 | if (sge->sge_length == 0) { | ||
182 | if (--ssge.num_sge) | ||
183 | *sge = *ssge.sg_list++; | ||
184 | } else if (sge->length == 0 && sge->mr->lkey) { | ||
185 | if (++sge->n >= QIB_SEGSZ) { | ||
186 | if (++sge->m >= sge->mr->mapsz) | ||
187 | break; | ||
188 | sge->n = 0; | ||
189 | } | ||
190 | sge->vaddr = | ||
191 | sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
192 | sge->length = | ||
193 | sge->mr->map[sge->m]->segs[sge->n].length; | ||
194 | } | ||
195 | length -= len; | ||
196 | } | ||
197 | while (qp->r_sge.num_sge) { | ||
198 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
199 | if (--qp->r_sge.num_sge) | ||
200 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
201 | } | ||
202 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | ||
203 | goto bail_unlock; | ||
204 | wc.wr_id = qp->r_wr_id; | ||
205 | wc.status = IB_WC_SUCCESS; | ||
206 | wc.opcode = IB_WC_RECV; | ||
207 | wc.qp = &qp->ibqp; | ||
208 | wc.src_qp = sqp->ibqp.qp_num; | ||
209 | wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? | ||
210 | swqe->wr.wr.ud.pkey_index : 0; | ||
211 | wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1)); | ||
212 | wc.sl = ah_attr->sl; | ||
213 | wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); | ||
214 | wc.port_num = qp->port_num; | ||
215 | /* Signal completion event if the solicited bit is set. */ | ||
216 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | ||
217 | swqe->wr.send_flags & IB_SEND_SOLICITED); | ||
218 | ibp->n_loop_pkts++; | ||
219 | bail_unlock: | ||
220 | spin_unlock_irqrestore(&qp->r_lock, flags); | ||
221 | drop: | ||
222 | if (atomic_dec_and_test(&qp->refcount)) | ||
223 | wake_up(&qp->wait); | ||
224 | } | ||
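/*
 * Editorial sketch, not part of the patch: the Q_Key selection performed
 * above follows IB spec section 10.2.5 -- a work-request qkey with the
 * high-order bit set means "use the QP context's qkey instead".  The
 * helper name below is hypothetical.
 */
static u32 ud_effective_qkey_sketch(u32 wr_qkey, u32 qp_qkey)
{
	return ((int)wr_qkey < 0) ? qp_qkey : wr_qkey;
}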
225 | |||
226 | /** | ||
227 | * qib_make_ud_req - construct a UD request packet | ||
228 | * @qp: the QP | ||
229 | * | ||
230 | * Return 1 if constructed; otherwise, return 0. | ||
231 | */ | ||
232 | int qib_make_ud_req(struct qib_qp *qp) | ||
233 | { | ||
234 | struct qib_other_headers *ohdr; | ||
235 | struct ib_ah_attr *ah_attr; | ||
236 | struct qib_pportdata *ppd; | ||
237 | struct qib_ibport *ibp; | ||
238 | struct qib_swqe *wqe; | ||
239 | unsigned long flags; | ||
240 | u32 nwords; | ||
241 | u32 extra_bytes; | ||
242 | u32 bth0; | ||
243 | u16 lrh0; | ||
244 | u16 lid; | ||
245 | int ret = 0; | ||
246 | int next_cur; | ||
247 | |||
248 | spin_lock_irqsave(&qp->s_lock, flags); | ||
249 | |||
250 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) { | ||
251 | if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) | ||
252 | goto bail; | ||
253 | /* We are in the error state, flush the work request. */ | ||
254 | if (qp->s_last == qp->s_head) | ||
255 | goto bail; | ||
256 | /* If DMAs are in progress, we can't flush immediately. */ | ||
257 | if (atomic_read(&qp->s_dma_busy)) { | ||
258 | qp->s_flags |= QIB_S_WAIT_DMA; | ||
259 | goto bail; | ||
260 | } | ||
261 | wqe = get_swqe_ptr(qp, qp->s_last); | ||
262 | qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); | ||
263 | goto done; | ||
264 | } | ||
265 | |||
266 | if (qp->s_cur == qp->s_head) | ||
267 | goto bail; | ||
268 | |||
269 | wqe = get_swqe_ptr(qp, qp->s_cur); | ||
270 | next_cur = qp->s_cur + 1; | ||
271 | if (next_cur >= qp->s_size) | ||
272 | next_cur = 0; | ||
273 | |||
274 | /* Construct the header. */ | ||
275 | ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
276 | ppd = ppd_from_ibp(ibp); | ||
277 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; | ||
278 | if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { | ||
279 | if (ah_attr->dlid != QIB_PERMISSIVE_LID) | ||
280 | ibp->n_multicast_xmit++; | ||
281 | else | ||
282 | ibp->n_unicast_xmit++; | ||
283 | } else { | ||
284 | ibp->n_unicast_xmit++; | ||
285 | lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); | ||
286 | if (unlikely(lid == ppd->lid)) { | ||
287 | /* | ||
288 | * If DMAs are in progress, we can't generate | ||
289 | * a completion for the loopback packet since | ||
290 | * it would be out of order. | ||
291 | * XXX Instead of waiting, we could queue a | ||
292 | * zero length descriptor so we get a callback. | ||
293 | */ | ||
294 | if (atomic_read(&qp->s_dma_busy)) { | ||
295 | qp->s_flags |= QIB_S_WAIT_DMA; | ||
296 | goto bail; | ||
297 | } | ||
298 | qp->s_cur = next_cur; | ||
299 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
300 | qib_ud_loopback(qp, wqe); | ||
301 | spin_lock_irqsave(&qp->s_lock, flags); | ||
302 | qib_send_complete(qp, wqe, IB_WC_SUCCESS); | ||
303 | goto done; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | qp->s_cur = next_cur; | ||
308 | extra_bytes = -wqe->length & 3; | ||
309 | nwords = (wqe->length + extra_bytes) >> 2; | ||
310 | |||
311 | /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */ | ||
312 | qp->s_hdrwords = 7; | ||
313 | qp->s_cur_size = wqe->length; | ||
314 | qp->s_cur_sge = &qp->s_sge; | ||
315 | qp->s_srate = ah_attr->static_rate; | ||
316 | qp->s_wqe = wqe; | ||
317 | qp->s_sge.sge = wqe->sg_list[0]; | ||
318 | qp->s_sge.sg_list = wqe->sg_list + 1; | ||
319 | qp->s_sge.num_sge = wqe->wr.num_sge; | ||
320 | qp->s_sge.total_len = wqe->length; | ||
321 | |||
322 | if (ah_attr->ah_flags & IB_AH_GRH) { | ||
323 | /* Header size in 32-bit words. */ | ||
324 | qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh, | ||
325 | &ah_attr->grh, | ||
326 | qp->s_hdrwords, nwords); | ||
327 | lrh0 = QIB_LRH_GRH; | ||
328 | ohdr = &qp->s_hdr.u.l.oth; | ||
329 | /* | ||
330 | * Don't worry about sending to locally attached multicast | ||
331 | * QPs; the spec leaves the behavior unspecified. | ||
332 | */ | ||
333 | } else { | ||
334 | /* Header size in 32-bit words. */ | ||
335 | lrh0 = QIB_LRH_BTH; | ||
336 | ohdr = &qp->s_hdr.u.oth; | ||
337 | } | ||
338 | if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { | ||
339 | qp->s_hdrwords++; | ||
340 | ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; | ||
341 | bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; | ||
342 | } else | ||
343 | bth0 = IB_OPCODE_UD_SEND_ONLY << 24; | ||
344 | lrh0 |= ah_attr->sl << 4; | ||
345 | if (qp->ibqp.qp_type == IB_QPT_SMI) | ||
346 | lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */ | ||
347 | else | ||
348 | lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12; | ||
349 | qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); | ||
350 | qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ | ||
351 | qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); | ||
352 | lid = ppd->lid; | ||
353 | if (lid) { | ||
354 | lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1); | ||
355 | qp->s_hdr.lrh[3] = cpu_to_be16(lid); | ||
356 | } else | ||
357 | qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE; | ||
358 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | ||
359 | bth0 |= IB_BTH_SOLICITED; | ||
360 | bth0 |= extra_bytes << 20; | ||
361 | bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY : | ||
362 | qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ? | ||
363 | wqe->wr.wr.ud.pkey_index : qp->s_pkey_index); | ||
364 | ohdr->bth[0] = cpu_to_be32(bth0); | ||
365 | /* | ||
366 | * Use the multicast QP if the destination LID is a multicast LID. | ||
367 | */ | ||
368 | ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE && | ||
369 | ah_attr->dlid != QIB_PERMISSIVE_LID ? | ||
370 | cpu_to_be32(QIB_MULTICAST_QPN) : | ||
371 | cpu_to_be32(wqe->wr.wr.ud.remote_qpn); | ||
372 | ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); | ||
373 | /* | ||
374 | * Qkeys with the high order bit set mean use the | ||
375 | * qkey from the QP context instead of the WR (see 10.2.5). | ||
376 | */ | ||
377 | ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ? | ||
378 | qp->qkey : wqe->wr.wr.ud.remote_qkey); | ||
379 | ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); | ||
380 | |||
381 | done: | ||
382 | ret = 1; | ||
383 | goto unlock; | ||
384 | |||
385 | bail: | ||
386 | qp->s_flags &= ~QIB_S_BUSY; | ||
387 | unlock: | ||
388 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
389 | return ret; | ||
390 | } | ||
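/*
 * Editorial sketch, not part of the patch: the padding arithmetic used
 * in qib_make_ud_req() above.  -len & 3 is the number of pad bytes that
 * rounds 'len' up to a 4-byte multiple; the padded payload is then
 * expressed in 32-bit words for the LRH packet length.
 */
static u32 ud_payload_nwords_sketch(u32 len)
{
	u32 extra_bytes = -len & 3;		/* e.g. len = 5 -> 3 pad bytes */

	return (len + extra_bytes) >> 2;	/* e.g. len = 5 -> 2 dwords */
}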
391 | |||
392 | static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey) | ||
393 | { | ||
394 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
395 | struct qib_devdata *dd = ppd->dd; | ||
396 | unsigned ctxt = ppd->hw_pidx; | ||
397 | unsigned i; | ||
398 | |||
399 | pkey &= 0x7fff; /* remove limited/full membership bit */ | ||
400 | |||
401 | for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i) | ||
402 | if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey) | ||
403 | return i; | ||
404 | |||
405 | /* | ||
406 | * Should not get here; this means the hardware failed to validate pkeys. | ||
407 | * Punt and return index 0. | ||
408 | */ | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | /** | ||
413 | * qib_ud_rcv - receive an incoming UD packet | ||
414 | * @ibp: the port the packet came in on | ||
415 | * @hdr: the packet header | ||
416 | * @has_grh: true if the packet has a GRH | ||
417 | * @data: the packet data | ||
418 | * @tlen: the packet length | ||
419 | * @qp: the QP the packet came on | ||
420 | * | ||
421 | * This is called from qib_qp_rcv() to process an incoming UD packet | ||
422 | * for the given QP. | ||
423 | * Called at interrupt level. | ||
424 | */ | ||
425 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | ||
426 | int has_grh, void *data, u32 tlen, struct qib_qp *qp) | ||
427 | { | ||
428 | struct qib_other_headers *ohdr; | ||
429 | int opcode; | ||
430 | u32 hdrsize; | ||
431 | u32 pad; | ||
432 | struct ib_wc wc; | ||
433 | u32 qkey; | ||
434 | u32 src_qp; | ||
435 | u16 dlid; | ||
436 | |||
437 | /* Check for GRH */ | ||
438 | if (!has_grh) { | ||
439 | ohdr = &hdr->u.oth; | ||
440 | hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */ | ||
441 | } else { | ||
442 | ohdr = &hdr->u.l.oth; | ||
443 | hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */ | ||
444 | } | ||
445 | qkey = be32_to_cpu(ohdr->u.ud.deth[0]); | ||
446 | src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; | ||
447 | |||
448 | /* Get the number of bytes the message was padded by. */ | ||
449 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | ||
450 | if (unlikely(tlen < (hdrsize + pad + 4))) { | ||
451 | /* Drop incomplete packets. */ | ||
452 | ibp->n_pkt_drops++; | ||
453 | goto bail; | ||
454 | } | ||
455 | tlen -= hdrsize + pad + 4; | ||
456 | |||
457 | /* | ||
458 | * Check that the permissive LID is only used on QP0 | ||
459 | * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1). | ||
460 | */ | ||
461 | if (qp->ibqp.qp_num) { | ||
462 | if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || | ||
463 | hdr->lrh[3] == IB_LID_PERMISSIVE)) { | ||
464 | ibp->n_pkt_drops++; | ||
465 | goto bail; | ||
466 | } | ||
467 | if (qp->ibqp.qp_num > 1) { | ||
468 | u16 pkey1, pkey2; | ||
469 | |||
470 | pkey1 = be32_to_cpu(ohdr->bth[0]); | ||
471 | pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); | ||
472 | if (unlikely(!qib_pkey_ok(pkey1, pkey2))) { | ||
473 | qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, | ||
474 | pkey1, | ||
475 | (be16_to_cpu(hdr->lrh[0]) >> 4) & | ||
476 | 0xF, | ||
477 | src_qp, qp->ibqp.qp_num, | ||
478 | hdr->lrh[3], hdr->lrh[1]); | ||
479 | goto bail; | ||
480 | } | ||
481 | } | ||
482 | if (unlikely(qkey != qp->qkey)) { | ||
483 | qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey, | ||
484 | (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, | ||
485 | src_qp, qp->ibqp.qp_num, | ||
486 | hdr->lrh[3], hdr->lrh[1]); | ||
487 | goto bail; | ||
488 | } | ||
489 | /* Drop invalid MAD packets (see 13.5.3.1). */ | ||
490 | if (unlikely(qp->ibqp.qp_num == 1 && | ||
491 | (tlen != 256 || | ||
492 | (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { | ||
493 | ibp->n_pkt_drops++; | ||
494 | goto bail; | ||
495 | } | ||
496 | } else { | ||
497 | struct ib_smp *smp; | ||
498 | |||
499 | /* Drop invalid MAD packets (see 13.5.3.1). */ | ||
500 | if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { | ||
501 | ibp->n_pkt_drops++; | ||
502 | goto bail; | ||
503 | } | ||
504 | smp = (struct ib_smp *) data; | ||
505 | if ((hdr->lrh[1] == IB_LID_PERMISSIVE || | ||
506 | hdr->lrh[3] == IB_LID_PERMISSIVE) && | ||
507 | smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | ||
508 | ibp->n_pkt_drops++; | ||
509 | goto bail; | ||
510 | } | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * The opcode is in the low byte when it's in network order | ||
515 | * (top byte when in host order). | ||
516 | */ | ||
517 | opcode = be32_to_cpu(ohdr->bth[0]) >> 24; | ||
518 | if (qp->ibqp.qp_num > 1 && | ||
519 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { | ||
520 | wc.ex.imm_data = ohdr->u.ud.imm_data; | ||
521 | wc.wc_flags = IB_WC_WITH_IMM; | ||
522 | hdrsize += sizeof(u32); | ||
523 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { | ||
524 | wc.ex.imm_data = 0; | ||
525 | wc.wc_flags = 0; | ||
526 | } else { | ||
527 | ibp->n_pkt_drops++; | ||
528 | goto bail; | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * A GRH is expected to precede the data even if not | ||
533 | * present on the wire. | ||
534 | */ | ||
535 | wc.byte_len = tlen + sizeof(struct ib_grh); | ||
536 | |||
537 | /* | ||
538 | * We need to serialize getting a receive work queue entry and | ||
539 | * generating a completion for it against QPs sending to this QP | ||
540 | * locally. | ||
541 | */ | ||
542 | spin_lock(&qp->r_lock); | ||
543 | |||
544 | /* | ||
545 | * Get the next work request entry to find where to put the data. | ||
546 | */ | ||
547 | if (qp->r_flags & QIB_R_REUSE_SGE) | ||
548 | qp->r_flags &= ~QIB_R_REUSE_SGE; | ||
549 | else { | ||
550 | int ret; | ||
551 | |||
552 | ret = qib_get_rwqe(qp, 0); | ||
553 | if (ret < 0) { | ||
554 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | ||
555 | goto bail_unlock; | ||
556 | } | ||
557 | if (!ret) { | ||
558 | if (qp->ibqp.qp_num == 0) | ||
559 | ibp->n_vl15_dropped++; | ||
560 | goto bail_unlock; | ||
561 | } | ||
562 | } | ||
563 | /* Silently drop packets which are too big. */ | ||
564 | if (unlikely(wc.byte_len > qp->r_len)) { | ||
565 | qp->r_flags |= QIB_R_REUSE_SGE; | ||
566 | ibp->n_pkt_drops++; | ||
567 | goto bail_unlock; | ||
568 | } | ||
569 | if (has_grh) { | ||
570 | qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, | ||
571 | sizeof(struct ib_grh), 1); | ||
572 | wc.wc_flags |= IB_WC_GRH; | ||
573 | } else | ||
574 | qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); | ||
575 | qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); | ||
576 | while (qp->r_sge.num_sge) { | ||
577 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
578 | if (--qp->r_sge.num_sge) | ||
579 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
580 | } | ||
581 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | ||
582 | goto bail_unlock; | ||
583 | wc.wr_id = qp->r_wr_id; | ||
584 | wc.status = IB_WC_SUCCESS; | ||
585 | wc.opcode = IB_WC_RECV; | ||
586 | wc.vendor_err = 0; | ||
587 | wc.qp = &qp->ibqp; | ||
588 | wc.src_qp = src_qp; | ||
589 | wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? | ||
590 | qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0; | ||
591 | wc.slid = be16_to_cpu(hdr->lrh[3]); | ||
592 | wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF; | ||
593 | dlid = be16_to_cpu(hdr->lrh[1]); | ||
594 | /* | ||
595 | * Save the LMC lower bits if the destination LID is a unicast LID. | ||
596 | */ | ||
597 | wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 : | ||
598 | dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1); | ||
599 | wc.port_num = qp->port_num; | ||
600 | /* Signal completion event if the solicited bit is set. */ | ||
601 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | ||
602 | (ohdr->bth[0] & | ||
603 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); | ||
604 | bail_unlock: | ||
605 | spin_unlock(&qp->r_lock); | ||
606 | bail:; | ||
607 | } | ||
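The wc.dlid_path_bits assignment above keeps only the low LMC bits of a unicast destination LID, so the consumer can tell which of the port's LMC-aliased LIDs the sender addressed; multicast LIDs get 0. A minimal sketch of the mask arithmetic, with made-up values, assuming nothing beyond what is shown above:

/* Illustrative only: with LMC = 2 the port answers to 4 consecutive LIDs. */
static u16 example_dlid_path_bits(u16 dlid, u8 lmc)
{
	/* e.g. dlid = 0x1007, lmc = 2: mask = 0x3, path bits = 0x3 */
	return dlid & ((1 << lmc) - 1);
}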
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c new file mode 100644 index 000000000000..d7a26c1d4f37 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_user_pages.c | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/mm.h> | ||
35 | #include <linux/device.h> | ||
36 | |||
37 | #include "qib.h" | ||
38 | |||
39 | static void __qib_release_user_pages(struct page **p, size_t num_pages, | ||
40 | int dirty) | ||
41 | { | ||
42 | size_t i; | ||
43 | |||
44 | for (i = 0; i < num_pages; i++) { | ||
45 | if (dirty) | ||
46 | set_page_dirty_lock(p[i]); | ||
47 | put_page(p[i]); | ||
48 | } | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Call with current->mm->mmap_sem held. | ||
53 | */ | ||
54 | static int __get_user_pages(unsigned long start_page, size_t num_pages, | ||
55 | struct page **p, struct vm_area_struct **vma) | ||
56 | { | ||
57 | unsigned long lock_limit; | ||
58 | size_t got; | ||
59 | int ret; | ||
60 | |||
61 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | ||
62 | |||
63 | if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) { | ||
64 | ret = -ENOMEM; | ||
65 | goto bail; | ||
66 | } | ||
67 | |||
68 | for (got = 0; got < num_pages; got += ret) { | ||
69 | ret = get_user_pages(current, current->mm, | ||
70 | start_page + got * PAGE_SIZE, | ||
71 | num_pages - got, 1, 1, | ||
72 | p + got, vma); | ||
73 | if (ret < 0) | ||
74 | goto bail_release; | ||
75 | } | ||
76 | |||
77 | current->mm->locked_vm += num_pages; | ||
78 | |||
79 | ret = 0; | ||
80 | goto bail; | ||
81 | |||
82 | bail_release: | ||
83 | __qib_release_user_pages(p, got, 0); | ||
84 | bail: | ||
85 | return ret; | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * qib_map_page - a safety wrapper around pci_map_page() | ||
90 | * | ||
91 | * A dma_addr of all 0's is interpreted by the chip as "disabled". | ||
92 | * Unfortunately, it can also be a valid dma_addr returned on some | ||
93 | * architectures. | ||
94 | * | ||
95 | * The powerpc iommu assigns dma_addrs in ascending order, so we don't | ||
96 | * have to bother with retries or mapping a dummy page to ensure we | ||
97 | * don't just get the same mapping again. | ||
98 | * | ||
99 | * I'm sure we won't be so lucky with other IOMMUs, so FIXME. | ||
100 | */ | ||
101 | dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page, | ||
102 | unsigned long offset, size_t size, int direction) | ||
103 | { | ||
104 | dma_addr_t phys; | ||
105 | |||
106 | phys = pci_map_page(hwdev, page, offset, size, direction); | ||
107 | |||
108 | if (phys == 0) { | ||
109 | pci_unmap_page(hwdev, phys, size, direction); | ||
110 | phys = pci_map_page(hwdev, page, offset, size, direction); | ||
111 | /* | ||
112 | * FIXME: If we get 0 again, we should keep this page, | ||
113 | * map another, then free the 0 page. | ||
114 | */ | ||
115 | } | ||
116 | |||
117 | return phys; | ||
118 | } | ||
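A caller still has to treat the value returned by qib_map_page() like any other pci_map_page() result; the wrapper only retries the all-zero case. A hedged usage sketch (the helper name and error handling are invented for illustration):

/* Illustrative: map one receive buffer page for the chip. */
static int example_map_rcv_page(struct qib_devdata *dd, struct page *page,
				dma_addr_t *busaddr)
{
	*busaddr = qib_map_page(dd->pcidev, page, 0, PAGE_SIZE,
				PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(dd->pcidev, *busaddr))
		return -ENOMEM;		/* caller frees the page */
	return 0;
}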
119 | |||
120 | /** | ||
121 | * qib_get_user_pages - lock user pages into memory | ||
122 | * @start_page: the start page | ||
123 | * @num_pages: the number of pages | ||
124 | * @p: the output page structures | ||
125 | * | ||
126 | * This function takes a given start page (page aligned user virtual | ||
127 | * address) and pins it and the following specified number of pages. For | ||
128 | * now, num_pages is always 1, but that will probably change at some point | ||
129 | * (because caller is doing expected sends on a single virtually contiguous | ||
130 | * buffer, so we can do all pages at once). | ||
131 | */ | ||
132 | int qib_get_user_pages(unsigned long start_page, size_t num_pages, | ||
133 | struct page **p) | ||
134 | { | ||
135 | int ret; | ||
136 | |||
137 | down_write(¤t->mm->mmap_sem); | ||
138 | |||
139 | ret = __get_user_pages(start_page, num_pages, p, NULL); | ||
140 | |||
141 | up_write(¤t->mm->mmap_sem); | ||
142 | |||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | void qib_release_user_pages(struct page **p, size_t num_pages) | ||
147 | { | ||
148 | if (current->mm) /* during close after signal, mm can be NULL */ | ||
149 | down_write(¤t->mm->mmap_sem); | ||
150 | |||
151 | __qib_release_user_pages(p, num_pages, 1); | ||
152 | |||
153 | if (current->mm) { | ||
154 | current->mm->locked_vm -= num_pages; | ||
155 | up_write(¤t->mm->mmap_sem); | ||
156 | } | ||
157 | } | ||
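A hypothetical caller of the two exported helpers above could look like the sketch below; the function name and page-count arithmetic are illustrative, but they show how an arbitrary user buffer is rounded out to whole pages and how pinning and releasing are paired:

/* Illustrative: pin the pages backing a user buffer, then release them. */
static int example_pin_user_buf(unsigned long uaddr, size_t nbytes,
				struct page **pages)
{
	unsigned long first = uaddr & PAGE_MASK;
	unsigned long last = (uaddr + nbytes - 1) & PAGE_MASK;
	size_t npages = ((last - first) >> PAGE_SHIFT) + 1;
	int ret;

	ret = qib_get_user_pages(first, npages, pages);
	if (ret)
		return ret;
	/* ... hand the pinned pages to the DMA engine ... */
	qib_release_user_pages(pages, npages);
	return 0;
}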
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c new file mode 100644 index 000000000000..4c19e06b5e85 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c | |||
@@ -0,0 +1,897 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | #include <linux/mm.h> | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/device.h> | ||
35 | #include <linux/dmapool.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <linux/list.h> | ||
38 | #include <linux/highmem.h> | ||
39 | #include <linux/io.h> | ||
40 | #include <linux/uio.h> | ||
41 | #include <linux/rbtree.h> | ||
42 | #include <linux/spinlock.h> | ||
43 | #include <linux/delay.h> | ||
44 | |||
45 | #include "qib.h" | ||
46 | #include "qib_user_sdma.h" | ||
47 | |||
48 | /* minimum size of header */ | ||
49 | #define QIB_USER_SDMA_MIN_HEADER_LENGTH 64 | ||
50 | /* expected size of headers (for dma_pool) */ | ||
51 | #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64 | ||
52 | /* attempt to drain the queue for 5 seconds */ | ||
53 | #define QIB_USER_SDMA_DRAIN_TIMEOUT 500 | ||
54 | |||
55 | struct qib_user_sdma_pkt { | ||
56 | u8 naddr; /* dimension of addr (1..3) ... */ | ||
57 | u32 counter; /* sdma pkts queued counter for this entry */ | ||
58 | u64 added; /* global descq number of entries */ | ||
59 | |||
60 | struct { | ||
61 | u32 offset; /* offset for kvaddr, addr */ | ||
62 | u32 length; /* length in page */ | ||
63 | u8 put_page; /* should we put_page? */ | ||
64 | u8 dma_mapped; /* is page dma_mapped? */ | ||
65 | struct page *page; /* may be NULL (coherent mem) */ | ||
66 | void *kvaddr; /* FIXME: only for pio hack */ | ||
67 | dma_addr_t addr; | ||
68 | } addr[4]; /* max pages, any more and we coalesce */ | ||
69 | struct list_head list; /* list element */ | ||
70 | }; | ||
71 | |||
72 | struct qib_user_sdma_queue { | ||
73 | /* | ||
74 | * pkts sent to dma engine are queued on this | ||
75 | * list head. the type of the elements of this | ||
76 | * list are struct qib_user_sdma_pkt... | ||
77 | */ | ||
78 | struct list_head sent; | ||
79 | |||
80 | /* headers with expected length are allocated from here... */ | ||
81 | char header_cache_name[64]; | ||
82 | struct dma_pool *header_cache; | ||
83 | |||
84 | /* packets are allocated from the slab cache... */ | ||
85 | char pkt_slab_name[64]; | ||
86 | struct kmem_cache *pkt_slab; | ||
87 | |||
88 | /* as packets go on the queued queue, they are counted... */ | ||
89 | u32 counter; | ||
90 | u32 sent_counter; | ||
91 | |||
92 | /* dma page table */ | ||
93 | struct rb_root dma_pages_root; | ||
94 | |||
95 | /* protect everything above... */ | ||
96 | struct mutex lock; | ||
97 | }; | ||
98 | |||
99 | struct qib_user_sdma_queue * | ||
100 | qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) | ||
101 | { | ||
102 | struct qib_user_sdma_queue *pq = | ||
103 | kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL); | ||
104 | |||
105 | if (!pq) | ||
106 | goto done; | ||
107 | |||
108 | pq->counter = 0; | ||
109 | pq->sent_counter = 0; | ||
110 | INIT_LIST_HEAD(&pq->sent); | ||
111 | |||
112 | mutex_init(&pq->lock); | ||
113 | |||
114 | snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name), | ||
115 | "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt); | ||
116 | pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name, | ||
117 | sizeof(struct qib_user_sdma_pkt), | ||
118 | 0, 0, NULL); | ||
119 | |||
120 | if (!pq->pkt_slab) | ||
121 | goto err_kfree; | ||
122 | |||
123 | snprintf(pq->header_cache_name, sizeof(pq->header_cache_name), | ||
124 | "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt); | ||
125 | pq->header_cache = dma_pool_create(pq->header_cache_name, | ||
126 | dev, | ||
127 | QIB_USER_SDMA_EXP_HEADER_LENGTH, | ||
128 | 4, 0); | ||
129 | if (!pq->header_cache) | ||
130 | goto err_slab; | ||
131 | |||
132 | pq->dma_pages_root = RB_ROOT; | ||
133 | |||
134 | goto done; | ||
135 | |||
136 | err_slab: | ||
137 | kmem_cache_destroy(pq->pkt_slab); | ||
138 | err_kfree: | ||
139 | kfree(pq); | ||
140 | pq = NULL; | ||
141 | |||
142 | done: | ||
143 | return pq; | ||
144 | } | ||
145 | |||
146 | static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt, | ||
147 | int i, size_t offset, size_t len, | ||
148 | int put_page, int dma_mapped, | ||
149 | struct page *page, | ||
150 | void *kvaddr, dma_addr_t dma_addr) | ||
151 | { | ||
152 | pkt->addr[i].offset = offset; | ||
153 | pkt->addr[i].length = len; | ||
154 | pkt->addr[i].put_page = put_page; | ||
155 | pkt->addr[i].dma_mapped = dma_mapped; | ||
156 | pkt->addr[i].page = page; | ||
157 | pkt->addr[i].kvaddr = kvaddr; | ||
158 | pkt->addr[i].addr = dma_addr; | ||
159 | } | ||
160 | |||
161 | static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt, | ||
162 | u32 counter, size_t offset, | ||
163 | size_t len, int dma_mapped, | ||
164 | struct page *page, | ||
165 | void *kvaddr, dma_addr_t dma_addr) | ||
166 | { | ||
167 | pkt->naddr = 1; | ||
168 | pkt->counter = counter; | ||
169 | qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page, | ||
170 | kvaddr, dma_addr); | ||
171 | } | ||
172 | |||
173 | /* we have too many pages in the iovec, coalesce to a single page */ | ||
174 | static int qib_user_sdma_coalesce(const struct qib_devdata *dd, | ||
175 | struct qib_user_sdma_pkt *pkt, | ||
176 | const struct iovec *iov, | ||
177 | unsigned long niov) | ||
178 | { | ||
179 | int ret = 0; | ||
180 | struct page *page = alloc_page(GFP_KERNEL); | ||
181 | void *mpage_save; | ||
182 | char *mpage; | ||
183 | int i; | ||
184 | int len = 0; | ||
185 | dma_addr_t dma_addr; | ||
186 | |||
187 | if (!page) { | ||
188 | ret = -ENOMEM; | ||
189 | goto done; | ||
190 | } | ||
191 | |||
192 | mpage = kmap(page); | ||
193 | mpage_save = mpage; | ||
194 | for (i = 0; i < niov; i++) { | ||
195 | int cfur; | ||
196 | |||
197 | cfur = copy_from_user(mpage, | ||
198 | iov[i].iov_base, iov[i].iov_len); | ||
199 | if (cfur) { | ||
200 | ret = -EFAULT; | ||
201 | goto free_unmap; | ||
202 | } | ||
203 | |||
204 | mpage += iov[i].iov_len; | ||
205 | len += iov[i].iov_len; | ||
206 | } | ||
207 | |||
208 | dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len, | ||
209 | DMA_TO_DEVICE); | ||
210 | if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { | ||
211 | ret = -ENOMEM; | ||
212 | goto free_unmap; | ||
213 | } | ||
214 | |||
215 | qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save, | ||
216 | dma_addr); | ||
217 | pkt->naddr = 2; | ||
218 | |||
219 | goto done; | ||
220 | |||
221 | free_unmap: | ||
222 | kunmap(page); | ||
223 | __free_page(page); | ||
224 | done: | ||
225 | return ret; | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * How many pages in this iovec element? | ||
230 | */ | ||
231 | static int qib_user_sdma_num_pages(const struct iovec *iov) | ||
232 | { | ||
233 | const unsigned long addr = (unsigned long) iov->iov_base; | ||
234 | const unsigned long len = iov->iov_len; | ||
235 | const unsigned long spage = addr & PAGE_MASK; | ||
236 | const unsigned long epage = (addr + len - 1) & PAGE_MASK; | ||
237 | |||
238 | return 1 + ((epage - spage) >> PAGE_SHIFT); | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * Truncate length to page boundary. | ||
243 | */ | ||
244 | static int qib_user_sdma_page_length(unsigned long addr, unsigned long len) | ||
245 | { | ||
246 | const unsigned long offset = addr & ~PAGE_MASK; | ||
247 | |||
248 | return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len; | ||
249 | } | ||
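Together, qib_user_sdma_num_pages() and qib_user_sdma_page_length() slice an iovec element into page-sized fragments: the first counts the pages the element touches, the second clamps each fragment at the next page boundary. A worked sketch with made-up numbers (a 6000-byte element starting 512 bytes into a 4 KiB page spans two pages, giving fragments of 3584 and 2416 bytes):

/* Illustrative: walk an iovec element one page-bounded fragment at a time. */
static void example_fragment_iovec(unsigned long addr, unsigned long len)
{
	while (len) {
		unsigned long flen = qib_user_sdma_page_length(addr, len);

		/* ... pin and dma_map_page() the page covering addr ... */
		addr += flen;
		len -= flen;
	}
}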
250 | |||
251 | static void qib_user_sdma_free_pkt_frag(struct device *dev, | ||
252 | struct qib_user_sdma_queue *pq, | ||
253 | struct qib_user_sdma_pkt *pkt, | ||
254 | int frag) | ||
255 | { | ||
256 | const int i = frag; | ||
257 | |||
258 | if (pkt->addr[i].page) { | ||
259 | if (pkt->addr[i].dma_mapped) | ||
260 | dma_unmap_page(dev, | ||
261 | pkt->addr[i].addr, | ||
262 | pkt->addr[i].length, | ||
263 | DMA_TO_DEVICE); | ||
264 | |||
265 | if (pkt->addr[i].kvaddr) | ||
266 | kunmap(pkt->addr[i].page); | ||
267 | |||
268 | if (pkt->addr[i].put_page) | ||
269 | put_page(pkt->addr[i].page); | ||
270 | else | ||
271 | __free_page(pkt->addr[i].page); | ||
272 | } else if (pkt->addr[i].kvaddr) | ||
273 | /* free coherent mem from cache... */ | ||
274 | dma_pool_free(pq->header_cache, | ||
275 | pkt->addr[i].kvaddr, pkt->addr[i].addr); | ||
276 | } | ||
277 | |||
278 | /* return number of pages pinned... */ | ||
279 | static int qib_user_sdma_pin_pages(const struct qib_devdata *dd, | ||
280 | struct qib_user_sdma_pkt *pkt, | ||
281 | unsigned long addr, int tlen, int npages) | ||
282 | { | ||
283 | struct page *pages[2]; | ||
284 | int j; | ||
285 | int ret; | ||
286 | |||
287 | ret = get_user_pages(current, current->mm, addr, | ||
288 | npages, 0, 1, pages, NULL); | ||
289 | |||
290 | if (ret != npages) { | ||
291 | int i; | ||
292 | |||
293 | for (i = 0; i < ret; i++) | ||
294 | put_page(pages[i]); | ||
295 | |||
296 | ret = -ENOMEM; | ||
297 | goto done; | ||
298 | } | ||
299 | |||
300 | for (j = 0; j < npages; j++) { | ||
301 | /* map the pages... */ | ||
302 | const int flen = qib_user_sdma_page_length(addr, tlen); | ||
303 | dma_addr_t dma_addr = | ||
304 | dma_map_page(&dd->pcidev->dev, | ||
305 | pages[j], 0, flen, DMA_TO_DEVICE); | ||
306 | unsigned long fofs = addr & ~PAGE_MASK; | ||
307 | |||
308 | if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { | ||
309 | ret = -ENOMEM; | ||
310 | goto done; | ||
311 | } | ||
312 | |||
313 | qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1, | ||
314 | pages[j], kmap(pages[j]), dma_addr); | ||
315 | |||
316 | pkt->naddr++; | ||
317 | addr += flen; | ||
318 | tlen -= flen; | ||
319 | } | ||
320 | |||
321 | done: | ||
322 | return ret; | ||
323 | } | ||
324 | |||
325 | static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd, | ||
326 | struct qib_user_sdma_queue *pq, | ||
327 | struct qib_user_sdma_pkt *pkt, | ||
328 | const struct iovec *iov, | ||
329 | unsigned long niov) | ||
330 | { | ||
331 | int ret = 0; | ||
332 | unsigned long idx; | ||
333 | |||
334 | for (idx = 0; idx < niov; idx++) { | ||
335 | const int npages = qib_user_sdma_num_pages(iov + idx); | ||
336 | const unsigned long addr = (unsigned long) iov[idx].iov_base; | ||
337 | |||
338 | ret = qib_user_sdma_pin_pages(dd, pkt, addr, | ||
339 | iov[idx].iov_len, npages); | ||
340 | if (ret < 0) | ||
341 | goto free_pkt; | ||
342 | } | ||
343 | |||
344 | goto done; | ||
345 | |||
346 | free_pkt: | ||
347 | for (idx = 0; idx < pkt->naddr; idx++) | ||
348 | qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); | ||
349 | |||
350 | done: | ||
351 | return ret; | ||
352 | } | ||
353 | |||
354 | static int qib_user_sdma_init_payload(const struct qib_devdata *dd, | ||
355 | struct qib_user_sdma_queue *pq, | ||
356 | struct qib_user_sdma_pkt *pkt, | ||
357 | const struct iovec *iov, | ||
358 | unsigned long niov, int npages) | ||
359 | { | ||
360 | int ret = 0; | ||
361 | |||
362 | if (npages >= ARRAY_SIZE(pkt->addr)) | ||
363 | ret = qib_user_sdma_coalesce(dd, pkt, iov, niov); | ||
364 | else | ||
365 | ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); | ||
366 | |||
367 | return ret; | ||
368 | } | ||
369 | |||
370 | /* free a packet list -- return counter value of last packet */ | ||
371 | static void qib_user_sdma_free_pkt_list(struct device *dev, | ||
372 | struct qib_user_sdma_queue *pq, | ||
373 | struct list_head *list) | ||
374 | { | ||
375 | struct qib_user_sdma_pkt *pkt, *pkt_next; | ||
376 | |||
377 | list_for_each_entry_safe(pkt, pkt_next, list, list) { | ||
378 | int i; | ||
379 | |||
380 | for (i = 0; i < pkt->naddr; i++) | ||
381 | qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); | ||
382 | |||
383 | kmem_cache_free(pq->pkt_slab, pkt); | ||
384 | } | ||
385 | } | ||
386 | |||
387 | /* | ||
388 | * copy headers, coalesce etc -- pq->lock must be held | ||
389 | * | ||
390 | * we queue all the packets to list, returning the | ||
391 | * number of iovec entries consumed. list must be empty initially, | ||
392 | * as, if there is an error, we clean it... | ||
393 | */ | ||
394 | static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, | ||
395 | struct qib_user_sdma_queue *pq, | ||
396 | struct list_head *list, | ||
397 | const struct iovec *iov, | ||
398 | unsigned long niov, | ||
399 | int maxpkts) | ||
400 | { | ||
401 | unsigned long idx = 0; | ||
402 | int ret = 0; | ||
403 | int npkts = 0; | ||
404 | struct page *page = NULL; | ||
405 | __le32 *pbc; | ||
406 | dma_addr_t dma_addr; | ||
407 | struct qib_user_sdma_pkt *pkt = NULL; | ||
408 | size_t len; | ||
409 | size_t nw; | ||
410 | u32 counter = pq->counter; | ||
411 | int dma_mapped = 0; | ||
412 | |||
413 | while (idx < niov && npkts < maxpkts) { | ||
414 | const unsigned long addr = (unsigned long) iov[idx].iov_base; | ||
415 | const unsigned long idx_save = idx; | ||
416 | unsigned pktnw; | ||
417 | unsigned pktnwc; | ||
418 | int nfrags = 0; | ||
419 | int npages = 0; | ||
420 | int cfur; | ||
421 | |||
422 | dma_mapped = 0; | ||
423 | len = iov[idx].iov_len; | ||
424 | nw = len >> 2; | ||
425 | page = NULL; | ||
426 | |||
427 | pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); | ||
428 | if (!pkt) { | ||
429 | ret = -ENOMEM; | ||
430 | goto free_list; | ||
431 | } | ||
432 | |||
433 | if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH || | ||
434 | len > PAGE_SIZE || len & 3 || addr & 3) { | ||
435 | ret = -EINVAL; | ||
436 | goto free_pkt; | ||
437 | } | ||
438 | |||
439 | if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH) | ||
440 | pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL, | ||
441 | &dma_addr); | ||
442 | else | ||
443 | pbc = NULL; | ||
444 | |||
445 | if (!pbc) { | ||
446 | page = alloc_page(GFP_KERNEL); | ||
447 | if (!page) { | ||
448 | ret = -ENOMEM; | ||
449 | goto free_pkt; | ||
450 | } | ||
451 | pbc = kmap(page); | ||
452 | } | ||
453 | |||
454 | cfur = copy_from_user(pbc, iov[idx].iov_base, len); | ||
455 | if (cfur) { | ||
456 | ret = -EFAULT; | ||
457 | goto free_pbc; | ||
458 | } | ||
459 | |||
460 | /* | ||
461 | * This assignment is a bit strange. It's because the | ||
462 | * pbc counts the number of 32 bit words in the full | ||
463 | * packet _except_ the first word of the pbc itself... | ||
464 | */ | ||
465 | pktnwc = nw - 1; | ||
466 | |||
467 | /* | ||
468 | * pktnw computation yields the number of 32 bit words | ||
469 | * that the caller has indicated in the PBC. Note that | ||
470 | * this is one less than the total number of words that | ||
471 | * go to the send DMA engine, as the first 32 bit word | ||
472 | * of the PBC itself is not counted. Armed with this count, | ||
473 | * we can verify that the packet is consistent with the | ||
474 | * iovec lengths. | ||
475 | */ | ||
476 | pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK; | ||
477 | if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) { | ||
478 | ret = -EINVAL; | ||
479 | goto free_pbc; | ||
480 | } | ||
481 | |||
482 | idx++; | ||
483 | while (pktnwc < pktnw && idx < niov) { | ||
484 | const size_t slen = iov[idx].iov_len; | ||
485 | const unsigned long faddr = | ||
486 | (unsigned long) iov[idx].iov_base; | ||
487 | |||
488 | if (slen & 3 || faddr & 3 || !slen || | ||
489 | slen > PAGE_SIZE) { | ||
490 | ret = -EINVAL; | ||
491 | goto free_pbc; | ||
492 | } | ||
493 | |||
494 | npages++; | ||
495 | if ((faddr & PAGE_MASK) != | ||
496 | ((faddr + slen - 1) & PAGE_MASK)) | ||
497 | npages++; | ||
498 | |||
499 | pktnwc += slen >> 2; | ||
500 | idx++; | ||
501 | nfrags++; | ||
502 | } | ||
503 | |||
504 | if (pktnwc != pktnw) { | ||
505 | ret = -EINVAL; | ||
506 | goto free_pbc; | ||
507 | } | ||
508 | |||
509 | if (page) { | ||
510 | dma_addr = dma_map_page(&dd->pcidev->dev, | ||
511 | page, 0, len, DMA_TO_DEVICE); | ||
512 | if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { | ||
513 | ret = -ENOMEM; | ||
514 | goto free_pbc; | ||
515 | } | ||
516 | |||
517 | dma_mapped = 1; | ||
518 | } | ||
519 | |||
520 | qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped, | ||
521 | page, pbc, dma_addr); | ||
522 | |||
523 | if (nfrags) { | ||
524 | ret = qib_user_sdma_init_payload(dd, pq, pkt, | ||
525 | iov + idx_save + 1, | ||
526 | nfrags, npages); | ||
527 | if (ret < 0) | ||
528 | goto free_pbc_dma; | ||
529 | } | ||
530 | |||
531 | counter++; | ||
532 | npkts++; | ||
533 | |||
534 | list_add_tail(&pkt->list, list); | ||
535 | } | ||
536 | |||
537 | ret = idx; | ||
538 | goto done; | ||
539 | |||
540 | free_pbc_dma: | ||
541 | if (dma_mapped) | ||
542 | dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE); | ||
543 | free_pbc: | ||
544 | if (page) { | ||
545 | kunmap(page); | ||
546 | __free_page(page); | ||
547 | } else | ||
548 | dma_pool_free(pq->header_cache, pbc, dma_addr); | ||
549 | free_pkt: | ||
550 | kmem_cache_free(pq->pkt_slab, pkt); | ||
551 | free_list: | ||
552 | qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list); | ||
553 | done: | ||
554 | return ret; | ||
555 | } | ||
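The PBC consistency check above is easier to follow with concrete numbers: a 64-byte header is nw = 16 dwords, so pktnwc starts at 15 because the first PBC word is not counted; if the PBC advertises pktnw = 527, the remaining iovec elements must contribute exactly (527 - 15) * 4 = 2048 bytes. A compressed sketch of that bookkeeping (helper name and signature are illustrative):

/* Illustrative: check iovec payload length against the PBC dword count. */
static int example_check_pbc(u32 pktnw, const struct iovec *iov,
			     unsigned long niov, size_t hdr_len)
{
	u32 pktnwc = (hdr_len >> 2) - 1;	/* first PBC word not counted */
	unsigned long i;

	for (i = 0; i < niov; i++)
		pktnwc += iov[i].iov_len >> 2;

	return pktnwc == pktnw ? 0 : -EINVAL;
}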
556 | |||
557 | static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq, | ||
558 | u32 c) | ||
559 | { | ||
560 | pq->sent_counter = c; | ||
561 | } | ||
562 | |||
563 | /* try to clean out queue -- needs pq->lock */ | ||
564 | static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd, | ||
565 | struct qib_user_sdma_queue *pq) | ||
566 | { | ||
567 | struct qib_devdata *dd = ppd->dd; | ||
568 | struct list_head free_list; | ||
569 | struct qib_user_sdma_pkt *pkt; | ||
570 | struct qib_user_sdma_pkt *pkt_prev; | ||
571 | int ret = 0; | ||
572 | |||
573 | INIT_LIST_HEAD(&free_list); | ||
574 | |||
575 | list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { | ||
576 | s64 descd = ppd->sdma_descq_removed - pkt->added; | ||
577 | |||
578 | if (descd < 0) | ||
579 | break; | ||
580 | |||
581 | list_move_tail(&pkt->list, &free_list); | ||
582 | |||
583 | /* one more packet cleaned */ | ||
584 | ret++; | ||
585 | } | ||
586 | |||
587 | if (!list_empty(&free_list)) { | ||
588 | u32 counter; | ||
589 | |||
590 | pkt = list_entry(free_list.prev, | ||
591 | struct qib_user_sdma_pkt, list); | ||
592 | counter = pkt->counter; | ||
593 | |||
594 | qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); | ||
595 | qib_user_sdma_set_complete_counter(pq, counter); | ||
596 | } | ||
597 | |||
598 | return ret; | ||
599 | } | ||
600 | |||
601 | void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq) | ||
602 | { | ||
603 | if (!pq) | ||
604 | return; | ||
605 | |||
606 | kmem_cache_destroy(pq->pkt_slab); | ||
607 | dma_pool_destroy(pq->header_cache); | ||
608 | kfree(pq); | ||
609 | } | ||
610 | |||
611 | /* clean descriptor queue, returns > 0 if some elements cleaned */ | ||
612 | static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd) | ||
613 | { | ||
614 | int ret; | ||
615 | unsigned long flags; | ||
616 | |||
617 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
618 | ret = qib_sdma_make_progress(ppd); | ||
619 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
620 | |||
621 | return ret; | ||
622 | } | ||
623 | |||
624 | /* we're in close, drain packets so that we can clean up successfully... */ | ||
625 | void qib_user_sdma_queue_drain(struct qib_pportdata *ppd, | ||
626 | struct qib_user_sdma_queue *pq) | ||
627 | { | ||
628 | struct qib_devdata *dd = ppd->dd; | ||
629 | int i; | ||
630 | |||
631 | if (!pq) | ||
632 | return; | ||
633 | |||
634 | for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) { | ||
635 | mutex_lock(&pq->lock); | ||
636 | if (list_empty(&pq->sent)) { | ||
637 | mutex_unlock(&pq->lock); | ||
638 | break; | ||
639 | } | ||
640 | qib_user_sdma_hwqueue_clean(ppd); | ||
641 | qib_user_sdma_queue_clean(ppd, pq); | ||
642 | mutex_unlock(&pq->lock); | ||
643 | msleep(10); | ||
644 | } | ||
645 | |||
646 | if (!list_empty(&pq->sent)) { | ||
647 | struct list_head free_list; | ||
648 | |||
649 | qib_dev_err(dd, "user sdma lists not empty: forcing!\n"); | ||
650 | INIT_LIST_HEAD(&free_list); | ||
651 | mutex_lock(&pq->lock); | ||
652 | list_splice_init(&pq->sent, &free_list); | ||
653 | qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); | ||
654 | mutex_unlock(&pq->lock); | ||
655 | } | ||
656 | } | ||
657 | |||
658 | static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd, | ||
659 | u64 addr, u64 dwlen, u64 dwoffset) | ||
660 | { | ||
661 | u8 tmpgen; | ||
662 | |||
663 | tmpgen = ppd->sdma_generation; | ||
664 | |||
665 | return cpu_to_le64(/* SDmaPhyAddr[31:0] */ | ||
666 | ((addr & 0xfffffffcULL) << 32) | | ||
667 | /* SDmaGeneration[1:0] */ | ||
668 | ((tmpgen & 3ULL) << 30) | | ||
669 | /* SDmaDwordCount[10:0] */ | ||
670 | ((dwlen & 0x7ffULL) << 16) | | ||
671 | /* SDmaBufOffset[12:2] */ | ||
672 | (dwoffset & 0x7ffULL)); | ||
673 | } | ||
674 | |||
675 | static inline __le64 qib_sdma_make_first_desc0(__le64 descq) | ||
676 | { | ||
677 | return descq | cpu_to_le64(1ULL << 12); | ||
678 | } | ||
679 | |||
680 | static inline __le64 qib_sdma_make_last_desc0(__le64 descq) | ||
681 | { | ||
682 | /* last */ /* dma head */ | ||
683 | return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13); | ||
684 | } | ||
685 | |||
686 | static inline __le64 qib_sdma_make_desc1(u64 addr) | ||
687 | { | ||
688 | /* SDmaPhyAddr[47:32] */ | ||
689 | return cpu_to_le64(addr >> 32); | ||
690 | } | ||
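The desc0/desc1 helpers above pack one two-qword SDMA descriptor: DMA address bits 31:0 (dword aligned) go in bits 63:32 of qword 0, the 2-bit generation in bits 31:30, the 11-bit dword count in bits 26:16 and the dword buffer offset in bits 10:0, with bits 12, 11 and 13 flagging first, last and head-update descriptors; qword 1 carries address bits 47:32. A decode sketch of the same layout, purely illustrative:

/* Illustrative: unpack the fields that qib_sdma_make_desc0() packs. */
static void example_decode_desc0(__le64 desc0)
{
	u64 d = le64_to_cpu(desc0);
	u32 addr_lo = (d >> 32) & 0xfffffffc;	/* SDmaPhyAddr[31:0] */
	u8  gen     = (d >> 30) & 3;		/* SDmaGeneration[1:0] */
	u32 dwlen   = (d >> 16) & 0x7ff;	/* SDmaDwordCount[10:0] */
	u32 dwoff   = d & 0x7ff;		/* SDmaBufOffset[12:2] */

	pr_debug("addr_lo %#x gen %u dwlen %u dwoff %u first %d last %d\n",
		 addr_lo, gen, dwlen, dwoff,
		 !!(d & (1ULL << 12)), !!(d & (1ULL << 11)));
}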
691 | |||
692 | static void qib_user_sdma_send_frag(struct qib_pportdata *ppd, | ||
693 | struct qib_user_sdma_pkt *pkt, int idx, | ||
694 | unsigned ofs, u16 tail) | ||
695 | { | ||
696 | const u64 addr = (u64) pkt->addr[idx].addr + | ||
697 | (u64) pkt->addr[idx].offset; | ||
698 | const u64 dwlen = (u64) pkt->addr[idx].length / 4; | ||
699 | __le64 *descqp; | ||
700 | __le64 descq0; | ||
701 | |||
702 | descqp = &ppd->sdma_descq[tail].qw[0]; | ||
703 | |||
704 | descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs); | ||
705 | if (idx == 0) | ||
706 | descq0 = qib_sdma_make_first_desc0(descq0); | ||
707 | if (idx == pkt->naddr - 1) | ||
708 | descq0 = qib_sdma_make_last_desc0(descq0); | ||
709 | |||
710 | descqp[0] = descq0; | ||
711 | descqp[1] = qib_sdma_make_desc1(addr); | ||
712 | } | ||
713 | |||
714 | /* pq->lock must be held, get packets on the wire... */ | ||
715 | static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd, | ||
716 | struct qib_user_sdma_queue *pq, | ||
717 | struct list_head *pktlist) | ||
718 | { | ||
719 | struct qib_devdata *dd = ppd->dd; | ||
720 | int ret = 0; | ||
721 | unsigned long flags; | ||
722 | u16 tail; | ||
723 | u8 generation; | ||
724 | u64 descq_added; | ||
725 | |||
726 | if (list_empty(pktlist)) | ||
727 | return 0; | ||
728 | |||
729 | if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE))) | ||
730 | return -ECOMM; | ||
731 | |||
732 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
733 | |||
734 | /* keep a copy for restoring purposes in case of problems */ | ||
735 | generation = ppd->sdma_generation; | ||
736 | descq_added = ppd->sdma_descq_added; | ||
737 | |||
738 | if (unlikely(!__qib_sdma_running(ppd))) { | ||
739 | ret = -ECOMM; | ||
740 | goto unlock; | ||
741 | } | ||
742 | |||
743 | tail = ppd->sdma_descq_tail; | ||
744 | while (!list_empty(pktlist)) { | ||
745 | struct qib_user_sdma_pkt *pkt = | ||
746 | list_entry(pktlist->next, struct qib_user_sdma_pkt, | ||
747 | list); | ||
748 | int i; | ||
749 | unsigned ofs = 0; | ||
750 | u16 dtail = tail; | ||
751 | |||
752 | if (pkt->naddr > qib_sdma_descq_freecnt(ppd)) | ||
753 | goto unlock_check_tail; | ||
754 | |||
755 | for (i = 0; i < pkt->naddr; i++) { | ||
756 | qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail); | ||
757 | ofs += pkt->addr[i].length >> 2; | ||
758 | |||
759 | if (++tail == ppd->sdma_descq_cnt) { | ||
760 | tail = 0; | ||
761 | ++ppd->sdma_generation; | ||
762 | } | ||
763 | } | ||
764 | |||
765 | if ((ofs << 2) > ppd->ibmaxlen) { | ||
766 | ret = -EMSGSIZE; | ||
767 | goto unlock; | ||
768 | } | ||
769 | |||
770 | /* | ||
771 | * If the packet is >= 2KB mtu equivalent, we have to use | ||
772 | * the large buffers, and have to mark each descriptor as | ||
773 | * part of a large buffer packet. | ||
774 | */ | ||
775 | if (ofs > dd->piosize2kmax_dwords) { | ||
776 | for (i = 0; i < pkt->naddr; i++) { | ||
777 | ppd->sdma_descq[dtail].qw[0] |= | ||
778 | cpu_to_le64(1ULL << 14); | ||
779 | if (++dtail == ppd->sdma_descq_cnt) | ||
780 | dtail = 0; | ||
781 | } | ||
782 | } | ||
783 | |||
784 | ppd->sdma_descq_added += pkt->naddr; | ||
785 | pkt->added = ppd->sdma_descq_added; | ||
786 | list_move_tail(&pkt->list, &pq->sent); | ||
787 | ret++; | ||
788 | } | ||
789 | |||
790 | unlock_check_tail: | ||
791 | /* advance the tail on the chip if necessary */ | ||
792 | if (ppd->sdma_descq_tail != tail) | ||
793 | dd->f_sdma_update_tail(ppd, tail); | ||
794 | |||
795 | unlock: | ||
796 | if (unlikely(ret < 0)) { | ||
797 | ppd->sdma_generation = generation; | ||
798 | ppd->sdma_descq_added = descq_added; | ||
799 | } | ||
800 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
801 | |||
802 | return ret; | ||
803 | } | ||
804 | |||
805 | int qib_user_sdma_writev(struct qib_ctxtdata *rcd, | ||
806 | struct qib_user_sdma_queue *pq, | ||
807 | const struct iovec *iov, | ||
808 | unsigned long dim) | ||
809 | { | ||
810 | struct qib_devdata *dd = rcd->dd; | ||
811 | struct qib_pportdata *ppd = rcd->ppd; | ||
812 | int ret = 0; | ||
813 | struct list_head list; | ||
814 | int npkts = 0; | ||
815 | |||
816 | INIT_LIST_HEAD(&list); | ||
817 | |||
818 | mutex_lock(&pq->lock); | ||
819 | |||
820 | /* why not -ECOMM like qib_user_sdma_push_pkts() below? */ | ||
821 | if (!qib_sdma_running(ppd)) | ||
822 | goto done_unlock; | ||
823 | |||
824 | if (ppd->sdma_descq_added != ppd->sdma_descq_removed) { | ||
825 | qib_user_sdma_hwqueue_clean(ppd); | ||
826 | qib_user_sdma_queue_clean(ppd, pq); | ||
827 | } | ||
828 | |||
829 | while (dim) { | ||
830 | const int mxp = 8; | ||
831 | |||
832 | down_write(¤t->mm->mmap_sem); | ||
833 | ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp); | ||
834 | up_write(¤t->mm->mmap_sem); | ||
835 | |||
836 | if (ret <= 0) | ||
837 | goto done_unlock; | ||
838 | else { | ||
839 | dim -= ret; | ||
840 | iov += ret; | ||
841 | } | ||
842 | |||
843 | /* force packets onto the sdma hw queue... */ | ||
844 | if (!list_empty(&list)) { | ||
845 | /* | ||
846 | * Lazily clean hw queue. the 4 is a guess of about | ||
847 | * how many sdma descriptors a packet will take (it | ||
848 | * doesn't have to be perfect). | ||
849 | */ | ||
850 | if (qib_sdma_descq_freecnt(ppd) < ret * 4) { | ||
851 | qib_user_sdma_hwqueue_clean(ppd); | ||
852 | qib_user_sdma_queue_clean(ppd, pq); | ||
853 | } | ||
854 | |||
855 | ret = qib_user_sdma_push_pkts(ppd, pq, &list); | ||
856 | if (ret < 0) | ||
857 | goto done_unlock; | ||
858 | else { | ||
859 | npkts += ret; | ||
860 | pq->counter += ret; | ||
861 | |||
862 | if (!list_empty(&list)) | ||
863 | goto done_unlock; | ||
864 | } | ||
865 | } | ||
866 | } | ||
867 | |||
868 | done_unlock: | ||
869 | if (!list_empty(&list)) | ||
870 | qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list); | ||
871 | mutex_unlock(&pq->lock); | ||
872 | |||
873 | return (ret < 0) ? ret : npkts; | ||
874 | } | ||
875 | |||
876 | int qib_user_sdma_make_progress(struct qib_pportdata *ppd, | ||
877 | struct qib_user_sdma_queue *pq) | ||
878 | { | ||
879 | int ret = 0; | ||
880 | |||
881 | mutex_lock(&pq->lock); | ||
882 | qib_user_sdma_hwqueue_clean(ppd); | ||
883 | ret = qib_user_sdma_queue_clean(ppd, pq); | ||
884 | mutex_unlock(&pq->lock); | ||
885 | |||
886 | return ret; | ||
887 | } | ||
888 | |||
889 | u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq) | ||
890 | { | ||
891 | return pq ? pq->sent_counter : 0; | ||
892 | } | ||
893 | |||
894 | u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq) | ||
895 | { | ||
896 | return pq ? pq->counter : 0; | ||
897 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h new file mode 100644 index 000000000000..ce8cbaf6a5c2 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_user_sdma.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | #include <linux/device.h> | ||
33 | |||
34 | struct qib_user_sdma_queue; | ||
35 | |||
36 | struct qib_user_sdma_queue * | ||
37 | qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport); | ||
38 | void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq); | ||
39 | |||
40 | int qib_user_sdma_writev(struct qib_ctxtdata *pd, | ||
41 | struct qib_user_sdma_queue *pq, | ||
42 | const struct iovec *iov, | ||
43 | unsigned long dim); | ||
44 | |||
45 | int qib_user_sdma_make_progress(struct qib_pportdata *ppd, | ||
46 | struct qib_user_sdma_queue *pq); | ||
47 | |||
48 | void qib_user_sdma_queue_drain(struct qib_pportdata *ppd, | ||
49 | struct qib_user_sdma_queue *pq); | ||
50 | |||
51 | u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq); | ||
52 | u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq); | ||
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c new file mode 100644 index 000000000000..cda8f4173d23 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
@@ -0,0 +1,2248 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #include <rdma/ib_mad.h> | ||
36 | #include <rdma/ib_user_verbs.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <linux/utsname.h> | ||
39 | #include <linux/rculist.h> | ||
40 | #include <linux/mm.h> | ||
41 | |||
42 | #include "qib.h" | ||
43 | #include "qib_common.h" | ||
44 | |||
45 | static unsigned int ib_qib_qp_table_size = 251; | ||
46 | module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); | ||
47 | MODULE_PARM_DESC(qp_table_size, "QP table size"); | ||
48 | |||
49 | unsigned int ib_qib_lkey_table_size = 16; | ||
50 | module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint, | ||
51 | S_IRUGO); | ||
52 | MODULE_PARM_DESC(lkey_table_size, | ||
53 | "LKEY table size in bits (2^n, 1 <= n <= 23)"); | ||
54 | |||
55 | static unsigned int ib_qib_max_pds = 0xFFFF; | ||
56 | module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO); | ||
57 | MODULE_PARM_DESC(max_pds, | ||
58 | "Maximum number of protection domains to support"); | ||
59 | |||
60 | static unsigned int ib_qib_max_ahs = 0xFFFF; | ||
61 | module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO); | ||
62 | MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); | ||
63 | |||
64 | unsigned int ib_qib_max_cqes = 0x2FFFF; | ||
65 | module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO); | ||
66 | MODULE_PARM_DESC(max_cqes, | ||
67 | "Maximum number of completion queue entries to support"); | ||
68 | |||
69 | unsigned int ib_qib_max_cqs = 0x1FFFF; | ||
70 | module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO); | ||
71 | MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); | ||
72 | |||
73 | unsigned int ib_qib_max_qp_wrs = 0x3FFF; | ||
74 | module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO); | ||
75 | MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); | ||
76 | |||
77 | unsigned int ib_qib_max_qps = 16384; | ||
78 | module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO); | ||
79 | MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); | ||
80 | |||
81 | unsigned int ib_qib_max_sges = 0x60; | ||
82 | module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO); | ||
83 | MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); | ||
84 | |||
85 | unsigned int ib_qib_max_mcast_grps = 16384; | ||
86 | module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO); | ||
87 | MODULE_PARM_DESC(max_mcast_grps, | ||
88 | "Maximum number of multicast groups to support"); | ||
89 | |||
90 | unsigned int ib_qib_max_mcast_qp_attached = 16; | ||
91 | module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached, | ||
92 | uint, S_IRUGO); | ||
93 | MODULE_PARM_DESC(max_mcast_qp_attached, | ||
94 | "Maximum number of attached QPs to support"); | ||
95 | |||
96 | unsigned int ib_qib_max_srqs = 1024; | ||
97 | module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO); | ||
98 | MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); | ||
99 | |||
100 | unsigned int ib_qib_max_srq_sges = 128; | ||
101 | module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO); | ||
102 | MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); | ||
103 | |||
104 | unsigned int ib_qib_max_srq_wrs = 0x1FFFF; | ||
105 | module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO); | ||
106 | MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support"); | ||
107 | |||
108 | static unsigned int ib_qib_disable_sma; | ||
109 | module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO); | ||
110 | MODULE_PARM_DESC(disable_sma, "Disable the SMA"); | ||
111 | |||
112 | /* | ||
113 | * Note that it is OK to post send work requests in the SQE and ERR | ||
114 | * states; qib_do_send() will process them and generate error | ||
115 | * completions as per IB 1.2 C10-96. | ||
116 | */ | ||
117 | const int ib_qib_state_ops[IB_QPS_ERR + 1] = { | ||
118 | [IB_QPS_RESET] = 0, | ||
119 | [IB_QPS_INIT] = QIB_POST_RECV_OK, | ||
120 | [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK, | ||
121 | [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | | ||
122 | QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK | | ||
123 | QIB_PROCESS_NEXT_SEND_OK, | ||
124 | [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | | ||
125 | QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK, | ||
126 | [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | | ||
127 | QIB_POST_SEND_OK | QIB_FLUSH_SEND, | ||
128 | [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV | | ||
129 | QIB_POST_SEND_OK | QIB_FLUSH_SEND, | ||
130 | }; | ||
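Each QP state maps to a bitmask of allowed operations; the rest of the driver gates work by testing these bits, e.g. SQE still accepts posted sends but marks them for flush completion, per the C10-96 note above. A minimal illustration of consulting the table (the helper is hypothetical):

/* Illustrative: may a send WR be posted while the QP is in this state? */
static int example_can_post_send(const struct qib_qp *qp)
{
	return !!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK);
}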
131 | |||
132 | struct qib_ucontext { | ||
133 | struct ib_ucontext ibucontext; | ||
134 | }; | ||
135 | |||
136 | static inline struct qib_ucontext *to_iucontext(struct ib_ucontext | ||
137 | *ibucontext) | ||
138 | { | ||
139 | return container_of(ibucontext, struct qib_ucontext, ibucontext); | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Translate ib_wr_opcode into ib_wc_opcode. | ||
144 | */ | ||
145 | const enum ib_wc_opcode ib_qib_wc_opcode[] = { | ||
146 | [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, | ||
147 | [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, | ||
148 | [IB_WR_SEND] = IB_WC_SEND, | ||
149 | [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, | ||
150 | [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, | ||
151 | [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, | ||
152 | [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD | ||
153 | }; | ||
154 | |||
155 | /* | ||
156 | * System image GUID. | ||
157 | */ | ||
158 | __be64 ib_qib_sys_image_guid; | ||
159 | |||
160 | /** | ||
161 | * qib_copy_sge - copy data to SGE memory | ||
162 | * @ss: the SGE state | ||
163 | * @data: the data to copy | ||
164 | * @length: the length of the data | ||
165 | */ | ||
166 | void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) | ||
167 | { | ||
168 | struct qib_sge *sge = &ss->sge; | ||
169 | |||
170 | while (length) { | ||
171 | u32 len = sge->length; | ||
172 | |||
173 | if (len > length) | ||
174 | len = length; | ||
175 | if (len > sge->sge_length) | ||
176 | len = sge->sge_length; | ||
177 | BUG_ON(len == 0); | ||
178 | memcpy(sge->vaddr, data, len); | ||
179 | sge->vaddr += len; | ||
180 | sge->length -= len; | ||
181 | sge->sge_length -= len; | ||
182 | if (sge->sge_length == 0) { | ||
183 | if (release) | ||
184 | atomic_dec(&sge->mr->refcount); | ||
185 | if (--ss->num_sge) | ||
186 | *sge = *ss->sg_list++; | ||
187 | } else if (sge->length == 0 && sge->mr->lkey) { | ||
188 | if (++sge->n >= QIB_SEGSZ) { | ||
189 | if (++sge->m >= sge->mr->mapsz) | ||
190 | break; | ||
191 | sge->n = 0; | ||
192 | } | ||
193 | sge->vaddr = | ||
194 | sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
195 | sge->length = | ||
196 | sge->mr->map[sge->m]->segs[sge->n].length; | ||
197 | } | ||
198 | data += len; | ||
199 | length -= len; | ||
200 | } | ||
201 | } | ||
202 | |||
203 | /** | ||
204 | * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func | ||
205 | * @ss: the SGE state | ||
206 | * @length: the number of bytes to skip | ||
207 | */ | ||
208 | void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) | ||
209 | { | ||
210 | struct qib_sge *sge = &ss->sge; | ||
211 | |||
212 | while (length) { | ||
213 | u32 len = sge->length; | ||
214 | |||
215 | if (len > length) | ||
216 | len = length; | ||
217 | if (len > sge->sge_length) | ||
218 | len = sge->sge_length; | ||
219 | BUG_ON(len == 0); | ||
220 | sge->vaddr += len; | ||
221 | sge->length -= len; | ||
222 | sge->sge_length -= len; | ||
223 | if (sge->sge_length == 0) { | ||
224 | if (release) | ||
225 | atomic_dec(&sge->mr->refcount); | ||
226 | if (--ss->num_sge) | ||
227 | *sge = *ss->sg_list++; | ||
228 | } else if (sge->length == 0 && sge->mr->lkey) { | ||
229 | if (++sge->n >= QIB_SEGSZ) { | ||
230 | if (++sge->m >= sge->mr->mapsz) | ||
231 | break; | ||
232 | sge->n = 0; | ||
233 | } | ||
234 | sge->vaddr = | ||
235 | sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
236 | sge->length = | ||
237 | sge->mr->map[sge->m]->segs[sge->n].length; | ||
238 | } | ||
239 | length -= len; | ||
240 | } | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * Count the number of DMA descriptors needed to send length bytes of data. | ||
245 | * Don't modify the qib_sge_state to get the count. | ||
246 | * Return zero if any of the segments is not aligned. | ||
247 | */ | ||
248 | static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) | ||
249 | { | ||
250 | struct qib_sge *sg_list = ss->sg_list; | ||
251 | struct qib_sge sge = ss->sge; | ||
252 | u8 num_sge = ss->num_sge; | ||
253 | u32 ndesc = 1; /* count the header */ | ||
254 | |||
255 | while (length) { | ||
256 | u32 len = sge.length; | ||
257 | |||
258 | if (len > length) | ||
259 | len = length; | ||
260 | if (len > sge.sge_length) | ||
261 | len = sge.sge_length; | ||
262 | BUG_ON(len == 0); | ||
263 | if (((long) sge.vaddr & (sizeof(u32) - 1)) || | ||
264 | (len != length && (len & (sizeof(u32) - 1)))) { | ||
265 | ndesc = 0; | ||
266 | break; | ||
267 | } | ||
268 | ndesc++; | ||
269 | sge.vaddr += len; | ||
270 | sge.length -= len; | ||
271 | sge.sge_length -= len; | ||
272 | if (sge.sge_length == 0) { | ||
273 | if (--num_sge) | ||
274 | sge = *sg_list++; | ||
275 | } else if (sge.length == 0 && sge.mr->lkey) { | ||
276 | if (++sge.n >= QIB_SEGSZ) { | ||
277 | if (++sge.m >= sge.mr->mapsz) | ||
278 | break; | ||
279 | sge.n = 0; | ||
280 | } | ||
281 | sge.vaddr = | ||
282 | sge.mr->map[sge.m]->segs[sge.n].vaddr; | ||
283 | sge.length = | ||
284 | sge.mr->map[sge.m]->segs[sge.n].length; | ||
285 | } | ||
286 | length -= len; | ||
287 | } | ||
288 | return ndesc; | ||
289 | } | ||
290 | |||
291 | /* | ||
292 | * Copy from the SGEs to the data buffer. | ||
293 | */ | ||
294 | static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) | ||
295 | { | ||
296 | struct qib_sge *sge = &ss->sge; | ||
297 | |||
298 | while (length) { | ||
299 | u32 len = sge->length; | ||
300 | |||
301 | if (len > length) | ||
302 | len = length; | ||
303 | if (len > sge->sge_length) | ||
304 | len = sge->sge_length; | ||
305 | BUG_ON(len == 0); | ||
306 | memcpy(data, sge->vaddr, len); | ||
307 | sge->vaddr += len; | ||
308 | sge->length -= len; | ||
309 | sge->sge_length -= len; | ||
310 | if (sge->sge_length == 0) { | ||
311 | if (--ss->num_sge) | ||
312 | *sge = *ss->sg_list++; | ||
313 | } else if (sge->length == 0 && sge->mr->lkey) { | ||
314 | if (++sge->n >= QIB_SEGSZ) { | ||
315 | if (++sge->m >= sge->mr->mapsz) | ||
316 | break; | ||
317 | sge->n = 0; | ||
318 | } | ||
319 | sge->vaddr = | ||
320 | sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
321 | sge->length = | ||
322 | sge->mr->map[sge->m]->segs[sge->n].length; | ||
323 | } | ||
324 | data += len; | ||
325 | length -= len; | ||
326 | } | ||
327 | } | ||
328 | |||
329 | /** | ||
330 | * qib_post_one_send - post one RC, UC, or UD send work request | ||
331 | * @qp: the QP to post on | ||
332 | * @wr: the work request to send | ||
333 | */ | ||
334 | static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr) | ||
335 | { | ||
336 | struct qib_swqe *wqe; | ||
337 | u32 next; | ||
338 | int i; | ||
339 | int j; | ||
340 | int acc; | ||
341 | int ret; | ||
342 | unsigned long flags; | ||
343 | struct qib_lkey_table *rkt; | ||
344 | struct qib_pd *pd; | ||
345 | |||
346 | spin_lock_irqsave(&qp->s_lock, flags); | ||
347 | |||
348 | /* Check that state is OK to post send. */ | ||
349 | if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) | ||
350 | goto bail_inval; | ||
351 | |||
352 | /* IB spec says that num_sge == 0 is OK. */ | ||
353 | if (wr->num_sge > qp->s_max_sge) | ||
354 | goto bail_inval; | ||
355 | |||
356 | /* | ||
357 | * Don't allow RDMA reads or atomic operations on UC or | ||
358 | * undefined operations. | ||
359 | * Make sure buffer is large enough to hold the result for atomics. | ||
360 | */ | ||
361 | if (wr->opcode == IB_WR_FAST_REG_MR) { | ||
362 | if (qib_fast_reg_mr(qp, wr)) | ||
363 | goto bail_inval; | ||
364 | } else if (qp->ibqp.qp_type == IB_QPT_UC) { | ||
365 | if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) | ||
366 | goto bail_inval; | ||
367 | } else if (qp->ibqp.qp_type != IB_QPT_RC) { | ||
368 | /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */ | ||
369 | if (wr->opcode != IB_WR_SEND && | ||
370 | wr->opcode != IB_WR_SEND_WITH_IMM) | ||
371 | goto bail_inval; | ||
372 | /* Check UD destination address PD */ | ||
373 | if (qp->ibqp.pd != wr->wr.ud.ah->pd) | ||
374 | goto bail_inval; | ||
375 | } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) | ||
376 | goto bail_inval; | ||
377 | else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && | ||
378 | (wr->num_sge == 0 || | ||
379 | wr->sg_list[0].length < sizeof(u64) || | ||
380 | wr->sg_list[0].addr & (sizeof(u64) - 1))) | ||
381 | goto bail_inval; | ||
382 | else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) | ||
383 | goto bail_inval; | ||
384 | |||
385 | next = qp->s_head + 1; | ||
386 | if (next >= qp->s_size) | ||
387 | next = 0; | ||
388 | if (next == qp->s_last) { | ||
389 | ret = -ENOMEM; | ||
390 | goto bail; | ||
391 | } | ||
392 | |||
393 | rkt = &to_idev(qp->ibqp.device)->lk_table; | ||
394 | pd = to_ipd(qp->ibqp.pd); | ||
395 | wqe = get_swqe_ptr(qp, qp->s_head); | ||
396 | wqe->wr = *wr; | ||
397 | wqe->length = 0; | ||
398 | j = 0; | ||
399 | if (wr->num_sge) { | ||
400 | acc = wr->opcode >= IB_WR_RDMA_READ ? | ||
401 | IB_ACCESS_LOCAL_WRITE : 0; | ||
402 | for (i = 0; i < wr->num_sge; i++) { | ||
403 | u32 length = wr->sg_list[i].length; | ||
404 | int ok; | ||
405 | |||
406 | if (length == 0) | ||
407 | continue; | ||
408 | ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], | ||
409 | &wr->sg_list[i], acc); | ||
410 | if (!ok) | ||
411 | goto bail_inval_free; | ||
412 | wqe->length += length; | ||
413 | j++; | ||
414 | } | ||
415 | wqe->wr.num_sge = j; | ||
416 | } | ||
417 | if (qp->ibqp.qp_type == IB_QPT_UC || | ||
418 | qp->ibqp.qp_type == IB_QPT_RC) { | ||
419 | if (wqe->length > 0x80000000U) | ||
420 | goto bail_inval_free; | ||
421 | } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + | ||
422 | qp->port_num - 1)->ibmtu) | ||
423 | goto bail_inval_free; | ||
424 | else | ||
425 | atomic_inc(&to_iah(wr->wr.ud.ah)->refcount); | ||
426 | wqe->ssn = qp->s_ssn++; | ||
427 | qp->s_head = next; | ||
428 | |||
429 | ret = 0; | ||
430 | goto bail; | ||
431 | |||
432 | bail_inval_free: | ||
433 | while (j) { | ||
434 | struct qib_sge *sge = &wqe->sg_list[--j]; | ||
435 | |||
436 | atomic_dec(&sge->mr->refcount); | ||
437 | } | ||
438 | bail_inval: | ||
439 | ret = -EINVAL; | ||
440 | bail: | ||
441 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
442 | return ret; | ||
443 | } | ||
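The send queue walked by qib_post_one_send() is a ring: s_head advances only after the WQE is fully built, and advancing it onto s_last means the ring is full, which deliberately leaves one slot unused so that full and empty remain distinguishable. The same wrap check in isolation (names are illustrative):

/* Illustrative: circular send-queue index arithmetic. */
static int example_next_slot(u32 head, u32 last, u32 size, u32 *next)
{
	*next = head + 1;
	if (*next >= size)
		*next = 0;
	return *next == last ? -ENOMEM : 0;	/* full: one slot kept free */
}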
444 | |||
445 | /** | ||
446 | * qib_post_send - post a send on a QP | ||
447 | * @ibqp: the QP to post the send on | ||
448 | * @wr: the list of work requests to post | ||
449 | * @bad_wr: the first bad WR is put here | ||
450 | * | ||
451 | * This may be called from interrupt context. | ||
452 | */ | ||
453 | static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | ||
454 | struct ib_send_wr **bad_wr) | ||
455 | { | ||
456 | struct qib_qp *qp = to_iqp(ibqp); | ||
457 | int err = 0; | ||
458 | |||
459 | for (; wr; wr = wr->next) { | ||
460 | err = qib_post_one_send(qp, wr); | ||
461 | if (err) { | ||
462 | *bad_wr = wr; | ||
463 | goto bail; | ||
464 | } | ||
465 | } | ||
466 | |||
467 | /* Try to do the send work in the caller's context. */ | ||
468 | qib_do_send(&qp->s_work); | ||
469 | |||
470 | bail: | ||
471 | return err; | ||
472 | } | ||
473 | |||
474 | /** | ||
475 | * qib_post_receive - post a receive on a QP | ||
476 | * @ibqp: the QP to post the receive on | ||
477 | * @wr: the WR to post | ||
478 | * @bad_wr: the first bad WR is put here | ||
479 | * | ||
480 | * This may be called from interrupt context. | ||
481 | */ | ||
482 | static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | ||
483 | struct ib_recv_wr **bad_wr) | ||
484 | { | ||
485 | struct qib_qp *qp = to_iqp(ibqp); | ||
486 | struct qib_rwq *wq = qp->r_rq.wq; | ||
487 | unsigned long flags; | ||
488 | int ret; | ||
489 | |||
490 | /* Check that state is OK to post receive. */ | ||
491 | if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { | ||
492 | *bad_wr = wr; | ||
493 | ret = -EINVAL; | ||
494 | goto bail; | ||
495 | } | ||
496 | |||
497 | for (; wr; wr = wr->next) { | ||
498 | struct qib_rwqe *wqe; | ||
499 | u32 next; | ||
500 | int i; | ||
501 | |||
502 | if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { | ||
503 | *bad_wr = wr; | ||
504 | ret = -EINVAL; | ||
505 | goto bail; | ||
506 | } | ||
507 | |||
508 | spin_lock_irqsave(&qp->r_rq.lock, flags); | ||
509 | next = wq->head + 1; | ||
510 | if (next >= qp->r_rq.size) | ||
511 | next = 0; | ||
512 | if (next == wq->tail) { | ||
513 | spin_unlock_irqrestore(&qp->r_rq.lock, flags); | ||
514 | *bad_wr = wr; | ||
515 | ret = -ENOMEM; | ||
516 | goto bail; | ||
517 | } | ||
518 | |||
519 | wqe = get_rwqe_ptr(&qp->r_rq, wq->head); | ||
520 | wqe->wr_id = wr->wr_id; | ||
521 | wqe->num_sge = wr->num_sge; | ||
522 | for (i = 0; i < wr->num_sge; i++) | ||
523 | wqe->sg_list[i] = wr->sg_list[i]; | ||
524 | /* Make sure queue entry is written before the head index. */ | ||
525 | smp_wmb(); | ||
526 | wq->head = next; | ||
527 | spin_unlock_irqrestore(&qp->r_rq.lock, flags); | ||
528 | } | ||
529 | ret = 0; | ||
530 | |||
531 | bail: | ||
532 | return ret; | ||
533 | } | ||
534 | |||
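The smp_wmb() above is the producer half of a lock-free hand-off: the receive WQE must be globally visible before the updated wq->head is, so a consumer that observes the new head (pairing with a read barrier on its side) never reads a half-written entry. A rough userspace sketch of the same publish step, using C11 release semantics as a stand-in for the kernel barrier; the ring layout is simplified and the names are hypothetical:

#include <stdatomic.h>
#include <stdint.h>

#define RQ_SIZE 16

struct rwqe {                           /* stand-in for struct qib_rwqe */
        uint64_t wr_id;
        uint32_t num_sge;
};

struct rq {
        struct rwqe entries[RQ_SIZE];
        _Atomic unsigned head;          /* written by the producer */
        _Atomic unsigned tail;          /* written by the consumer */
};

/* Producer: fill the entry, then publish it by advancing head. */
static int post_one(struct rq *rq, uint64_t wr_id)
{
        unsigned head = atomic_load_explicit(&rq->head, memory_order_relaxed);
        unsigned next = (head + 1) % RQ_SIZE;

        if (next == atomic_load_explicit(&rq->tail, memory_order_acquire))
                return -1;              /* full, like the -ENOMEM path above */

        rq->entries[head].wr_id = wr_id;
        rq->entries[head].num_sge = 0;
        /*
         * Release ordering plays the role of smp_wmb(): the entry contents
         * become visible no later than the new head value.
         */
        atomic_store_explicit(&rq->head, next, memory_order_release);
        return 0;
}

int main(void)
{
        static struct rq rq;

        return post_one(&rq, 42);       /* returns 0: one entry published */
}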
535 | /** | ||
536 | * qib_qp_rcv - process an incoming packet on a QP | ||
537 | * @rcd: the context pointer | ||
538 | * @hdr: the packet header | ||
539 | * @has_grh: true if the packet has a GRH | ||
540 | * @data: the packet data | ||
541 | * @tlen: the packet length | ||
542 | * @qp: the QP the packet came on | ||
543 | * | ||
544 | * This is called from qib_ib_rcv() to process an incoming packet | ||
545 | * for the given QP. | ||
546 | * Called at interrupt level. | ||
547 | */ | ||
548 | static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | ||
549 | int has_grh, void *data, u32 tlen, struct qib_qp *qp) | ||
550 | { | ||
551 | struct qib_ibport *ibp = &rcd->ppd->ibport_data; | ||
552 | |||
553 | /* Check for valid receive state. */ | ||
554 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { | ||
555 | ibp->n_pkt_drops++; | ||
556 | return; | ||
557 | } | ||
558 | |||
559 | switch (qp->ibqp.qp_type) { | ||
560 | case IB_QPT_SMI: | ||
561 | case IB_QPT_GSI: | ||
562 | if (ib_qib_disable_sma) | ||
563 | break; | ||
564 | /* FALLTHROUGH */ | ||
565 | case IB_QPT_UD: | ||
566 | qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); | ||
567 | break; | ||
568 | |||
569 | case IB_QPT_RC: | ||
570 | qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp); | ||
571 | break; | ||
572 | |||
573 | case IB_QPT_UC: | ||
574 | qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp); | ||
575 | break; | ||
576 | |||
577 | default: | ||
578 | break; | ||
579 | } | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * qib_ib_rcv - process an incoming packet | ||
584 | * @rcd: the context pointer | ||
585 | * @rhdr: the header of the packet | ||
586 | * @data: the packet payload | ||
587 | * @tlen: the packet length | ||
588 | * | ||
589 | * This is called from qib_kreceive() to process an incoming packet at | ||
590 | * interrupt level. Tlen is the length of the header + data + CRC in bytes. | ||
591 | */ | ||
592 | void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) | ||
593 | { | ||
594 | struct qib_pportdata *ppd = rcd->ppd; | ||
595 | struct qib_ibport *ibp = &ppd->ibport_data; | ||
596 | struct qib_ib_header *hdr = rhdr; | ||
597 | struct qib_other_headers *ohdr; | ||
598 | struct qib_qp *qp; | ||
599 | u32 qp_num; | ||
600 | int lnh; | ||
601 | u8 opcode; | ||
602 | u16 lid; | ||
603 | |||
604 | /* 24 == LRH+BTH+CRC */ | ||
605 | if (unlikely(tlen < 24)) | ||
606 | goto drop; | ||
607 | |||
608 | /* Check for a valid destination LID (see ch. 7.11.1). */ | ||
609 | lid = be16_to_cpu(hdr->lrh[1]); | ||
610 | if (lid < QIB_MULTICAST_LID_BASE) { | ||
611 | lid &= ~((1 << ppd->lmc) - 1); | ||
612 | if (unlikely(lid != ppd->lid)) | ||
613 | goto drop; | ||
614 | } | ||
615 | |||
616 | /* Check for GRH */ | ||
617 | lnh = be16_to_cpu(hdr->lrh[0]) & 3; | ||
618 | if (lnh == QIB_LRH_BTH) | ||
619 | ohdr = &hdr->u.oth; | ||
620 | else if (lnh == QIB_LRH_GRH) { | ||
621 | u32 vtf; | ||
622 | |||
623 | ohdr = &hdr->u.l.oth; | ||
624 | if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) | ||
625 | goto drop; | ||
626 | vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); | ||
627 | if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) | ||
628 | goto drop; | ||
629 | } else | ||
630 | goto drop; | ||
631 | |||
632 | opcode = be32_to_cpu(ohdr->bth[0]) >> 24; | ||
633 | ibp->opstats[opcode & 0x7f].n_bytes += tlen; | ||
634 | ibp->opstats[opcode & 0x7f].n_packets++; | ||
635 | |||
636 | /* Get the destination QP number. */ | ||
637 | qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; | ||
638 | if (qp_num == QIB_MULTICAST_QPN) { | ||
639 | struct qib_mcast *mcast; | ||
640 | struct qib_mcast_qp *p; | ||
641 | |||
642 | if (lnh != QIB_LRH_GRH) | ||
643 | goto drop; | ||
644 | mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); | ||
645 | if (mcast == NULL) | ||
646 | goto drop; | ||
647 | ibp->n_multicast_rcv++; | ||
648 | list_for_each_entry_rcu(p, &mcast->qp_list, list) | ||
649 | qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); | ||
650 | /* | ||
651 | * Notify qib_multicast_detach() if it is waiting for us | ||
652 | * to finish. | ||
653 | */ | ||
654 | if (atomic_dec_return(&mcast->refcount) <= 1) | ||
655 | wake_up(&mcast->wait); | ||
656 | } else { | ||
657 | qp = qib_lookup_qpn(ibp, qp_num); | ||
658 | if (!qp) | ||
659 | goto drop; | ||
660 | ibp->n_unicast_rcv++; | ||
661 | qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); | ||
662 | /* | ||
663 | * Notify qib_destroy_qp() if it is waiting | ||
664 | * for us to finish. | ||
665 | */ | ||
666 | if (atomic_dec_and_test(&qp->refcount)) | ||
667 | wake_up(&qp->wait); | ||
668 | } | ||
669 | return; | ||
670 | |||
671 | drop: | ||
672 | ibp->n_pkt_drops++; | ||
673 | } | ||
674 | |||
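Header parsing in qib_ib_rcv() is plain bit slicing once the words are converted from big-endian: the LNH sits in the low two bits of lrh[0], the destination LID in lrh[1] (with the low LMC bits masked off before comparing against the port LID), the opcode in the top byte of bth[0], and the destination QP number in the low 24 bits of bth[1]. A small standalone sketch of those extractions on host-order values (the mask and sample words are illustrative, not taken from a real packet):

#include <stdio.h>
#include <stdint.h>

#define QPN_MASK 0xFFFFFFu              /* stand-in for QIB_QPN_MASK (24-bit QPN) */

int main(void)
{
        /*
         * Host-order copies of the LRH/BTH words; the driver receives them
         * big-endian and converts with be16_to_cpu()/be32_to_cpu() first.
         */
        uint16_t lrh0 = 0x0012;         /* low two bits = LNH (next header) */
        uint16_t lrh1 = 0x0017;         /* destination LID */
        uint32_t bth0 = 0x64000000;     /* opcode in the top byte */
        uint32_t bth1 = 0x00000123;     /* destination QP number */
        unsigned lmc  = 2;              /* port LID mask control */

        unsigned lnh    = lrh0 & 3;
        unsigned lid    = lrh1 & ~((1u << lmc) - 1); /* strip the path bits */
        unsigned opcode = bth0 >> 24;
        unsigned qpn    = bth1 & QPN_MASK;

        printf("lnh=%u lid=0x%x opcode=0x%x qpn=0x%x\n", lnh, lid, opcode, qpn);
        return 0;                       /* lnh=2 lid=0x14 opcode=0x64 qpn=0x123 */
}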
675 | /* | ||
676 | * This is called from a timer to check for QPs | ||
677 | * which need kernel memory in order to send a packet. | ||
678 | */ | ||
679 | static void mem_timer(unsigned long data) | ||
680 | { | ||
681 | struct qib_ibdev *dev = (struct qib_ibdev *) data; | ||
682 | struct list_head *list = &dev->memwait; | ||
683 | struct qib_qp *qp = NULL; | ||
684 | unsigned long flags; | ||
685 | |||
686 | spin_lock_irqsave(&dev->pending_lock, flags); | ||
687 | if (!list_empty(list)) { | ||
688 | qp = list_entry(list->next, struct qib_qp, iowait); | ||
689 | list_del_init(&qp->iowait); | ||
690 | atomic_inc(&qp->refcount); | ||
691 | if (!list_empty(list)) | ||
692 | mod_timer(&dev->mem_timer, jiffies + 1); | ||
693 | } | ||
694 | spin_unlock_irqrestore(&dev->pending_lock, flags); | ||
695 | |||
696 | if (qp) { | ||
697 | spin_lock_irqsave(&qp->s_lock, flags); | ||
698 | if (qp->s_flags & QIB_S_WAIT_KMEM) { | ||
699 | qp->s_flags &= ~QIB_S_WAIT_KMEM; | ||
700 | qib_schedule_send(qp); | ||
701 | } | ||
702 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
703 | if (atomic_dec_and_test(&qp->refcount)) | ||
704 | wake_up(&qp->wait); | ||
705 | } | ||
706 | } | ||
707 | |||
708 | static void update_sge(struct qib_sge_state *ss, u32 length) | ||
709 | { | ||
710 | struct qib_sge *sge = &ss->sge; | ||
711 | |||
712 | sge->vaddr += length; | ||
713 | sge->length -= length; | ||
714 | sge->sge_length -= length; | ||
715 | if (sge->sge_length == 0) { | ||
716 | if (--ss->num_sge) | ||
717 | *sge = *ss->sg_list++; | ||
718 | } else if (sge->length == 0 && sge->mr->lkey) { | ||
719 | if (++sge->n >= QIB_SEGSZ) { | ||
720 | if (++sge->m >= sge->mr->mapsz) | ||
721 | return; | ||
722 | sge->n = 0; | ||
723 | } | ||
724 | sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; | ||
725 | sge->length = sge->mr->map[sge->m]->segs[sge->n].length; | ||
726 | } | ||
727 | } | ||
728 | |||
729 | #ifdef __LITTLE_ENDIAN | ||
730 | static inline u32 get_upper_bits(u32 data, u32 shift) | ||
731 | { | ||
732 | return data >> shift; | ||
733 | } | ||
734 | |||
735 | static inline u32 set_upper_bits(u32 data, u32 shift) | ||
736 | { | ||
737 | return data << shift; | ||
738 | } | ||
739 | |||
740 | static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) | ||
741 | { | ||
742 | data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); | ||
743 | data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); | ||
744 | return data; | ||
745 | } | ||
746 | #else | ||
747 | static inline u32 get_upper_bits(u32 data, u32 shift) | ||
748 | { | ||
749 | return data << shift; | ||
750 | } | ||
751 | |||
752 | static inline u32 set_upper_bits(u32 data, u32 shift) | ||
753 | { | ||
754 | return data >> shift; | ||
755 | } | ||
756 | |||
757 | static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) | ||
758 | { | ||
759 | data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); | ||
760 | data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); | ||
761 | return data; | ||
762 | } | ||
763 | #endif | ||
764 | |||
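These helpers let copy_io() pack byte-misaligned source data into whole dwords for the PIO buffer regardless of host endianness. A compilable little-endian illustration of what each one produces (the helpers mirror the #ifdef __LITTLE_ENDIAN branch above; the sample values are arbitrary):

#include <stdio.h>
#include <stdint.h>

static uint32_t get_upper_bits(uint32_t data, uint32_t shift)
{
        return data >> shift;
}

static uint32_t set_upper_bits(uint32_t data, uint32_t shift)
{
        return data << shift;
}

static uint32_t clear_upper_bytes(uint32_t data, uint32_t n, uint32_t off)
{
        data <<= ((sizeof(uint32_t) - n) * 8);
        data >>= ((sizeof(uint32_t) - n - off) * 8);
        return data;
}

int main(void)
{
        /* Keep the low 2 bytes of 0xAABBCCDD and move them up one byte:
         * in little-endian memory that yields 00 DD CC 00. */
        printf("0x%08x\n", clear_upper_bytes(0xAABBCCDDu, 2, 1)); /* 0x00ccdd00 */

        /* Take the three bytes above a 1-byte offset of the same word... */
        printf("0x%08x\n", get_upper_bits(0xAABBCCDDu, 8));       /* 0x00aabbcc */

        /* ...and park a value two bytes up in an output word. */
        printf("0x%08x\n", set_upper_bits(0x0000CCDDu, 16));      /* 0xccdd0000 */
        return 0;
}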
765 | static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss, | ||
766 | u32 length, unsigned flush_wc) | ||
767 | { | ||
768 | u32 extra = 0; | ||
769 | u32 data = 0; | ||
770 | u32 last; | ||
771 | |||
772 | while (1) { | ||
773 | u32 len = ss->sge.length; | ||
774 | u32 off; | ||
775 | |||
776 | if (len > length) | ||
777 | len = length; | ||
778 | if (len > ss->sge.sge_length) | ||
779 | len = ss->sge.sge_length; | ||
780 | BUG_ON(len == 0); | ||
781 | /* If the source address is not aligned, try to align it. */ | ||
782 | off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); | ||
783 | if (off) { | ||
784 | u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & | ||
785 | ~(sizeof(u32) - 1)); | ||
786 | u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); | ||
787 | u32 y; | ||
788 | |||
789 | y = sizeof(u32) - off; | ||
790 | if (len > y) | ||
791 | len = y; | ||
792 | if (len + extra >= sizeof(u32)) { | ||
793 | data |= set_upper_bits(v, extra * | ||
794 | BITS_PER_BYTE); | ||
795 | len = sizeof(u32) - extra; | ||
796 | if (len == length) { | ||
797 | last = data; | ||
798 | break; | ||
799 | } | ||
800 | __raw_writel(data, piobuf); | ||
801 | piobuf++; | ||
802 | extra = 0; | ||
803 | data = 0; | ||
804 | } else { | ||
805 | /* Clear unused upper bytes */ | ||
806 | data |= clear_upper_bytes(v, len, extra); | ||
807 | if (len == length) { | ||
808 | last = data; | ||
809 | break; | ||
810 | } | ||
811 | extra += len; | ||
812 | } | ||
813 | } else if (extra) { | ||
814 | /* Source address is aligned. */ | ||
815 | u32 *addr = (u32 *) ss->sge.vaddr; | ||
816 | int shift = extra * BITS_PER_BYTE; | ||
817 | int ushift = 32 - shift; | ||
818 | u32 l = len; | ||
819 | |||
820 | while (l >= sizeof(u32)) { | ||
821 | u32 v = *addr; | ||
822 | |||
823 | data |= set_upper_bits(v, shift); | ||
824 | __raw_writel(data, piobuf); | ||
825 | data = get_upper_bits(v, ushift); | ||
826 | piobuf++; | ||
827 | addr++; | ||
828 | l -= sizeof(u32); | ||
829 | } | ||
830 | /* | ||
831 | * We still have 'l' trailing bytes left over. | ||
832 | */ | ||
833 | if (l) { | ||
834 | u32 v = *addr; | ||
835 | |||
836 | if (l + extra >= sizeof(u32)) { | ||
837 | data |= set_upper_bits(v, shift); | ||
838 | len -= l + extra - sizeof(u32); | ||
839 | if (len == length) { | ||
840 | last = data; | ||
841 | break; | ||
842 | } | ||
843 | __raw_writel(data, piobuf); | ||
844 | piobuf++; | ||
845 | extra = 0; | ||
846 | data = 0; | ||
847 | } else { | ||
848 | /* Clear unused upper bytes */ | ||
849 | data |= clear_upper_bytes(v, l, extra); | ||
850 | if (len == length) { | ||
851 | last = data; | ||
852 | break; | ||
853 | } | ||
854 | extra += l; | ||
855 | } | ||
856 | } else if (len == length) { | ||
857 | last = data; | ||
858 | break; | ||
859 | } | ||
860 | } else if (len == length) { | ||
861 | u32 w; | ||
862 | |||
863 | /* | ||
864 | * Need to round up for the last dword in the | ||
865 | * packet. | ||
866 | */ | ||
867 | w = (len + 3) >> 2; | ||
868 | qib_pio_copy(piobuf, ss->sge.vaddr, w - 1); | ||
869 | piobuf += w - 1; | ||
870 | last = ((u32 *) ss->sge.vaddr)[w - 1]; | ||
871 | break; | ||
872 | } else { | ||
873 | u32 w = len >> 2; | ||
874 | |||
875 | qib_pio_copy(piobuf, ss->sge.vaddr, w); | ||
876 | piobuf += w; | ||
877 | |||
878 | extra = len & (sizeof(u32) - 1); | ||
879 | if (extra) { | ||
880 | u32 v = ((u32 *) ss->sge.vaddr)[w]; | ||
881 | |||
882 | /* Clear unused upper bytes */ | ||
883 | data = clear_upper_bytes(v, extra, 0); | ||
884 | } | ||
885 | } | ||
886 | update_sge(ss, len); | ||
887 | length -= len; | ||
888 | } | ||
889 | /* Update address before sending packet. */ | ||
890 | update_sge(ss, length); | ||
891 | if (flush_wc) { | ||
892 | /* must flush everything early, before the trigger word */ | ||
893 | qib_flush_wc(); | ||
894 | __raw_writel(last, piobuf); | ||
895 | /* be sure trigger word is written */ | ||
896 | qib_flush_wc(); | ||
897 | } else | ||
898 | __raw_writel(last, piobuf); | ||
899 | } | ||
900 | |||
901 | static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev, | ||
902 | struct qib_qp *qp, int *retp) | ||
903 | { | ||
904 | struct qib_verbs_txreq *tx; | ||
905 | unsigned long flags; | ||
906 | |||
907 | spin_lock_irqsave(&qp->s_lock, flags); | ||
908 | spin_lock(&dev->pending_lock); | ||
909 | |||
910 | if (!list_empty(&dev->txreq_free)) { | ||
911 | struct list_head *l = dev->txreq_free.next; | ||
912 | |||
913 | list_del(l); | ||
914 | tx = list_entry(l, struct qib_verbs_txreq, txreq.list); | ||
915 | *retp = 0; | ||
916 | } else { | ||
917 | if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK && | ||
918 | list_empty(&qp->iowait)) { | ||
919 | dev->n_txwait++; | ||
920 | qp->s_flags |= QIB_S_WAIT_TX; | ||
921 | list_add_tail(&qp->iowait, &dev->txwait); | ||
922 | } | ||
923 | tx = NULL; | ||
924 | qp->s_flags &= ~QIB_S_BUSY; | ||
925 | *retp = -EBUSY; | ||
926 | } | ||
927 | |||
928 | spin_unlock(&dev->pending_lock); | ||
929 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
930 | |||
931 | return tx; | ||
932 | } | ||
933 | |||
934 | void qib_put_txreq(struct qib_verbs_txreq *tx) | ||
935 | { | ||
936 | struct qib_ibdev *dev; | ||
937 | struct qib_qp *qp; | ||
938 | unsigned long flags; | ||
939 | |||
940 | qp = tx->qp; | ||
941 | dev = to_idev(qp->ibqp.device); | ||
942 | |||
943 | if (atomic_dec_and_test(&qp->refcount)) | ||
944 | wake_up(&qp->wait); | ||
945 | if (tx->mr) { | ||
946 | atomic_dec(&tx->mr->refcount); | ||
947 | tx->mr = NULL; | ||
948 | } | ||
949 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { | ||
950 | tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF; | ||
951 | dma_unmap_single(&dd_from_dev(dev)->pcidev->dev, | ||
952 | tx->txreq.addr, tx->hdr_dwords << 2, | ||
953 | DMA_TO_DEVICE); | ||
954 | kfree(tx->align_buf); | ||
955 | } | ||
956 | |||
957 | spin_lock_irqsave(&dev->pending_lock, flags); | ||
958 | |||
959 | /* Put struct back on free list */ | ||
960 | list_add(&tx->txreq.list, &dev->txreq_free); | ||
961 | |||
962 | if (!list_empty(&dev->txwait)) { | ||
963 | /* Wake up first QP wanting a free struct */ | ||
964 | qp = list_entry(dev->txwait.next, struct qib_qp, iowait); | ||
965 | list_del_init(&qp->iowait); | ||
966 | atomic_inc(&qp->refcount); | ||
967 | spin_unlock_irqrestore(&dev->pending_lock, flags); | ||
968 | |||
969 | spin_lock_irqsave(&qp->s_lock, flags); | ||
970 | if (qp->s_flags & QIB_S_WAIT_TX) { | ||
971 | qp->s_flags &= ~QIB_S_WAIT_TX; | ||
972 | qib_schedule_send(qp); | ||
973 | } | ||
974 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
975 | |||
976 | if (atomic_dec_and_test(&qp->refcount)) | ||
977 | wake_up(&qp->wait); | ||
978 | } else | ||
979 | spin_unlock_irqrestore(&dev->pending_lock, flags); | ||
980 | } | ||
981 | |||
982 | /* | ||
983 | * This is called when there are send DMA descriptors that might be | ||
984 | * available. | ||
985 | * | ||
986 | * This is called with ppd->sdma_lock held. | ||
987 | */ | ||
988 | void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail) | ||
989 | { | ||
990 | struct qib_qp *qp, *nqp; | ||
991 | struct qib_qp *qps[20]; | ||
992 | struct qib_ibdev *dev; | ||
993 | unsigned i, n; | ||
994 | |||
995 | n = 0; | ||
996 | dev = &ppd->dd->verbs_dev; | ||
997 | spin_lock(&dev->pending_lock); | ||
998 | |||
999 | /* Search wait list for first QP wanting DMA descriptors. */ | ||
1000 | list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) { | ||
1001 | if (qp->port_num != ppd->port) | ||
1002 | continue; | ||
1003 | if (n == ARRAY_SIZE(qps)) | ||
1004 | break; | ||
1005 | if (qp->s_tx->txreq.sg_count > avail) | ||
1006 | break; | ||
1007 | avail -= qp->s_tx->txreq.sg_count; | ||
1008 | list_del_init(&qp->iowait); | ||
1009 | atomic_inc(&qp->refcount); | ||
1010 | qps[n++] = qp; | ||
1011 | } | ||
1012 | |||
1013 | spin_unlock(&dev->pending_lock); | ||
1014 | |||
1015 | for (i = 0; i < n; i++) { | ||
1016 | qp = qps[i]; | ||
1017 | spin_lock(&qp->s_lock); | ||
1018 | if (qp->s_flags & QIB_S_WAIT_DMA_DESC) { | ||
1019 | qp->s_flags &= ~QIB_S_WAIT_DMA_DESC; | ||
1020 | qib_schedule_send(qp); | ||
1021 | } | ||
1022 | spin_unlock(&qp->s_lock); | ||
1023 | if (atomic_dec_and_test(&qp->refcount)) | ||
1024 | wake_up(&qp->wait); | ||
1025 | } | ||
1026 | } | ||
1027 | |||
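qib_verbs_sdma_desc_avail() shows a pattern used throughout this file: eligible waiters are collected into a small fixed-size array while pending_lock is held, and the per-QP work (taking s_lock, rescheduling the send, dropping the reference) happens only after the list lock is released. A rough userspace sketch of that "gather under one lock, service outside it" shape, with hypothetical names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stddef.h>

#define BATCH 20                        /* like the qps[20] array above */

struct waiter {
        struct waiter *next;
        int need;                       /* like s_tx->txreq.sg_count */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *wait_head;

static void wake_one(struct waiter *w)
{
        (void)w;                        /* per-waiter work goes here */
}

/*
 * Gather as many eligible waiters as fit in one batch while the list lock
 * is held, then service them only after dropping it.
 */
static void service_waiters(int avail)
{
        struct waiter *batch[BATCH];
        size_t i, n = 0;

        pthread_mutex_lock(&list_lock);
        while (wait_head && n < BATCH && wait_head->need <= avail) {
                struct waiter *w = wait_head;

                avail -= w->need;
                wait_head = w->next;
                batch[n++] = w;
        }
        pthread_mutex_unlock(&list_lock);

        for (i = 0; i < n; i++)
                wake_one(batch[i]);     /* done without list_lock held */
}

int main(void)
{
        struct waiter a = { NULL, 3 }, b = { &a, 2 };

        wait_head = &b;
        service_waiters(4);             /* wakes b (need 2); a stays queued */
        return 0;
}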
1028 | /* | ||
1029 | * This is called with ppd->sdma_lock held. | ||
1030 | */ | ||
1031 | static void sdma_complete(struct qib_sdma_txreq *cookie, int status) | ||
1032 | { | ||
1033 | struct qib_verbs_txreq *tx = | ||
1034 | container_of(cookie, struct qib_verbs_txreq, txreq); | ||
1035 | struct qib_qp *qp = tx->qp; | ||
1036 | |||
1037 | spin_lock(&qp->s_lock); | ||
1038 | if (tx->wqe) | ||
1039 | qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); | ||
1040 | else if (qp->ibqp.qp_type == IB_QPT_RC) { | ||
1041 | struct qib_ib_header *hdr; | ||
1042 | |||
1043 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) | ||
1044 | hdr = &tx->align_buf->hdr; | ||
1045 | else { | ||
1046 | struct qib_ibdev *dev = to_idev(qp->ibqp.device); | ||
1047 | |||
1048 | hdr = &dev->pio_hdrs[tx->hdr_inx].hdr; | ||
1049 | } | ||
1050 | qib_rc_send_complete(qp, hdr); | ||
1051 | } | ||
1052 | if (atomic_dec_and_test(&qp->s_dma_busy)) { | ||
1053 | if (qp->state == IB_QPS_RESET) | ||
1054 | wake_up(&qp->wait_dma); | ||
1055 | else if (qp->s_flags & QIB_S_WAIT_DMA) { | ||
1056 | qp->s_flags &= ~QIB_S_WAIT_DMA; | ||
1057 | qib_schedule_send(qp); | ||
1058 | } | ||
1059 | } | ||
1060 | spin_unlock(&qp->s_lock); | ||
1061 | |||
1062 | qib_put_txreq(tx); | ||
1063 | } | ||
1064 | |||
1065 | static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) | ||
1066 | { | ||
1067 | unsigned long flags; | ||
1068 | int ret = 0; | ||
1069 | |||
1070 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1071 | if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { | ||
1072 | spin_lock(&dev->pending_lock); | ||
1073 | if (list_empty(&qp->iowait)) { | ||
1074 | if (list_empty(&dev->memwait)) | ||
1075 | mod_timer(&dev->mem_timer, jiffies + 1); | ||
1076 | qp->s_flags |= QIB_S_WAIT_KMEM; | ||
1077 | list_add_tail(&qp->iowait, &dev->memwait); | ||
1078 | } | ||
1079 | spin_unlock(&dev->pending_lock); | ||
1080 | qp->s_flags &= ~QIB_S_BUSY; | ||
1081 | ret = -EBUSY; | ||
1082 | } | ||
1083 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1084 | |||
1085 | return ret; | ||
1086 | } | ||
1087 | |||
1088 | static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, | ||
1089 | u32 hdrwords, struct qib_sge_state *ss, u32 len, | ||
1090 | u32 plen, u32 dwords) | ||
1091 | { | ||
1092 | struct qib_ibdev *dev = to_idev(qp->ibqp.device); | ||
1093 | struct qib_devdata *dd = dd_from_dev(dev); | ||
1094 | struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); | ||
1095 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1096 | struct qib_verbs_txreq *tx; | ||
1097 | struct qib_pio_header *phdr; | ||
1098 | u32 control; | ||
1099 | u32 ndesc; | ||
1100 | int ret; | ||
1101 | |||
1102 | tx = qp->s_tx; | ||
1103 | if (tx) { | ||
1104 | qp->s_tx = NULL; | ||
1105 | /* resend previously constructed packet */ | ||
1106 | ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx); | ||
1107 | goto bail; | ||
1108 | } | ||
1109 | |||
1110 | tx = get_txreq(dev, qp, &ret); | ||
1111 | if (!tx) | ||
1112 | goto bail; | ||
1113 | |||
1114 | control = dd->f_setpbc_control(ppd, plen, qp->s_srate, | ||
1115 | be16_to_cpu(hdr->lrh[0]) >> 12); | ||
1116 | tx->qp = qp; | ||
1117 | atomic_inc(&qp->refcount); | ||
1118 | tx->wqe = qp->s_wqe; | ||
1119 | tx->mr = qp->s_rdma_mr; | ||
1120 | if (qp->s_rdma_mr) | ||
1121 | qp->s_rdma_mr = NULL; | ||
1122 | tx->txreq.callback = sdma_complete; | ||
1123 | if (dd->flags & QIB_HAS_SDMA_TIMEOUT) | ||
1124 | tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST; | ||
1125 | else | ||
1126 | tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ; | ||
1127 | if (plen + 1 > dd->piosize2kmax_dwords) | ||
1128 | tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF; | ||
1129 | |||
1130 | if (len) { | ||
1131 | /* | ||
1132 | * Don't try to DMA if it takes more descriptors than | ||
1133 | * the queue holds. | ||
1134 | */ | ||
1135 | ndesc = qib_count_sge(ss, len); | ||
1136 | if (ndesc >= ppd->sdma_descq_cnt) | ||
1137 | ndesc = 0; | ||
1138 | } else | ||
1139 | ndesc = 1; | ||
1140 | if (ndesc) { | ||
1141 | phdr = &dev->pio_hdrs[tx->hdr_inx]; | ||
1142 | phdr->pbc[0] = cpu_to_le32(plen); | ||
1143 | phdr->pbc[1] = cpu_to_le32(control); | ||
1144 | memcpy(&phdr->hdr, hdr, hdrwords << 2); | ||
1145 | tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC; | ||
1146 | tx->txreq.sg_count = ndesc; | ||
1147 | tx->txreq.addr = dev->pio_hdrs_phys + | ||
1148 | tx->hdr_inx * sizeof(struct qib_pio_header); | ||
1149 | tx->hdr_dwords = hdrwords + 2; /* add PBC length */ | ||
1150 | ret = qib_sdma_verbs_send(ppd, ss, dwords, tx); | ||
1151 | goto bail; | ||
1152 | } | ||
1153 | |||
1154 | /* Allocate a buffer and copy the header and payload to it. */ | ||
1155 | tx->hdr_dwords = plen + 1; | ||
1156 | phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC); | ||
1157 | if (!phdr) | ||
1158 | goto err_tx; | ||
1159 | phdr->pbc[0] = cpu_to_le32(plen); | ||
1160 | phdr->pbc[1] = cpu_to_le32(control); | ||
1161 | memcpy(&phdr->hdr, hdr, hdrwords << 2); | ||
1162 | qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len); | ||
1163 | |||
1164 | tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr, | ||
1165 | tx->hdr_dwords << 2, DMA_TO_DEVICE); | ||
1166 | if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr)) | ||
1167 | goto map_err; | ||
1168 | tx->align_buf = phdr; | ||
1169 | tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF; | ||
1170 | tx->txreq.sg_count = 1; | ||
1171 | ret = qib_sdma_verbs_send(ppd, NULL, 0, tx); | ||
1172 | goto unaligned; | ||
1173 | |||
1174 | map_err: | ||
1175 | kfree(phdr); | ||
1176 | err_tx: | ||
1177 | qib_put_txreq(tx); | ||
1178 | ret = wait_kmem(dev, qp); | ||
1179 | unaligned: | ||
1180 | ibp->n_unaligned++; | ||
1181 | bail: | ||
1182 | return ret; | ||
1183 | } | ||
1184 | |||
1185 | /* | ||
1186 | * If we are now in the error state, return zero to flush the | ||
1187 | * send work request. | ||
1188 | */ | ||
1189 | static int no_bufs_available(struct qib_qp *qp) | ||
1190 | { | ||
1191 | struct qib_ibdev *dev = to_idev(qp->ibqp.device); | ||
1192 | struct qib_devdata *dd; | ||
1193 | unsigned long flags; | ||
1194 | int ret = 0; | ||
1195 | |||
1196 | /* | ||
1197 | * Note that as soon as dd->f_wantpiobuf_intr() is called, | ||
1198 | * and possibly before it returns, qib_ib_piobufavail() | ||
1199 | * could be called. Therefore, put the QP on the I/O wait list | ||
1200 | * before enabling the PIO avail interrupt. | ||
1201 | */ | ||
1202 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1203 | if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { | ||
1204 | spin_lock(&dev->pending_lock); | ||
1205 | if (list_empty(&qp->iowait)) { | ||
1206 | dev->n_piowait++; | ||
1207 | qp->s_flags |= QIB_S_WAIT_PIO; | ||
1208 | list_add_tail(&qp->iowait, &dev->piowait); | ||
1209 | dd = dd_from_dev(dev); | ||
1210 | dd->f_wantpiobuf_intr(dd, 1); | ||
1211 | } | ||
1212 | spin_unlock(&dev->pending_lock); | ||
1213 | qp->s_flags &= ~QIB_S_BUSY; | ||
1214 | ret = -EBUSY; | ||
1215 | } | ||
1216 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1217 | return ret; | ||
1218 | } | ||
1219 | |||
1220 | static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, | ||
1221 | u32 hdrwords, struct qib_sge_state *ss, u32 len, | ||
1222 | u32 plen, u32 dwords) | ||
1223 | { | ||
1224 | struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); | ||
1225 | struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; | ||
1226 | u32 *hdr = (u32 *) ibhdr; | ||
1227 | u32 __iomem *piobuf_orig; | ||
1228 | u32 __iomem *piobuf; | ||
1229 | u64 pbc; | ||
1230 | unsigned long flags; | ||
1231 | unsigned flush_wc; | ||
1232 | u32 control; | ||
1233 | u32 pbufn; | ||
1234 | |||
1235 | control = dd->f_setpbc_control(ppd, plen, qp->s_srate, | ||
1236 | be16_to_cpu(ibhdr->lrh[0]) >> 12); | ||
1237 | pbc = ((u64) control << 32) | plen; | ||
1238 | piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn); | ||
1239 | if (unlikely(piobuf == NULL)) | ||
1240 | return no_bufs_available(qp); | ||
1241 | |||
1242 | /* | ||
1243 | * Write the pbc. | ||
1244 | * We have to flush after the PBC for correctness on some cpus | ||
1245 | * or WC buffer can be written out of order. | ||
1246 | */ | ||
1247 | writeq(pbc, piobuf); | ||
1248 | piobuf_orig = piobuf; | ||
1249 | piobuf += 2; | ||
1250 | |||
1251 | flush_wc = dd->flags & QIB_PIO_FLUSH_WC; | ||
1252 | if (len == 0) { | ||
1253 | /* | ||
1254 | * If there is just the header portion, we must flush before | ||
1255 | * writing the last word of the header for correctness, and | ||
1256 | * again after the last header word (the trigger word). | ||
1257 | */ | ||
1258 | if (flush_wc) { | ||
1259 | qib_flush_wc(); | ||
1260 | qib_pio_copy(piobuf, hdr, hdrwords - 1); | ||
1261 | qib_flush_wc(); | ||
1262 | __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); | ||
1263 | qib_flush_wc(); | ||
1264 | } else | ||
1265 | qib_pio_copy(piobuf, hdr, hdrwords); | ||
1266 | goto done; | ||
1267 | } | ||
1268 | |||
1269 | if (flush_wc) | ||
1270 | qib_flush_wc(); | ||
1271 | qib_pio_copy(piobuf, hdr, hdrwords); | ||
1272 | piobuf += hdrwords; | ||
1273 | |||
1274 | /* The common case is aligned and contained in one segment. */ | ||
1275 | if (likely(ss->num_sge == 1 && len <= ss->sge.length && | ||
1276 | !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { | ||
1277 | u32 *addr = (u32 *) ss->sge.vaddr; | ||
1278 | |||
1279 | /* Update address before sending packet. */ | ||
1280 | update_sge(ss, len); | ||
1281 | if (flush_wc) { | ||
1282 | qib_pio_copy(piobuf, addr, dwords - 1); | ||
1283 | /* must flush everything early, before the trigger word */ | ||
1284 | qib_flush_wc(); | ||
1285 | __raw_writel(addr[dwords - 1], piobuf + dwords - 1); | ||
1286 | /* be sure trigger word is written */ | ||
1287 | qib_flush_wc(); | ||
1288 | } else | ||
1289 | qib_pio_copy(piobuf, addr, dwords); | ||
1290 | goto done; | ||
1291 | } | ||
1292 | copy_io(piobuf, ss, len, flush_wc); | ||
1293 | done: | ||
1294 | if (dd->flags & QIB_USE_SPCL_TRIG) { | ||
1295 | u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; | ||
1296 | qib_flush_wc(); | ||
1297 | __raw_writel(0xaebecede, piobuf_orig + spcl_off); | ||
1298 | } | ||
1299 | qib_sendbuf_done(dd, pbufn); | ||
1300 | if (qp->s_rdma_mr) { | ||
1301 | atomic_dec(&qp->s_rdma_mr->refcount); | ||
1302 | qp->s_rdma_mr = NULL; | ||
1303 | } | ||
1304 | if (qp->s_wqe) { | ||
1305 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1306 | qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); | ||
1307 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1308 | } else if (qp->ibqp.qp_type == IB_QPT_RC) { | ||
1309 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1310 | qib_rc_send_complete(qp, ibhdr); | ||
1311 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1312 | } | ||
1313 | return 0; | ||
1314 | } | ||
1315 | |||
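Both copy_io() and qib_verbs_send_pio() observe the same write-combining discipline when QIB_PIO_FLUSH_WC is set: every dword except the last is written and flushed first, and only then is the final dword, the trigger word that starts transmission, written and flushed. A minimal sketch of that ordering, where flush_wc() is only a compiler barrier standing in for the driver's qib_flush_wc():

#include <stdint.h>

/*
 * Stand-in for qib_flush_wc(): in the driver this forces write-combining
 * buffers out to the device in order; here it is only a compiler barrier
 * so the sketch compiles anywhere gcc/clang does.
 */
static inline void flush_wc(void)
{
        __asm__ __volatile__("" ::: "memory");
}

/* Copy a packet so the final dword -- the trigger word -- lands last. */
static void pio_copy_with_trigger(volatile uint32_t *piobuf,
                                  const uint32_t *src, unsigned dwords)
{
        unsigned i;

        for (i = 0; i + 1 < dwords; i++)
                piobuf[i] = src[i];
        flush_wc();                     /* everything before the trigger */
        piobuf[dwords - 1] = src[dwords - 1];
        flush_wc();                     /* make sure the trigger is posted */
}

int main(void)
{
        uint32_t pkt[4] = { 1, 2, 3, 4 };
        volatile uint32_t buf[4];

        pio_copy_with_trigger(buf, pkt, 4);
        return (int)buf[3] - 4;         /* 0 on success */
}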
1316 | /** | ||
1317 | * qib_verbs_send - send a packet | ||
1318 | * @qp: the QP to send on | ||
1319 | * @hdr: the packet header | ||
1320 | * @hdrwords: the number of 32-bit words in the header | ||
1321 | * @ss: the SGE to send | ||
1322 | * @len: the length of the packet in bytes | ||
1323 | * | ||
1324 | * Return zero if packet is sent or queued OK. | ||
1325 | * Return non-zero and clear the QIB_S_BUSY bit in qp->s_flags otherwise. | ||
1326 | */ | ||
1327 | int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, | ||
1328 | u32 hdrwords, struct qib_sge_state *ss, u32 len) | ||
1329 | { | ||
1330 | struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); | ||
1331 | u32 plen; | ||
1332 | int ret; | ||
1333 | u32 dwords = (len + 3) >> 2; | ||
1334 | |||
1335 | /* | ||
1336 | * Calculate the send buffer trigger address. | ||
1337 | * The +1 counts for the pbc control dword following the pbc length. | ||
1338 | */ | ||
1339 | plen = hdrwords + dwords + 1; | ||
1340 | |||
1341 | /* | ||
1342 | * VL15 packets (IB_QPT_SMI) will always use PIO, so we | ||
1343 | * can defer SDMA restart until link goes ACTIVE without | ||
1344 | * worrying about just how we got there. | ||
1345 | */ | ||
1346 | if (qp->ibqp.qp_type == IB_QPT_SMI || | ||
1347 | !(dd->flags & QIB_HAS_SEND_DMA)) | ||
1348 | ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len, | ||
1349 | plen, dwords); | ||
1350 | else | ||
1351 | ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len, | ||
1352 | plen, dwords); | ||
1353 | |||
1354 | return ret; | ||
1355 | } | ||
1356 | |||
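The plen arithmetic above is simple but easy to misread: the payload is rounded up to whole dwords, and one extra dword is added for the PBC control word. A tiny worked example (the header and payload sizes are made up):

#include <stdio.h>

int main(void)
{
        unsigned hdrwords = 7;          /* e.g. LRH + BTH (+ DETH) in dwords */
        unsigned len = 1025;            /* payload bytes */

        unsigned dwords = (len + 3) >> 2;       /* 257: payload rounded up */
        unsigned plen = hdrwords + dwords + 1;  /* 265: +1 for the PBC dword */

        printf("dwords=%u plen=%u\n", dwords, plen);
        return 0;
}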
1357 | int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, | ||
1358 | u64 *rwords, u64 *spkts, u64 *rpkts, | ||
1359 | u64 *xmit_wait) | ||
1360 | { | ||
1361 | int ret; | ||
1362 | struct qib_devdata *dd = ppd->dd; | ||
1363 | |||
1364 | if (!(dd->flags & QIB_PRESENT)) { | ||
1365 | /* no hardware, freeze, etc. */ | ||
1366 | ret = -EINVAL; | ||
1367 | goto bail; | ||
1368 | } | ||
1369 | *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND); | ||
1370 | *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV); | ||
1371 | *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND); | ||
1372 | *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV); | ||
1373 | *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL); | ||
1374 | |||
1375 | ret = 0; | ||
1376 | |||
1377 | bail: | ||
1378 | return ret; | ||
1379 | } | ||
1380 | |||
1381 | /** | ||
1382 | * qib_get_counters - get various chip counters | ||
1383 | * @ppd: the qlogic_ib port data | ||
1384 | * @cntrs: counters are placed here | ||
1385 | * | ||
1386 | * Return the counters needed by recv_pma_get_portcounters(). | ||
1387 | */ | ||
1388 | int qib_get_counters(struct qib_pportdata *ppd, | ||
1389 | struct qib_verbs_counters *cntrs) | ||
1390 | { | ||
1391 | int ret; | ||
1392 | |||
1393 | if (!(ppd->dd->flags & QIB_PRESENT)) { | ||
1394 | /* no hardware, freeze, etc. */ | ||
1395 | ret = -EINVAL; | ||
1396 | goto bail; | ||
1397 | } | ||
1398 | cntrs->symbol_error_counter = | ||
1399 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR); | ||
1400 | cntrs->link_error_recovery_counter = | ||
1401 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV); | ||
1402 | /* | ||
1403 | * The link downed counter counts when the other side downs the | ||
1404 | * connection. We add in the number of times we downed the link | ||
1405 | * due to local link integrity errors to compensate. | ||
1406 | */ | ||
1407 | cntrs->link_downed_counter = | ||
1408 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN); | ||
1409 | cntrs->port_rcv_errors = | ||
1410 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) + | ||
1411 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) + | ||
1412 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) + | ||
1413 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) + | ||
1414 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) + | ||
1415 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) + | ||
1416 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) + | ||
1417 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) + | ||
1418 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT); | ||
1419 | cntrs->port_rcv_errors += | ||
1420 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR); | ||
1421 | cntrs->port_rcv_errors += | ||
1422 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR); | ||
1423 | cntrs->port_rcv_remphys_errors = | ||
1424 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP); | ||
1425 | cntrs->port_xmit_discards = | ||
1426 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL); | ||
1427 | cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd, | ||
1428 | QIBPORTCNTR_WORDSEND); | ||
1429 | cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd, | ||
1430 | QIBPORTCNTR_WORDRCV); | ||
1431 | cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd, | ||
1432 | QIBPORTCNTR_PKTSEND); | ||
1433 | cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd, | ||
1434 | QIBPORTCNTR_PKTRCV); | ||
1435 | cntrs->local_link_integrity_errors = | ||
1436 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI); | ||
1437 | cntrs->excessive_buffer_overrun_errors = | ||
1438 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL); | ||
1439 | cntrs->vl15_dropped = | ||
1440 | ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP); | ||
1441 | |||
1442 | ret = 0; | ||
1443 | |||
1444 | bail: | ||
1445 | return ret; | ||
1446 | } | ||
1447 | |||
1448 | /** | ||
1449 | * qib_ib_piobufavail - callback when a PIO buffer is available | ||
1450 | * @dd: the device pointer | ||
1451 | * | ||
1452 | * This is called from qib_intr() at interrupt level when a PIO buffer is | ||
1453 | * available after qib_verbs_send() returned an error that no buffers were | ||
1454 | * available. Disable the interrupt if there are no more QPs waiting. | ||
1455 | */ | ||
1456 | void qib_ib_piobufavail(struct qib_devdata *dd) | ||
1457 | { | ||
1458 | struct qib_ibdev *dev = &dd->verbs_dev; | ||
1459 | struct list_head *list; | ||
1460 | struct qib_qp *qps[5]; | ||
1461 | struct qib_qp *qp; | ||
1462 | unsigned long flags; | ||
1463 | unsigned i, n; | ||
1464 | |||
1465 | list = &dev->piowait; | ||
1466 | n = 0; | ||
1467 | |||
1468 | /* | ||
1469 | * Note: checking that the piowait list is empty and clearing | ||
1470 | * the buffer available interrupt needs to be atomic or we | ||
1471 | * could end up with QPs on the wait list with the interrupt | ||
1472 | * disabled. | ||
1473 | */ | ||
1474 | spin_lock_irqsave(&dev->pending_lock, flags); | ||
1475 | while (!list_empty(list)) { | ||
1476 | if (n == ARRAY_SIZE(qps)) | ||
1477 | goto full; | ||
1478 | qp = list_entry(list->next, struct qib_qp, iowait); | ||
1479 | list_del_init(&qp->iowait); | ||
1480 | atomic_inc(&qp->refcount); | ||
1481 | qps[n++] = qp; | ||
1482 | } | ||
1483 | dd->f_wantpiobuf_intr(dd, 0); | ||
1484 | full: | ||
1485 | spin_unlock_irqrestore(&dev->pending_lock, flags); | ||
1486 | |||
1487 | for (i = 0; i < n; i++) { | ||
1488 | qp = qps[i]; | ||
1489 | |||
1490 | spin_lock_irqsave(&qp->s_lock, flags); | ||
1491 | if (qp->s_flags & QIB_S_WAIT_PIO) { | ||
1492 | qp->s_flags &= ~QIB_S_WAIT_PIO; | ||
1493 | qib_schedule_send(qp); | ||
1494 | } | ||
1495 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
1496 | |||
1497 | /* Notify qib_destroy_qp() if it is waiting. */ | ||
1498 | if (atomic_dec_and_test(&qp->refcount)) | ||
1499 | wake_up(&qp->wait); | ||
1500 | } | ||
1501 | } | ||
1502 | |||
1503 | static int qib_query_device(struct ib_device *ibdev, | ||
1504 | struct ib_device_attr *props) | ||
1505 | { | ||
1506 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
1507 | struct qib_ibdev *dev = to_idev(ibdev); | ||
1508 | |||
1509 | memset(props, 0, sizeof(*props)); | ||
1510 | |||
1511 | props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | | ||
1512 | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | | ||
1513 | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | | ||
1514 | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; | ||
1515 | props->page_size_cap = PAGE_SIZE; | ||
1516 | props->vendor_id = | ||
1517 | QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3; | ||
1518 | props->vendor_part_id = dd->deviceid; | ||
1519 | props->hw_ver = dd->minrev; | ||
1520 | props->sys_image_guid = ib_qib_sys_image_guid; | ||
1521 | props->max_mr_size = ~0ULL; | ||
1522 | props->max_qp = ib_qib_max_qps; | ||
1523 | props->max_qp_wr = ib_qib_max_qp_wrs; | ||
1524 | props->max_sge = ib_qib_max_sges; | ||
1525 | props->max_cq = ib_qib_max_cqs; | ||
1526 | props->max_ah = ib_qib_max_ahs; | ||
1527 | props->max_cqe = ib_qib_max_cqes; | ||
1528 | props->max_mr = dev->lk_table.max; | ||
1529 | props->max_fmr = dev->lk_table.max; | ||
1530 | props->max_map_per_fmr = 32767; | ||
1531 | props->max_pd = ib_qib_max_pds; | ||
1532 | props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; | ||
1533 | props->max_qp_init_rd_atom = 255; | ||
1534 | /* props->max_res_rd_atom */ | ||
1535 | props->max_srq = ib_qib_max_srqs; | ||
1536 | props->max_srq_wr = ib_qib_max_srq_wrs; | ||
1537 | props->max_srq_sge = ib_qib_max_srq_sges; | ||
1538 | /* props->local_ca_ack_delay */ | ||
1539 | props->atomic_cap = IB_ATOMIC_GLOB; | ||
1540 | props->max_pkeys = qib_get_npkeys(dd); | ||
1541 | props->max_mcast_grp = ib_qib_max_mcast_grps; | ||
1542 | props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached; | ||
1543 | props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * | ||
1544 | props->max_mcast_grp; | ||
1545 | |||
1546 | return 0; | ||
1547 | } | ||
1548 | |||
1549 | static int qib_query_port(struct ib_device *ibdev, u8 port, | ||
1550 | struct ib_port_attr *props) | ||
1551 | { | ||
1552 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
1553 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1554 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1555 | enum ib_mtu mtu; | ||
1556 | u16 lid = ppd->lid; | ||
1557 | |||
1558 | memset(props, 0, sizeof(*props)); | ||
1559 | props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); | ||
1560 | props->lmc = ppd->lmc; | ||
1561 | props->sm_lid = ibp->sm_lid; | ||
1562 | props->sm_sl = ibp->sm_sl; | ||
1563 | props->state = dd->f_iblink_state(ppd->lastibcstat); | ||
1564 | props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat); | ||
1565 | props->port_cap_flags = ibp->port_cap_flags; | ||
1566 | props->gid_tbl_len = QIB_GUIDS_PER_PORT; | ||
1567 | props->max_msg_sz = 0x80000000; | ||
1568 | props->pkey_tbl_len = qib_get_npkeys(dd); | ||
1569 | props->bad_pkey_cntr = ibp->pkey_violations; | ||
1570 | props->qkey_viol_cntr = ibp->qkey_violations; | ||
1571 | props->active_width = ppd->link_width_active; | ||
1572 | /* See rate_show() */ | ||
1573 | props->active_speed = ppd->link_speed_active; | ||
1574 | props->max_vl_num = qib_num_vls(ppd->vls_supported); | ||
1575 | props->init_type_reply = 0; | ||
1576 | |||
1577 | props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; | ||
1578 | switch (ppd->ibmtu) { | ||
1579 | case 4096: | ||
1580 | mtu = IB_MTU_4096; | ||
1581 | break; | ||
1582 | case 2048: | ||
1583 | mtu = IB_MTU_2048; | ||
1584 | break; | ||
1585 | case 1024: | ||
1586 | mtu = IB_MTU_1024; | ||
1587 | break; | ||
1588 | case 512: | ||
1589 | mtu = IB_MTU_512; | ||
1590 | break; | ||
1591 | case 256: | ||
1592 | mtu = IB_MTU_256; | ||
1593 | break; | ||
1594 | default: | ||
1595 | mtu = IB_MTU_2048; | ||
1596 | } | ||
1597 | props->active_mtu = mtu; | ||
1598 | props->subnet_timeout = ibp->subnet_timeout; | ||
1599 | |||
1600 | return 0; | ||
1601 | } | ||
1602 | |||
1603 | static int qib_modify_device(struct ib_device *device, | ||
1604 | int device_modify_mask, | ||
1605 | struct ib_device_modify *device_modify) | ||
1606 | { | ||
1607 | struct qib_devdata *dd = dd_from_ibdev(device); | ||
1608 | unsigned i; | ||
1609 | int ret; | ||
1610 | |||
1611 | if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | | ||
1612 | IB_DEVICE_MODIFY_NODE_DESC)) { | ||
1613 | ret = -EOPNOTSUPP; | ||
1614 | goto bail; | ||
1615 | } | ||
1616 | |||
1617 | if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) { | ||
1618 | memcpy(device->node_desc, device_modify->node_desc, 64); | ||
1619 | for (i = 0; i < dd->num_pports; i++) { | ||
1620 | struct qib_ibport *ibp = &dd->pport[i].ibport_data; | ||
1621 | |||
1622 | qib_node_desc_chg(ibp); | ||
1623 | } | ||
1624 | } | ||
1625 | |||
1626 | if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) { | ||
1627 | ib_qib_sys_image_guid = | ||
1628 | cpu_to_be64(device_modify->sys_image_guid); | ||
1629 | for (i = 0; i < dd->num_pports; i++) { | ||
1630 | struct qib_ibport *ibp = &dd->pport[i].ibport_data; | ||
1631 | |||
1632 | qib_sys_guid_chg(ibp); | ||
1633 | } | ||
1634 | } | ||
1635 | |||
1636 | ret = 0; | ||
1637 | |||
1638 | bail: | ||
1639 | return ret; | ||
1640 | } | ||
1641 | |||
1642 | static int qib_modify_port(struct ib_device *ibdev, u8 port, | ||
1643 | int port_modify_mask, struct ib_port_modify *props) | ||
1644 | { | ||
1645 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1646 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1647 | |||
1648 | ibp->port_cap_flags |= props->set_port_cap_mask; | ||
1649 | ibp->port_cap_flags &= ~props->clr_port_cap_mask; | ||
1650 | if (props->set_port_cap_mask || props->clr_port_cap_mask) | ||
1651 | qib_cap_mask_chg(ibp); | ||
1652 | if (port_modify_mask & IB_PORT_SHUTDOWN) | ||
1653 | qib_set_linkstate(ppd, QIB_IB_LINKDOWN); | ||
1654 | if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) | ||
1655 | ibp->qkey_violations = 0; | ||
1656 | return 0; | ||
1657 | } | ||
1658 | |||
1659 | static int qib_query_gid(struct ib_device *ibdev, u8 port, | ||
1660 | int index, union ib_gid *gid) | ||
1661 | { | ||
1662 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
1663 | int ret = 0; | ||
1664 | |||
1665 | if (!port || port > dd->num_pports) | ||
1666 | ret = -EINVAL; | ||
1667 | else { | ||
1668 | struct qib_ibport *ibp = to_iport(ibdev, port); | ||
1669 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1670 | |||
1671 | gid->global.subnet_prefix = ibp->gid_prefix; | ||
1672 | if (index == 0) | ||
1673 | gid->global.interface_id = ppd->guid; | ||
1674 | else if (index < QIB_GUIDS_PER_PORT) | ||
1675 | gid->global.interface_id = ibp->guids[index - 1]; | ||
1676 | else | ||
1677 | ret = -EINVAL; | ||
1678 | } | ||
1679 | |||
1680 | return ret; | ||
1681 | } | ||
1682 | |||
1683 | static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev, | ||
1684 | struct ib_ucontext *context, | ||
1685 | struct ib_udata *udata) | ||
1686 | { | ||
1687 | struct qib_ibdev *dev = to_idev(ibdev); | ||
1688 | struct qib_pd *pd; | ||
1689 | struct ib_pd *ret; | ||
1690 | |||
1691 | /* | ||
1692 | * This is actually totally arbitrary. Some correctness tests | ||
1693 | * assume there's a maximum number of PDs that can be allocated. | ||
1694 | * We don't actually have this limit, but we fail the test if | ||
1695 | * we allow allocations of more than we report for this value. | ||
1696 | */ | ||
1697 | |||
1698 | pd = kmalloc(sizeof *pd, GFP_KERNEL); | ||
1699 | if (!pd) { | ||
1700 | ret = ERR_PTR(-ENOMEM); | ||
1701 | goto bail; | ||
1702 | } | ||
1703 | |||
1704 | spin_lock(&dev->n_pds_lock); | ||
1705 | if (dev->n_pds_allocated == ib_qib_max_pds) { | ||
1706 | spin_unlock(&dev->n_pds_lock); | ||
1707 | kfree(pd); | ||
1708 | ret = ERR_PTR(-ENOMEM); | ||
1709 | goto bail; | ||
1710 | } | ||
1711 | |||
1712 | dev->n_pds_allocated++; | ||
1713 | spin_unlock(&dev->n_pds_lock); | ||
1714 | |||
1715 | /* ib_alloc_pd() will initialize pd->ibpd. */ | ||
1716 | pd->user = udata != NULL; | ||
1717 | |||
1718 | ret = &pd->ibpd; | ||
1719 | |||
1720 | bail: | ||
1721 | return ret; | ||
1722 | } | ||
1723 | |||
1724 | static int qib_dealloc_pd(struct ib_pd *ibpd) | ||
1725 | { | ||
1726 | struct qib_pd *pd = to_ipd(ibpd); | ||
1727 | struct qib_ibdev *dev = to_idev(ibpd->device); | ||
1728 | |||
1729 | spin_lock(&dev->n_pds_lock); | ||
1730 | dev->n_pds_allocated--; | ||
1731 | spin_unlock(&dev->n_pds_lock); | ||
1732 | |||
1733 | kfree(pd); | ||
1734 | |||
1735 | return 0; | ||
1736 | } | ||
1737 | |||
1738 | int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) | ||
1739 | { | ||
1740 | /* A multicast address requires a GRH (see ch. 8.4.1). */ | ||
1741 | if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE && | ||
1742 | ah_attr->dlid != QIB_PERMISSIVE_LID && | ||
1743 | !(ah_attr->ah_flags & IB_AH_GRH)) | ||
1744 | goto bail; | ||
1745 | if ((ah_attr->ah_flags & IB_AH_GRH) && | ||
1746 | ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT) | ||
1747 | goto bail; | ||
1748 | if (ah_attr->dlid == 0) | ||
1749 | goto bail; | ||
1750 | if (ah_attr->port_num < 1 || | ||
1751 | ah_attr->port_num > ibdev->phys_port_cnt) | ||
1752 | goto bail; | ||
1753 | if (ah_attr->static_rate != IB_RATE_PORT_CURRENT && | ||
1754 | ib_rate_to_mult(ah_attr->static_rate) < 0) | ||
1755 | goto bail; | ||
1756 | if (ah_attr->sl > 15) | ||
1757 | goto bail; | ||
1758 | return 0; | ||
1759 | bail: | ||
1760 | return -EINVAL; | ||
1761 | } | ||
1762 | |||
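qib_check_ah() encodes a handful of spec-level rules: a multicast DLID needs a GRH, a GRH needs a valid SGID index, the DLID may not be zero, the port number must exist, the static rate must be representable, and the SL is at most 15. A self-contained sketch of the same checks (minus the rate test) against a trimmed-down attribute struct; the constants are stand-ins for the driver's QIB_* values:

#include <stdio.h>

#define MCAST_LID_BASE  0xC000          /* stand-in for QIB_MULTICAST_LID_BASE */
#define PERMISSIVE_LID  0xFFFF          /* stand-in for QIB_PERMISSIVE_LID */
#define GUIDS_PER_PORT  5               /* stand-in for QIB_GUIDS_PER_PORT */
#define AH_GRH          0x1             /* stand-in for IB_AH_GRH */

struct ah_attr {                        /* trimmed-down ib_ah_attr */
        unsigned dlid, flags, sgid_index, port_num, sl;
};

/* Same acceptance rules as qib_check_ah(), minus the static rate check. */
static int check_ah(const struct ah_attr *a, unsigned nports)
{
        if (a->dlid >= MCAST_LID_BASE && a->dlid != PERMISSIVE_LID &&
            !(a->flags & AH_GRH))
                return -1;              /* multicast requires a GRH */
        if ((a->flags & AH_GRH) && a->sgid_index >= GUIDS_PER_PORT)
                return -1;
        if (a->dlid == 0)
                return -1;
        if (a->port_num < 1 || a->port_num > nports)
                return -1;
        if (a->sl > 15)
                return -1;
        return 0;
}

int main(void)
{
        struct ah_attr ucast = { 0x17, 0, 0, 1, 0 };
        struct ah_attr mcast_no_grh = { 0xC001, 0, 0, 1, 0 };

        printf("%d %d\n", check_ah(&ucast, 2), check_ah(&mcast_no_grh, 2));
        return 0;                       /* prints "0 -1" */
}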
1763 | /** | ||
1764 | * qib_create_ah - create an address handle | ||
1765 | * @pd: the protection domain | ||
1766 | * @ah_attr: the attributes of the AH | ||
1767 | * | ||
1768 | * This may be called from interrupt context. | ||
1769 | */ | ||
1770 | static struct ib_ah *qib_create_ah(struct ib_pd *pd, | ||
1771 | struct ib_ah_attr *ah_attr) | ||
1772 | { | ||
1773 | struct qib_ah *ah; | ||
1774 | struct ib_ah *ret; | ||
1775 | struct qib_ibdev *dev = to_idev(pd->device); | ||
1776 | unsigned long flags; | ||
1777 | |||
1778 | if (qib_check_ah(pd->device, ah_attr)) { | ||
1779 | ret = ERR_PTR(-EINVAL); | ||
1780 | goto bail; | ||
1781 | } | ||
1782 | |||
1783 | ah = kmalloc(sizeof *ah, GFP_ATOMIC); | ||
1784 | if (!ah) { | ||
1785 | ret = ERR_PTR(-ENOMEM); | ||
1786 | goto bail; | ||
1787 | } | ||
1788 | |||
1789 | spin_lock_irqsave(&dev->n_ahs_lock, flags); | ||
1790 | if (dev->n_ahs_allocated == ib_qib_max_ahs) { | ||
1791 | spin_unlock_irqrestore(&dev->n_ahs_lock, flags); | ||
1792 | kfree(ah); | ||
1793 | ret = ERR_PTR(-ENOMEM); | ||
1794 | goto bail; | ||
1795 | } | ||
1796 | |||
1797 | dev->n_ahs_allocated++; | ||
1798 | spin_unlock_irqrestore(&dev->n_ahs_lock, flags); | ||
1799 | |||
1800 | /* ib_create_ah() will initialize ah->ibah. */ | ||
1801 | ah->attr = *ah_attr; | ||
1802 | atomic_set(&ah->refcount, 0); | ||
1803 | |||
1804 | ret = &ah->ibah; | ||
1805 | |||
1806 | bail: | ||
1807 | return ret; | ||
1808 | } | ||
1809 | |||
1810 | /** | ||
1811 | * qib_destroy_ah - destroy an address handle | ||
1812 | * @ibah: the AH to destroy | ||
1813 | * | ||
1814 | * This may be called from interrupt context. | ||
1815 | */ | ||
1816 | static int qib_destroy_ah(struct ib_ah *ibah) | ||
1817 | { | ||
1818 | struct qib_ibdev *dev = to_idev(ibah->device); | ||
1819 | struct qib_ah *ah = to_iah(ibah); | ||
1820 | unsigned long flags; | ||
1821 | |||
1822 | if (atomic_read(&ah->refcount) != 0) | ||
1823 | return -EBUSY; | ||
1824 | |||
1825 | spin_lock_irqsave(&dev->n_ahs_lock, flags); | ||
1826 | dev->n_ahs_allocated--; | ||
1827 | spin_unlock_irqrestore(&dev->n_ahs_lock, flags); | ||
1828 | |||
1829 | kfree(ah); | ||
1830 | |||
1831 | return 0; | ||
1832 | } | ||
1833 | |||
1834 | static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) | ||
1835 | { | ||
1836 | struct qib_ah *ah = to_iah(ibah); | ||
1837 | |||
1838 | if (qib_check_ah(ibah->device, ah_attr)) | ||
1839 | return -EINVAL; | ||
1840 | |||
1841 | ah->attr = *ah_attr; | ||
1842 | |||
1843 | return 0; | ||
1844 | } | ||
1845 | |||
1846 | static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) | ||
1847 | { | ||
1848 | struct qib_ah *ah = to_iah(ibah); | ||
1849 | |||
1850 | *ah_attr = ah->attr; | ||
1851 | |||
1852 | return 0; | ||
1853 | } | ||
1854 | |||
1855 | /** | ||
1856 | * qib_get_npkeys - return the size of the PKEY table for context 0 | ||
1857 | * @dd: the qlogic_ib device | ||
1858 | */ | ||
1859 | unsigned qib_get_npkeys(struct qib_devdata *dd) | ||
1860 | { | ||
1861 | return ARRAY_SIZE(dd->rcd[0]->pkeys); | ||
1862 | } | ||
1863 | |||
1864 | /* | ||
1865 | * Return the indexed PKEY from the port PKEY table. | ||
1866 | * No need to validate rcd[ctxt]; the port is set up if we are here. | ||
1867 | */ | ||
1868 | unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index) | ||
1869 | { | ||
1870 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
1871 | struct qib_devdata *dd = ppd->dd; | ||
1872 | unsigned ctxt = ppd->hw_pidx; | ||
1873 | unsigned ret; | ||
1874 | |||
1875 | /* dd->rcd is null after mini_init or some init failures */ | ||
1876 | if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys)) | ||
1877 | ret = 0; | ||
1878 | else | ||
1879 | ret = dd->rcd[ctxt]->pkeys[index]; | ||
1880 | |||
1881 | return ret; | ||
1882 | } | ||
1883 | |||
1884 | static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, | ||
1885 | u16 *pkey) | ||
1886 | { | ||
1887 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | ||
1888 | int ret; | ||
1889 | |||
1890 | if (index >= qib_get_npkeys(dd)) { | ||
1891 | ret = -EINVAL; | ||
1892 | goto bail; | ||
1893 | } | ||
1894 | |||
1895 | *pkey = qib_get_pkey(to_iport(ibdev, port), index); | ||
1896 | ret = 0; | ||
1897 | |||
1898 | bail: | ||
1899 | return ret; | ||
1900 | } | ||
1901 | |||
1902 | /** | ||
1903 | * qib_alloc_ucontext - allocate a ucontext | ||
1904 | * @ibdev: the infiniband device | ||
1905 | * @udata: not used by the QLogic_IB driver | ||
1906 | */ | ||
1907 | |||
1908 | static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev, | ||
1909 | struct ib_udata *udata) | ||
1910 | { | ||
1911 | struct qib_ucontext *context; | ||
1912 | struct ib_ucontext *ret; | ||
1913 | |||
1914 | context = kmalloc(sizeof *context, GFP_KERNEL); | ||
1915 | if (!context) { | ||
1916 | ret = ERR_PTR(-ENOMEM); | ||
1917 | goto bail; | ||
1918 | } | ||
1919 | |||
1920 | ret = &context->ibucontext; | ||
1921 | |||
1922 | bail: | ||
1923 | return ret; | ||
1924 | } | ||
1925 | |||
1926 | static int qib_dealloc_ucontext(struct ib_ucontext *context) | ||
1927 | { | ||
1928 | kfree(to_iucontext(context)); | ||
1929 | return 0; | ||
1930 | } | ||
1931 | |||
1932 | static void init_ibport(struct qib_pportdata *ppd) | ||
1933 | { | ||
1934 | struct qib_verbs_counters cntrs; | ||
1935 | struct qib_ibport *ibp = &ppd->ibport_data; | ||
1936 | |||
1937 | spin_lock_init(&ibp->lock); | ||
1938 | /* Set the prefix to the default value (see ch. 4.1.1) */ | ||
1939 | ibp->gid_prefix = IB_DEFAULT_GID_PREFIX; | ||
1940 | ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); | ||
1941 | ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | | ||
1942 | IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP | | ||
1943 | IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP | | ||
1944 | IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP | | ||
1945 | IB_PORT_OTHER_LOCAL_CHANGES_SUP; | ||
1946 | if (ppd->dd->flags & QIB_HAS_LINK_LATENCY) | ||
1947 | ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; | ||
1948 | ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; | ||
1949 | ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; | ||
1950 | ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; | ||
1951 | ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; | ||
1952 | ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; | ||
1953 | |||
1954 | /* Snapshot current HW counters to "clear" them. */ | ||
1955 | qib_get_counters(ppd, &cntrs); | ||
1956 | ibp->z_symbol_error_counter = cntrs.symbol_error_counter; | ||
1957 | ibp->z_link_error_recovery_counter = | ||
1958 | cntrs.link_error_recovery_counter; | ||
1959 | ibp->z_link_downed_counter = cntrs.link_downed_counter; | ||
1960 | ibp->z_port_rcv_errors = cntrs.port_rcv_errors; | ||
1961 | ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; | ||
1962 | ibp->z_port_xmit_discards = cntrs.port_xmit_discards; | ||
1963 | ibp->z_port_xmit_data = cntrs.port_xmit_data; | ||
1964 | ibp->z_port_rcv_data = cntrs.port_rcv_data; | ||
1965 | ibp->z_port_xmit_packets = cntrs.port_xmit_packets; | ||
1966 | ibp->z_port_rcv_packets = cntrs.port_rcv_packets; | ||
1967 | ibp->z_local_link_integrity_errors = | ||
1968 | cntrs.local_link_integrity_errors; | ||
1969 | ibp->z_excessive_buffer_overrun_errors = | ||
1970 | cntrs.excessive_buffer_overrun_errors; | ||
1971 | ibp->z_vl15_dropped = cntrs.vl15_dropped; | ||
1972 | } | ||
1973 | |||
1974 | /** | ||
1975 | * qib_register_ib_device - register our device with the infiniband core | ||
1976 | * @dd: the device data structure | ||
1977 | * Return 0 on success or a negative errno on failure. | ||
1978 | */ | ||
1979 | int qib_register_ib_device(struct qib_devdata *dd) | ||
1980 | { | ||
1981 | struct qib_ibdev *dev = &dd->verbs_dev; | ||
1982 | struct ib_device *ibdev = &dev->ibdev; | ||
1983 | struct qib_pportdata *ppd = dd->pport; | ||
1984 | unsigned i, lk_tab_size; | ||
1985 | int ret; | ||
1986 | |||
1987 | dev->qp_table_size = ib_qib_qp_table_size; | ||
1988 | dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table, | ||
1989 | GFP_KERNEL); | ||
1990 | if (!dev->qp_table) { | ||
1991 | ret = -ENOMEM; | ||
1992 | goto err_qpt; | ||
1993 | } | ||
1994 | |||
1995 | for (i = 0; i < dd->num_pports; i++) | ||
1996 | init_ibport(ppd + i); | ||
1997 | |||
1998 | /* Only need to initialize non-zero fields. */ | ||
1999 | spin_lock_init(&dev->qpt_lock); | ||
2000 | spin_lock_init(&dev->n_pds_lock); | ||
2001 | spin_lock_init(&dev->n_ahs_lock); | ||
2002 | spin_lock_init(&dev->n_cqs_lock); | ||
2003 | spin_lock_init(&dev->n_qps_lock); | ||
2004 | spin_lock_init(&dev->n_srqs_lock); | ||
2005 | spin_lock_init(&dev->n_mcast_grps_lock); | ||
2006 | init_timer(&dev->mem_timer); | ||
2007 | dev->mem_timer.function = mem_timer; | ||
2008 | dev->mem_timer.data = (unsigned long) dev; | ||
2009 | |||
2010 | qib_init_qpn_table(dd, &dev->qpn_table); | ||
2011 | |||
2012 | /* | ||
2013 | * The top ib_qib_lkey_table_size bits are used to index the | ||
2014 | * table. The lower 8 bits can be owned by the user (copied from | ||
2015 | * the LKEY). The remaining bits act as a generation number or tag. | ||
2016 | */ | ||
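	/*
	 * Worked example of the split described above (illustrative only,
	 * assuming ib_qib_lkey_table_size == 16):
	 *
	 *   lkey        = 0x12345678
	 *   table index = lkey >> (32 - 16) = 0x1234
	 *   user bits   = lkey & 0xff       = 0x78
	 *   generation  = the byte between  = 0x56
	 */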
2017 | spin_lock_init(&dev->lk_table.lock); | ||
2018 | dev->lk_table.max = 1 << ib_qib_lkey_table_size; | ||
2019 | lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); | ||
2020 | dev->lk_table.table = (struct qib_mregion **) | ||
2021 | __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); | ||
2022 | if (dev->lk_table.table == NULL) { | ||
2023 | ret = -ENOMEM; | ||
2024 | goto err_lk; | ||
2025 | } | ||
2026 | memset(dev->lk_table.table, 0, lk_tab_size); | ||
2027 | INIT_LIST_HEAD(&dev->pending_mmaps); | ||
2028 | spin_lock_init(&dev->pending_lock); | ||
2029 | dev->mmap_offset = PAGE_SIZE; | ||
2030 | spin_lock_init(&dev->mmap_offset_lock); | ||
2031 | INIT_LIST_HEAD(&dev->piowait); | ||
2032 | INIT_LIST_HEAD(&dev->dmawait); | ||
2033 | INIT_LIST_HEAD(&dev->txwait); | ||
2034 | INIT_LIST_HEAD(&dev->memwait); | ||
2035 | INIT_LIST_HEAD(&dev->txreq_free); | ||
2036 | |||
2037 | if (ppd->sdma_descq_cnt) { | ||
2038 | dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev, | ||
2039 | ppd->sdma_descq_cnt * | ||
2040 | sizeof(struct qib_pio_header), | ||
2041 | &dev->pio_hdrs_phys, | ||
2042 | GFP_KERNEL); | ||
2043 | if (!dev->pio_hdrs) { | ||
2044 | ret = -ENOMEM; | ||
2045 | goto err_hdrs; | ||
2046 | } | ||
2047 | } | ||
2048 | |||
2049 | for (i = 0; i < ppd->sdma_descq_cnt; i++) { | ||
2050 | struct qib_verbs_txreq *tx; | ||
2051 | |||
2052 | tx = kzalloc(sizeof *tx, GFP_KERNEL); | ||
2053 | if (!tx) { | ||
2054 | ret = -ENOMEM; | ||
2055 | goto err_tx; | ||
2056 | } | ||
2057 | tx->hdr_inx = i; | ||
2058 | list_add(&tx->txreq.list, &dev->txreq_free); | ||
2059 | } | ||
2060 | |||
2061 | /* | ||
2062 | * The system image GUID is supposed to be the same for all | ||
2063 | * IB HCAs in a single system but since there can be other | ||
2064 | * device types in the system, we can't be sure this is unique. | ||
2065 | */ | ||
2066 | if (!ib_qib_sys_image_guid) | ||
2067 | ib_qib_sys_image_guid = ppd->guid; | ||
2068 | |||
2069 | strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX); | ||
2070 | ibdev->owner = THIS_MODULE; | ||
2071 | ibdev->node_guid = ppd->guid; | ||
2072 | ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION; | ||
2073 | ibdev->uverbs_cmd_mask = | ||
2074 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | | ||
2075 | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | | ||
2076 | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | | ||
2077 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | | ||
2078 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | | ||
2079 | (1ull << IB_USER_VERBS_CMD_CREATE_AH) | | ||
2080 | (1ull << IB_USER_VERBS_CMD_MODIFY_AH) | | ||
2081 | (1ull << IB_USER_VERBS_CMD_QUERY_AH) | | ||
2082 | (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | | ||
2083 | (1ull << IB_USER_VERBS_CMD_REG_MR) | | ||
2084 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | | ||
2085 | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | | ||
2086 | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | | ||
2087 | (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | | ||
2088 | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | | ||
2089 | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | | ||
2090 | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | | ||
2091 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | | ||
2092 | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | | ||
2093 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | | ||
2094 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | | ||
2095 | (1ull << IB_USER_VERBS_CMD_POST_SEND) | | ||
2096 | (1ull << IB_USER_VERBS_CMD_POST_RECV) | | ||
2097 | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | | ||
2098 | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | | ||
2099 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | | ||
2100 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | | ||
2101 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | | ||
2102 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | | ||
2103 | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); | ||
2104 | ibdev->node_type = RDMA_NODE_IB_CA; | ||
2105 | ibdev->phys_port_cnt = dd->num_pports; | ||
2106 | ibdev->num_comp_vectors = 1; | ||
2107 | ibdev->dma_device = &dd->pcidev->dev; | ||
2108 | ibdev->query_device = qib_query_device; | ||
2109 | ibdev->modify_device = qib_modify_device; | ||
2110 | ibdev->query_port = qib_query_port; | ||
2111 | ibdev->modify_port = qib_modify_port; | ||
2112 | ibdev->query_pkey = qib_query_pkey; | ||
2113 | ibdev->query_gid = qib_query_gid; | ||
2114 | ibdev->alloc_ucontext = qib_alloc_ucontext; | ||
2115 | ibdev->dealloc_ucontext = qib_dealloc_ucontext; | ||
2116 | ibdev->alloc_pd = qib_alloc_pd; | ||
2117 | ibdev->dealloc_pd = qib_dealloc_pd; | ||
2118 | ibdev->create_ah = qib_create_ah; | ||
2119 | ibdev->destroy_ah = qib_destroy_ah; | ||
2120 | ibdev->modify_ah = qib_modify_ah; | ||
2121 | ibdev->query_ah = qib_query_ah; | ||
2122 | ibdev->create_srq = qib_create_srq; | ||
2123 | ibdev->modify_srq = qib_modify_srq; | ||
2124 | ibdev->query_srq = qib_query_srq; | ||
2125 | ibdev->destroy_srq = qib_destroy_srq; | ||
2126 | ibdev->create_qp = qib_create_qp; | ||
2127 | ibdev->modify_qp = qib_modify_qp; | ||
2128 | ibdev->query_qp = qib_query_qp; | ||
2129 | ibdev->destroy_qp = qib_destroy_qp; | ||
2130 | ibdev->post_send = qib_post_send; | ||
2131 | ibdev->post_recv = qib_post_receive; | ||
2132 | ibdev->post_srq_recv = qib_post_srq_receive; | ||
2133 | ibdev->create_cq = qib_create_cq; | ||
2134 | ibdev->destroy_cq = qib_destroy_cq; | ||
2135 | ibdev->resize_cq = qib_resize_cq; | ||
2136 | ibdev->poll_cq = qib_poll_cq; | ||
2137 | ibdev->req_notify_cq = qib_req_notify_cq; | ||
2138 | ibdev->get_dma_mr = qib_get_dma_mr; | ||
2139 | ibdev->reg_phys_mr = qib_reg_phys_mr; | ||
2140 | ibdev->reg_user_mr = qib_reg_user_mr; | ||
2141 | ibdev->dereg_mr = qib_dereg_mr; | ||
2142 | ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr; | ||
2143 | ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list; | ||
2144 | ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list; | ||
2145 | ibdev->alloc_fmr = qib_alloc_fmr; | ||
2146 | ibdev->map_phys_fmr = qib_map_phys_fmr; | ||
2147 | ibdev->unmap_fmr = qib_unmap_fmr; | ||
2148 | ibdev->dealloc_fmr = qib_dealloc_fmr; | ||
2149 | ibdev->attach_mcast = qib_multicast_attach; | ||
2150 | ibdev->detach_mcast = qib_multicast_detach; | ||
2151 | ibdev->process_mad = qib_process_mad; | ||
2152 | ibdev->mmap = qib_mmap; | ||
2153 | ibdev->dma_ops = &qib_dma_mapping_ops; | ||
2154 | |||
2155 | snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), | ||
2156 | QIB_IDSTR " %s", init_utsname()->nodename); | ||
2157 | |||
2158 | ret = ib_register_device(ibdev, qib_create_port_files); | ||
2159 | if (ret) | ||
2160 | goto err_reg; | ||
2161 | |||
2162 | ret = qib_create_agents(dev); | ||
2163 | if (ret) | ||
2164 | goto err_agents; | ||
2165 | |||
2166 | ret = qib_verbs_register_sysfs(dd); | ||
2167 | if (ret) goto err_class; | ||
2168 | |||
2169 | goto bail; | ||
2170 | |||
2171 | err_class: | ||
2172 | qib_free_agents(dev); | ||
2173 | err_agents: | ||
2174 | ib_unregister_device(ibdev); | ||
2175 | err_reg: | ||
2176 | err_tx: | ||
2177 | while (!list_empty(&dev->txreq_free)) { | ||
2178 | struct list_head *l = dev->txreq_free.next; | ||
2179 | struct qib_verbs_txreq *tx; | ||
2180 | |||
2181 | list_del(l); | ||
2182 | tx = list_entry(l, struct qib_verbs_txreq, txreq.list); | ||
2183 | kfree(tx); | ||
2184 | } | ||
2185 | if (ppd->sdma_descq_cnt) | ||
2186 | dma_free_coherent(&dd->pcidev->dev, | ||
2187 | ppd->sdma_descq_cnt * | ||
2188 | sizeof(struct qib_pio_header), | ||
2189 | dev->pio_hdrs, dev->pio_hdrs_phys); | ||
2190 | err_hdrs: | ||
2191 | free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size)); | ||
2192 | err_lk: | ||
2193 | kfree(dev->qp_table); | ||
2194 | err_qpt: | ||
2195 | qib_dev_err(dd, "cannot register verbs: %d!\n", -ret); | ||
2196 | bail: | ||
2197 | return ret; | ||
2198 | } | ||
2199 | |||
2200 | void qib_unregister_ib_device(struct qib_devdata *dd) | ||
2201 | { | ||
2202 | struct qib_ibdev *dev = &dd->verbs_dev; | ||
2203 | struct ib_device *ibdev = &dev->ibdev; | ||
2204 | u32 qps_inuse; | ||
2205 | unsigned lk_tab_size; | ||
2206 | |||
2207 | qib_verbs_unregister_sysfs(dd); | ||
2208 | |||
2209 | qib_free_agents(dev); | ||
2210 | |||
2211 | ib_unregister_device(ibdev); | ||
2212 | |||
2213 | if (!list_empty(&dev->piowait)) | ||
2214 | qib_dev_err(dd, "piowait list not empty!\n"); | ||
2215 | if (!list_empty(&dev->dmawait)) | ||
2216 | qib_dev_err(dd, "dmawait list not empty!\n"); | ||
2217 | if (!list_empty(&dev->txwait)) | ||
2218 | qib_dev_err(dd, "txwait list not empty!\n"); | ||
2219 | if (!list_empty(&dev->memwait)) | ||
2220 | qib_dev_err(dd, "memwait list not empty!\n"); | ||
2221 | if (dev->dma_mr) | ||
2222 | qib_dev_err(dd, "DMA MR not NULL!\n"); | ||
2223 | |||
2224 | qps_inuse = qib_free_all_qps(dd); | ||
2225 | if (qps_inuse) | ||
2226 | qib_dev_err(dd, "QP memory leak! %u still in use\n", | ||
2227 | qps_inuse); | ||
2228 | |||
2229 | del_timer_sync(&dev->mem_timer); | ||
2230 | qib_free_qpn_table(&dev->qpn_table); | ||
2231 | while (!list_empty(&dev->txreq_free)) { | ||
2232 | struct list_head *l = dev->txreq_free.next; | ||
2233 | struct qib_verbs_txreq *tx; | ||
2234 | |||
2235 | list_del(l); | ||
2236 | tx = list_entry(l, struct qib_verbs_txreq, txreq.list); | ||
2237 | kfree(tx); | ||
2238 | } | ||
2239 | if (dd->pport->sdma_descq_cnt) | ||
2240 | dma_free_coherent(&dd->pcidev->dev, | ||
2241 | dd->pport->sdma_descq_cnt * | ||
2242 | sizeof(struct qib_pio_header), | ||
2243 | dev->pio_hdrs, dev->pio_hdrs_phys); | ||
2244 | lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); | ||
2245 | free_pages((unsigned long) dev->lk_table.table, | ||
2246 | get_order(lk_tab_size)); | ||
2247 | kfree(dev->qp_table); | ||
2248 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h new file mode 100644 index 000000000000..bd57c1273225 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
@@ -0,0 +1,1100 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | ||
3 | * All rights reserved. | ||
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
5 | * | ||
6 | * This software is available to you under a choice of one of two | ||
7 | * licenses. You may choose to be licensed under the terms of the GNU | ||
8 | * General Public License (GPL) Version 2, available from the file | ||
9 | * COPYING in the main directory of this source tree, or the | ||
10 | * OpenIB.org BSD license below: | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or | ||
13 | * without modification, are permitted provided that the following | ||
14 | * conditions are met: | ||
15 | * | ||
16 | * - Redistributions of source code must retain the above | ||
17 | * copyright notice, this list of conditions and the following | ||
18 | * disclaimer. | ||
19 | * | ||
20 | * - Redistributions in binary form must reproduce the above | ||
21 | * copyright notice, this list of conditions and the following | ||
22 | * disclaimer in the documentation and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
32 | * SOFTWARE. | ||
33 | */ | ||
34 | |||
35 | #ifndef QIB_VERBS_H | ||
36 | #define QIB_VERBS_H | ||
37 | |||
38 | #include <linux/types.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/interrupt.h> | ||
42 | #include <linux/kref.h> | ||
43 | #include <linux/workqueue.h> | ||
44 | #include <rdma/ib_pack.h> | ||
45 | #include <rdma/ib_user_verbs.h> | ||
46 | |||
47 | struct qib_ctxtdata; | ||
48 | struct qib_pportdata; | ||
49 | struct qib_devdata; | ||
50 | struct qib_verbs_txreq; | ||
51 | |||
52 | #define QIB_MAX_RDMA_ATOMIC 16 | ||
53 | #define QIB_GUIDS_PER_PORT 5 | ||
54 | |||
55 | #define QPN_MAX (1 << 24) | ||
56 | #define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) | ||
57 | |||
58 | /* | ||
59 | * Increment this value if any changes that break userspace ABI | ||
60 | * compatibility are made. | ||
61 | */ | ||
62 | #define QIB_UVERBS_ABI_VERSION 2 | ||
63 | |||
64 | /* | ||
65 | * Define an ib_cq_notify value that is not valid so we know when CQ | ||
66 | * notifications are armed. | ||
67 | */ | ||
68 | #define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1) | ||
69 | |||
70 | #define IB_SEQ_NAK (3 << 29) | ||
71 | |||
72 | /* AETH NAK opcode values */ | ||
73 | #define IB_RNR_NAK 0x20 | ||
74 | #define IB_NAK_PSN_ERROR 0x60 | ||
75 | #define IB_NAK_INVALID_REQUEST 0x61 | ||
76 | #define IB_NAK_REMOTE_ACCESS_ERROR 0x62 | ||
77 | #define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63 | ||
78 | #define IB_NAK_INVALID_RD_REQUEST 0x64 | ||
79 | |||
80 | /* Flags for checking QP state (see ib_qib_state_ops[]) */ | ||
81 | #define QIB_POST_SEND_OK 0x01 | ||
82 | #define QIB_POST_RECV_OK 0x02 | ||
83 | #define QIB_PROCESS_RECV_OK 0x04 | ||
84 | #define QIB_PROCESS_SEND_OK 0x08 | ||
85 | #define QIB_PROCESS_NEXT_SEND_OK 0x10 | ||
86 | #define QIB_FLUSH_SEND 0x20 | ||
87 | #define QIB_FLUSH_RECV 0x40 | ||
88 | #define QIB_PROCESS_OR_FLUSH_SEND \ | ||
89 | (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND) | ||
90 | |||
91 | /* IB Performance Manager status values */ | ||
92 | #define IB_PMA_SAMPLE_STATUS_DONE 0x00 | ||
93 | #define IB_PMA_SAMPLE_STATUS_STARTED 0x01 | ||
94 | #define IB_PMA_SAMPLE_STATUS_RUNNING 0x02 | ||
95 | |||
96 | /* Mandatory IB performance counter select values. */ | ||
97 | #define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001) | ||
98 | #define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002) | ||
99 | #define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003) | ||
100 | #define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004) | ||
101 | #define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005) | ||
102 | |||
103 | #define QIB_VENDOR_IPG cpu_to_be16(0xFFA0) | ||
104 | |||
105 | #define IB_BTH_REQ_ACK (1 << 31) | ||
106 | #define IB_BTH_SOLICITED (1 << 23) | ||
107 | #define IB_BTH_MIG_REQ (1 << 22) | ||
108 | |||
109 | /* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */ | ||
110 | #define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26) | ||
111 | |||
112 | #define IB_GRH_VERSION 6 | ||
113 | #define IB_GRH_VERSION_MASK 0xF | ||
114 | #define IB_GRH_VERSION_SHIFT 28 | ||
115 | #define IB_GRH_TCLASS_MASK 0xFF | ||
116 | #define IB_GRH_TCLASS_SHIFT 20 | ||
117 | #define IB_GRH_FLOW_MASK 0xFFFFF | ||
118 | #define IB_GRH_FLOW_SHIFT 0 | ||
119 | #define IB_GRH_NEXT_HDR 0x1B | ||
120 | |||
121 | #define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL) | ||
122 | |||
123 | /* Values for set/get portinfo VLCap OperationalVLs */ | ||
124 | #define IB_VL_VL0 1 | ||
125 | #define IB_VL_VL0_1 2 | ||
126 | #define IB_VL_VL0_3 3 | ||
127 | #define IB_VL_VL0_7 4 | ||
128 | #define IB_VL_VL0_14 5 | ||
129 | |||
130 | static inline int qib_num_vls(int vls) | ||
131 | { | ||
132 | switch (vls) { | ||
133 | default: | ||
134 | case IB_VL_VL0: | ||
135 | return 1; | ||
136 | case IB_VL_VL0_1: | ||
137 | return 2; | ||
138 | case IB_VL_VL0_3: | ||
139 | return 4; | ||
140 | case IB_VL_VL0_7: | ||
141 | return 8; | ||
142 | case IB_VL_VL0_14: | ||
143 | return 15; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | struct ib_reth { | ||
148 | __be64 vaddr; | ||
149 | __be32 rkey; | ||
150 | __be32 length; | ||
151 | } __attribute__ ((packed)); | ||
152 | |||
153 | struct ib_atomic_eth { | ||
154 | __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */ | ||
155 | __be32 rkey; | ||
156 | __be64 swap_data; | ||
157 | __be64 compare_data; | ||
158 | } __attribute__ ((packed)); | ||
159 | |||
160 | struct qib_other_headers { | ||
161 | __be32 bth[3]; | ||
162 | union { | ||
163 | struct { | ||
164 | __be32 deth[2]; | ||
165 | __be32 imm_data; | ||
166 | } ud; | ||
167 | struct { | ||
168 | struct ib_reth reth; | ||
169 | __be32 imm_data; | ||
170 | } rc; | ||
171 | struct { | ||
172 | __be32 aeth; | ||
173 | __be32 atomic_ack_eth[2]; | ||
174 | } at; | ||
175 | __be32 imm_data; | ||
176 | __be32 aeth; | ||
177 | struct ib_atomic_eth atomic_eth; | ||
178 | } u; | ||
179 | } __attribute__ ((packed)); | ||
180 | |||
181 | /* | ||
182 | * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes | ||
183 | * long (72 w/ imm_data). Only the first 56 bytes of the IB header | ||
184 | * will be in the eager header buffer. The remaining 12 or 16 bytes | ||
185 | * are in the data buffer. | ||
186 | */ | ||
187 | struct qib_ib_header { | ||
188 | __be16 lrh[4]; | ||
189 | union { | ||
190 | struct { | ||
191 | struct ib_grh grh; | ||
192 | struct qib_other_headers oth; | ||
193 | } l; | ||
194 | struct qib_other_headers oth; | ||
195 | } u; | ||
196 | } __attribute__ ((packed)); | ||
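A quick standalone check of the 8+40+12+8 = 68 byte figure in the comment above (a sketch, not driver code): the LRH is four 16-bit words, the GRH is 40 bytes, the BTH is three 32-bit words, and the UD DETH is two 32-bit words; 4 bytes of immediate data bring the total to 72.

#include <assert.h>

int main(void)
{
	const int lrh = 4 * 2;   /* local route header: 4 x 16-bit words */
	const int grh = 40;      /* global route header */
	const int bth = 3 * 4;   /* base transport header: 3 x 32-bit words */
	const int deth = 2 * 4;  /* datagram extended transport header */
	const int imm = 4;       /* optional immediate data */

	assert(lrh + grh + bth + deth == 68);
	assert(lrh + grh + bth + deth + imm == 72);
	return 0;
}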
197 | |||
198 | struct qib_pio_header { | ||
199 | __le32 pbc[2]; | ||
200 | struct qib_ib_header hdr; | ||
201 | } __attribute__ ((packed)); | ||
202 | |||
203 | /* | ||
204 | * There is one struct qib_mcast for each multicast GID. | ||
205 | * All attached QPs are then stored as a list of | ||
206 | * struct qib_mcast_qp. | ||
207 | */ | ||
208 | struct qib_mcast_qp { | ||
209 | struct list_head list; | ||
210 | struct qib_qp *qp; | ||
211 | }; | ||
212 | |||
213 | struct qib_mcast { | ||
214 | struct rb_node rb_node; | ||
215 | union ib_gid mgid; | ||
216 | struct list_head qp_list; | ||
217 | wait_queue_head_t wait; | ||
218 | atomic_t refcount; | ||
219 | int n_attached; | ||
220 | }; | ||
221 | |||
222 | /* Protection domain */ | ||
223 | struct qib_pd { | ||
224 | struct ib_pd ibpd; | ||
225 | int user; /* non-zero if created from user space */ | ||
226 | }; | ||
227 | |||
228 | /* Address Handle */ | ||
229 | struct qib_ah { | ||
230 | struct ib_ah ibah; | ||
231 | struct ib_ah_attr attr; | ||
232 | atomic_t refcount; | ||
233 | }; | ||
234 | |||
235 | /* | ||
236 | * This structure is used by qib_mmap() to validate an offset | ||
237 | * when an mmap() request is made. The vm_area_struct then uses | ||
238 | * this as its vm_private_data. | ||
239 | */ | ||
240 | struct qib_mmap_info { | ||
241 | struct list_head pending_mmaps; | ||
242 | struct ib_ucontext *context; | ||
243 | void *obj; | ||
244 | __u64 offset; | ||
245 | struct kref ref; | ||
246 | unsigned size; | ||
247 | }; | ||
248 | |||
249 | /* | ||
250 | * This structure is used to contain the head pointer, tail pointer, | ||
251 | * and completion queue entries as a single memory allocation so | ||
252 | * it can be mmap'ed into user space. | ||
253 | */ | ||
254 | struct qib_cq_wc { | ||
255 | u32 head; /* index of next entry to fill */ | ||
256 | u32 tail; /* index of next ib_poll_cq() entry */ | ||
257 | union { | ||
258 | /* these are actually size ibcq.cqe + 1 */ | ||
259 | struct ib_uverbs_wc uqueue[0]; | ||
260 | struct ib_wc kqueue[0]; | ||
261 | }; | ||
262 | }; | ||
263 | |||
264 | /* | ||
265 | * The completion queue structure. | ||
266 | */ | ||
267 | struct qib_cq { | ||
268 | struct ib_cq ibcq; | ||
269 | struct work_struct comptask; | ||
270 | spinlock_t lock; /* protect changes in this struct */ | ||
271 | u8 notify; | ||
272 | u8 triggered; | ||
273 | struct qib_cq_wc *queue; | ||
274 | struct qib_mmap_info *ip; | ||
275 | }; | ||
276 | |||
277 | /* | ||
278 | * A segment is a linear region of low physical memory. | ||
279 | * XXX Maybe we should use phys addr here and kmap()/kunmap(). | ||
280 | * Used by the verbs layer. | ||
281 | */ | ||
282 | struct qib_seg { | ||
283 | void *vaddr; | ||
284 | size_t length; | ||
285 | }; | ||
286 | |||
287 | /* The number of qib_segs that fit in a page. */ | ||
288 | #define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg)) | ||
289 | |||
290 | struct qib_segarray { | ||
291 | struct qib_seg segs[QIB_SEGSZ]; | ||
292 | }; | ||
293 | |||
294 | struct qib_mregion { | ||
295 | struct ib_pd *pd; /* shares refcnt of ibmr.pd */ | ||
296 | u64 user_base; /* User's address for this region */ | ||
297 | u64 iova; /* IB start address of this region */ | ||
298 | size_t length; | ||
299 | u32 lkey; | ||
300 | u32 offset; /* offset (bytes) to start of region */ | ||
301 | int access_flags; | ||
302 | u32 max_segs; /* number of qib_segs in all the arrays */ | ||
303 | u32 mapsz; /* size of the map array */ | ||
304 | atomic_t refcount; | ||
305 | struct qib_segarray *map[0]; /* the segments */ | ||
306 | }; | ||
307 | |||
308 | /* | ||
309 | * These keep track of the copy progress within a memory region. | ||
310 | * Used by the verbs layer. | ||
311 | */ | ||
312 | struct qib_sge { | ||
313 | struct qib_mregion *mr; | ||
314 | void *vaddr; /* kernel virtual address of segment */ | ||
315 | u32 sge_length; /* length of the SGE */ | ||
316 | u32 length; /* remaining length of the segment */ | ||
317 | u16 m; /* current index: mr->map[m] */ | ||
318 | u16 n; /* current index: mr->map[m]->segs[n] */ | ||
319 | }; | ||
320 | |||
321 | /* Memory region */ | ||
322 | struct qib_mr { | ||
323 | struct ib_mr ibmr; | ||
324 | struct ib_umem *umem; | ||
325 | struct qib_mregion mr; /* must be last */ | ||
326 | }; | ||
327 | |||
328 | /* | ||
329 | * Send work request queue entry. | ||
330 | * The size of the sg_list is determined when the QP is created and stored | ||
331 | * in qp->s_max_sge. | ||
332 | */ | ||
333 | struct qib_swqe { | ||
334 | struct ib_send_wr wr; /* don't use wr.sg_list */ | ||
335 | u32 psn; /* first packet sequence number */ | ||
336 | u32 lpsn; /* last packet sequence number */ | ||
337 | u32 ssn; /* send sequence number */ | ||
338 | u32 length; /* total length of data in sg_list */ | ||
339 | struct qib_sge sg_list[0]; | ||
340 | }; | ||
341 | |||
342 | /* | ||
343 | * Receive work request queue entry. | ||
344 | * The size of the sg_list is determined when the QP (or SRQ) is created | ||
345 | * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). | ||
346 | */ | ||
347 | struct qib_rwqe { | ||
348 | u64 wr_id; | ||
349 | u8 num_sge; | ||
350 | struct ib_sge sg_list[0]; | ||
351 | }; | ||
352 | |||
353 | /* | ||
354 | * This structure is used to contain the head pointer, tail pointer, | ||
355 | * and receive work queue entries as a single memory allocation so | ||
356 | * it can be mmap'ed into user space. | ||
357 | * Note that the wq array elements are variable size so you can't | ||
358 | * just index into the array to get the N'th element; | ||
359 | * use get_rwqe_ptr() instead. | ||
360 | */ | ||
361 | struct qib_rwq { | ||
362 | u32 head; /* new work requests posted to the head */ | ||
363 | u32 tail; /* receive processing pulls work requests from here. */ | ||
364 | struct qib_rwqe wq[0]; | ||
365 | }; | ||
366 | |||
367 | struct qib_rq { | ||
368 | struct qib_rwq *wq; | ||
369 | spinlock_t lock; /* protect changes in this struct */ | ||
370 | u32 size; /* size of RWQE array */ | ||
371 | u8 max_sge; | ||
372 | }; | ||
373 | |||
374 | struct qib_srq { | ||
375 | struct ib_srq ibsrq; | ||
376 | struct qib_rq rq; | ||
377 | struct qib_mmap_info *ip; | ||
378 | /* send signal when number of RWQEs < limit */ | ||
379 | u32 limit; | ||
380 | }; | ||
381 | |||
382 | struct qib_sge_state { | ||
383 | struct qib_sge *sg_list; /* next SGE to be used if any */ | ||
384 | struct qib_sge sge; /* progress state for the current SGE */ | ||
385 | u32 total_len; | ||
386 | u8 num_sge; | ||
387 | }; | ||
388 | |||
389 | /* | ||
390 | * This structure holds the information that the send tasklet needs | ||
391 | * to send a RDMA read response or atomic operation. | ||
392 | */ | ||
393 | struct qib_ack_entry { | ||
394 | u8 opcode; | ||
395 | u8 sent; | ||
396 | u32 psn; | ||
397 | u32 lpsn; | ||
398 | union { | ||
399 | struct qib_sge rdma_sge; | ||
400 | u64 atomic_data; | ||
401 | }; | ||
402 | }; | ||
403 | |||
404 | /* | ||
405 | * Variables prefixed with s_ are for the requester (sender). | ||
406 | * Variables prefixed with r_ are for the responder (receiver). | ||
407 | * Variables prefixed with ack_ are for responder replies. | ||
408 | * | ||
409 | * Common variables are protected by both r_rq.lock and s_lock, taken in | ||
410 | * that order; this only happens in modify_qp() or when changing the QP 'state'. | ||
411 | */ | ||
412 | struct qib_qp { | ||
413 | struct ib_qp ibqp; | ||
414 | struct qib_qp *next; /* linked list for QPN hash table */ | ||
415 | struct qib_qp *timer_next; /* linked list for qib_ib_timer() */ | ||
416 | struct list_head iowait; /* link for wait PIO buf */ | ||
417 | struct list_head rspwait; /* link for waiting to respond */ | ||
418 | struct ib_ah_attr remote_ah_attr; | ||
419 | struct ib_ah_attr alt_ah_attr; | ||
420 | struct qib_ib_header s_hdr; /* next packet header to send */ | ||
421 | atomic_t refcount; | ||
422 | wait_queue_head_t wait; | ||
423 | wait_queue_head_t wait_dma; | ||
424 | struct timer_list s_timer; | ||
425 | struct work_struct s_work; | ||
426 | struct qib_mmap_info *ip; | ||
427 | struct qib_sge_state *s_cur_sge; | ||
428 | struct qib_verbs_txreq *s_tx; | ||
429 | struct qib_mregion *s_rdma_mr; | ||
430 | struct qib_sge_state s_sge; /* current send request data */ | ||
431 | struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]; | ||
432 | struct qib_sge_state s_ack_rdma_sge; | ||
433 | struct qib_sge_state s_rdma_read_sge; | ||
434 | struct qib_sge_state r_sge; /* current receive data */ | ||
435 | spinlock_t r_lock; /* used for APM */ | ||
436 | spinlock_t s_lock; | ||
437 | atomic_t s_dma_busy; | ||
438 | unsigned processor_id; /* Processor ID QP is bound to */ | ||
439 | u32 s_flags; | ||
440 | u32 s_cur_size; /* size of send packet in bytes */ | ||
441 | u32 s_len; /* total length of s_sge */ | ||
442 | u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ | ||
443 | u32 s_next_psn; /* PSN for next request */ | ||
444 | u32 s_last_psn; /* last response PSN processed */ | ||
445 | u32 s_sending_psn; /* lowest PSN that is being sent */ | ||
446 | u32 s_sending_hpsn; /* highest PSN that is being sent */ | ||
447 | u32 s_psn; /* current packet sequence number */ | ||
448 | u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ | ||
449 | u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ | ||
450 | u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ | ||
451 | u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ | ||
452 | u64 r_wr_id; /* ID for current receive WQE */ | ||
453 | unsigned long r_aflags; | ||
454 | u32 r_len; /* total length of r_sge */ | ||
455 | u32 r_rcv_len; /* receive data len processed */ | ||
456 | u32 r_psn; /* expected rcv packet sequence number */ | ||
457 | u32 r_msn; /* message sequence number */ | ||
458 | u16 s_hdrwords; /* size of s_hdr in 32 bit words */ | ||
459 | u16 s_rdma_ack_cnt; | ||
460 | u8 state; /* QP state */ | ||
461 | u8 s_state; /* opcode of last packet sent */ | ||
462 | u8 s_ack_state; /* opcode of packet to ACK */ | ||
463 | u8 s_nak_state; /* non-zero if NAK is pending */ | ||
464 | u8 r_state; /* opcode of last packet received */ | ||
465 | u8 r_nak_state; /* non-zero if NAK is pending */ | ||
466 | u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ | ||
467 | u8 r_flags; | ||
468 | u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ | ||
469 | u8 r_head_ack_queue; /* index into s_ack_queue[] */ | ||
470 | u8 qp_access_flags; | ||
471 | u8 s_max_sge; /* size of s_wq->sg_list */ | ||
472 | u8 s_retry_cnt; /* number of times to retry */ | ||
473 | u8 s_rnr_retry_cnt; | ||
474 | u8 s_retry; /* requester retry counter */ | ||
475 | u8 s_rnr_retry; /* requester RNR retry counter */ | ||
476 | u8 s_pkey_index; /* PKEY index to use */ | ||
477 | u8 s_alt_pkey_index; /* Alternate path PKEY index to use */ | ||
478 | u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */ | ||
479 | u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ | ||
480 | u8 s_tail_ack_queue; /* index into s_ack_queue[] */ | ||
481 | u8 s_srate; | ||
482 | u8 s_draining; | ||
483 | u8 s_mig_state; | ||
484 | u8 timeout; /* Timeout for this QP */ | ||
485 | u8 alt_timeout; /* Alternate path timeout for this QP */ | ||
486 | u8 port_num; | ||
487 | enum ib_mtu path_mtu; | ||
488 | u32 remote_qpn; | ||
489 | u32 qkey; /* QKEY for this QP (for UD or RD) */ | ||
490 | u32 s_size; /* send work queue size */ | ||
491 | u32 s_head; /* new entries added here */ | ||
492 | u32 s_tail; /* next entry to process */ | ||
493 | u32 s_cur; /* current work queue entry */ | ||
494 | u32 s_acked; /* last un-ACK'ed entry */ | ||
495 | u32 s_last; /* last completed entry */ | ||
496 | u32 s_ssn; /* SSN of tail entry */ | ||
497 | u32 s_lsn; /* limit sequence number (credit) */ | ||
498 | struct qib_swqe *s_wq; /* send work queue */ | ||
499 | struct qib_swqe *s_wqe; | ||
500 | struct qib_rq r_rq; /* receive work queue */ | ||
501 | struct qib_sge r_sg_list[0]; /* verified SGEs */ | ||
502 | }; | ||
503 | |||
504 | /* | ||
505 | * Atomic bit definitions for r_aflags. | ||
506 | */ | ||
507 | #define QIB_R_WRID_VALID 0 | ||
508 | #define QIB_R_REWIND_SGE 1 | ||
509 | |||
510 | /* | ||
511 | * Bit definitions for r_flags. | ||
512 | */ | ||
513 | #define QIB_R_REUSE_SGE 0x01 | ||
514 | #define QIB_R_RDMAR_SEQ 0x02 | ||
515 | #define QIB_R_RSP_NAK 0x04 | ||
516 | #define QIB_R_RSP_SEND 0x08 | ||
517 | #define QIB_R_COMM_EST 0x10 | ||
518 | |||
519 | /* | ||
520 | * Bit definitions for s_flags. | ||
521 | * | ||
522 | * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled | ||
523 | * QIB_S_BUSY - send tasklet is processing the QP | ||
524 | * QIB_S_TIMER - the RC retry timer is active | ||
525 | * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics | ||
526 | * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs | ||
527 | * before processing the next SWQE | ||
528 | * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete | ||
529 | * before processing the next SWQE | ||
530 | * QIB_S_WAIT_RNR - waiting for RNR timeout | ||
531 | * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE | ||
532 | * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating | ||
533 | * next send completion entry not via send DMA | ||
534 | * QIB_S_WAIT_PIO - waiting for a send buffer to be available | ||
535 | * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available | ||
536 | * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available | ||
537 | * QIB_S_WAIT_KMEM - waiting for kernel memory to be available | ||
538 | * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue | ||
539 | * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests | ||
540 | * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK | ||
541 | */ | ||
542 | #define QIB_S_SIGNAL_REQ_WR 0x0001 | ||
543 | #define QIB_S_BUSY 0x0002 | ||
544 | #define QIB_S_TIMER 0x0004 | ||
545 | #define QIB_S_RESP_PENDING 0x0008 | ||
546 | #define QIB_S_ACK_PENDING 0x0010 | ||
547 | #define QIB_S_WAIT_FENCE 0x0020 | ||
548 | #define QIB_S_WAIT_RDMAR 0x0040 | ||
549 | #define QIB_S_WAIT_RNR 0x0080 | ||
550 | #define QIB_S_WAIT_SSN_CREDIT 0x0100 | ||
551 | #define QIB_S_WAIT_DMA 0x0200 | ||
552 | #define QIB_S_WAIT_PIO 0x0400 | ||
553 | #define QIB_S_WAIT_TX 0x0800 | ||
554 | #define QIB_S_WAIT_DMA_DESC 0x1000 | ||
555 | #define QIB_S_WAIT_KMEM 0x2000 | ||
556 | #define QIB_S_WAIT_PSN 0x4000 | ||
557 | #define QIB_S_WAIT_ACK 0x8000 | ||
558 | #define QIB_S_SEND_ONE 0x10000 | ||
559 | #define QIB_S_UNLIMITED_CREDIT 0x20000 | ||
560 | |||
561 | /* | ||
562 | * Wait flags that would prevent any packet type from being sent. | ||
563 | */ | ||
564 | #define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \ | ||
565 | QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM) | ||
566 | |||
567 | /* | ||
568 | * Wait flags that would prevent send work requests from making progress. | ||
569 | */ | ||
570 | #define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \ | ||
571 | QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \ | ||
572 | QIB_S_WAIT_PSN | QIB_S_WAIT_ACK) | ||
573 | |||
574 | #define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND) | ||
575 | |||
576 | #define QIB_PSN_CREDIT 16 | ||
577 | |||
578 | /* | ||
579 | * Since struct qib_swqe is not a fixed size, we can't simply index into | ||
580 | * struct qib_qp.s_wq. This function does the array index computation. | ||
581 | */ | ||
582 | static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp, | ||
583 | unsigned n) | ||
584 | { | ||
585 | return (struct qib_swqe *)((char *)qp->s_wq + | ||
586 | (sizeof(struct qib_swqe) + | ||
587 | qp->s_max_sge * | ||
588 | sizeof(struct qib_sge)) * n); | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * Since struct qib_rwqe is not a fixed size, we can't simply index into | ||
593 | * struct qib_rwq.wq. This function does the array index computation. | ||
594 | */ | ||
595 | static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n) | ||
596 | { | ||
597 | return (struct qib_rwqe *) | ||
598 | ((char *) rq->wq->wq + | ||
599 | (sizeof(struct qib_rwqe) + | ||
600 | rq->max_sge * sizeof(struct ib_sge)) * n); | ||
601 | } | ||
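Both helpers above exist because the WQE structures end in a flexible sg_list[] and therefore have no fixed sizeof(); element N lives at base + N * stride, with the stride fixed when the QP or SRQ is created. A small user-space sketch of the same variable-stride pattern, using a simplified element type rather than the driver's structures:

#include <stdio.h>
#include <stdlib.h>

struct elem {
	unsigned long wr_id;
	unsigned num_sge;
	unsigned long sg_list[];        /* flexible array: max_sge entries */
};

/* Same idea as get_rwqe_ptr(): compute the address of element n by hand. */
static struct elem *elem_ptr(void *base, unsigned max_sge, unsigned n)
{
	size_t stride = sizeof(struct elem) + max_sge * sizeof(unsigned long);

	return (struct elem *)((char *)base + stride * n);
}

int main(void)
{
	unsigned max_sge = 3, count = 4;
	size_t stride = sizeof(struct elem) + max_sge * sizeof(unsigned long);
	void *wq = calloc(count, stride);
	unsigned i;

	for (i = 0; i < count; i++)
		elem_ptr(wq, max_sge, i)->wr_id = i;
	printf("entry 2 wr_id = %lu\n", elem_ptr(wq, max_sge, 2)->wr_id);
	free(wq);
	return 0;
}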
602 | |||
603 | /* | ||
604 | * QPN-map pages start out as NULL; they get allocated upon | ||
605 | * first use and are never deallocated. This way, | ||
606 | * large bitmaps are not allocated unless large numbers of QPs are used. | ||
607 | */ | ||
608 | struct qpn_map { | ||
609 | void *page; | ||
610 | }; | ||
611 | |||
612 | struct qib_qpn_table { | ||
613 | spinlock_t lock; /* protect changes in this struct */ | ||
614 | unsigned flags; /* flags for QP0/1 allocated for each port */ | ||
615 | u32 last; /* last QP number allocated */ | ||
616 | u32 nmaps; /* size of the map table */ | ||
617 | u16 limit; | ||
618 | u16 mask; | ||
619 | /* bit map of free QP numbers other than 0/1 */ | ||
620 | struct qpn_map map[QPNMAP_ENTRIES]; | ||
621 | }; | ||
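Since QPNMAP_ENTRIES is defined above as QPN_MAX / PAGE_SIZE / BITS_PER_BYTE, each qpn_map page covers PAGE_SIZE * BITS_PER_BYTE QPNs, so a QPN decomposes into a page index and a bit offset within that page. A hedged user-space sketch of that decomposition, assuming a 4096-byte page purely for illustration:

#include <stdio.h>

#define ASSUMED_PAGE_SIZE 4096          /* illustration only */
#define BITS_PER_BYTE 8
#define BITS_PER_PAGE (ASSUMED_PAGE_SIZE * BITS_PER_BYTE)

int main(void)
{
	unsigned qpn = 100000;
	unsigned map = qpn / BITS_PER_PAGE;  /* which lazily-allocated qpn_map page */
	unsigned off = qpn % BITS_PER_PAGE;  /* bit position within that page */

	printf("QPN %u -> map[%u], bit %u\n", qpn, map, off);
	return 0;
}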
622 | |||
623 | struct qib_lkey_table { | ||
624 | spinlock_t lock; /* protect changes in this struct */ | ||
625 | u32 next; /* next unused index (speeds search) */ | ||
626 | u32 gen; /* generation count */ | ||
627 | u32 max; /* size of the table */ | ||
628 | struct qib_mregion **table; | ||
629 | }; | ||
630 | |||
631 | struct qib_opcode_stats { | ||
632 | u64 n_packets; /* number of packets */ | ||
633 | u64 n_bytes; /* total number of bytes */ | ||
634 | }; | ||
635 | |||
636 | struct qib_ibport { | ||
637 | struct qib_qp *qp0; | ||
638 | struct qib_qp *qp1; | ||
639 | struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ | ||
640 | struct qib_ah *sm_ah; | ||
641 | struct qib_ah *smi_ah; | ||
642 | struct rb_root mcast_tree; | ||
643 | spinlock_t lock; /* protect changes in this struct */ | ||
644 | |||
645 | /* non-zero when timer is set */ | ||
646 | unsigned long mkey_lease_timeout; | ||
647 | unsigned long trap_timeout; | ||
648 | __be64 gid_prefix; /* in network order */ | ||
649 | __be64 mkey; | ||
650 | __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */ | ||
651 | u64 tid; /* TID for traps */ | ||
652 | u64 n_unicast_xmit; /* total unicast packets sent */ | ||
653 | u64 n_unicast_rcv; /* total unicast packets received */ | ||
654 | u64 n_multicast_xmit; /* total multicast packets sent */ | ||
655 | u64 n_multicast_rcv; /* total multicast packets received */ | ||
656 | u64 z_symbol_error_counter; /* starting count for PMA */ | ||
657 | u64 z_link_error_recovery_counter; /* starting count for PMA */ | ||
658 | u64 z_link_downed_counter; /* starting count for PMA */ | ||
659 | u64 z_port_rcv_errors; /* starting count for PMA */ | ||
660 | u64 z_port_rcv_remphys_errors; /* starting count for PMA */ | ||
661 | u64 z_port_xmit_discards; /* starting count for PMA */ | ||
662 | u64 z_port_xmit_data; /* starting count for PMA */ | ||
663 | u64 z_port_rcv_data; /* starting count for PMA */ | ||
664 | u64 z_port_xmit_packets; /* starting count for PMA */ | ||
665 | u64 z_port_rcv_packets; /* starting count for PMA */ | ||
666 | u32 z_local_link_integrity_errors; /* starting count for PMA */ | ||
667 | u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */ | ||
668 | u32 z_vl15_dropped; /* starting count for PMA */ | ||
669 | u32 n_rc_resends; | ||
670 | u32 n_rc_acks; | ||
671 | u32 n_rc_qacks; | ||
672 | u32 n_rc_delayed_comp; | ||
673 | u32 n_seq_naks; | ||
674 | u32 n_rdma_seq; | ||
675 | u32 n_rnr_naks; | ||
676 | u32 n_other_naks; | ||
677 | u32 n_loop_pkts; | ||
678 | u32 n_pkt_drops; | ||
679 | u32 n_vl15_dropped; | ||
680 | u32 n_rc_timeouts; | ||
681 | u32 n_dmawait; | ||
682 | u32 n_unaligned; | ||
683 | u32 n_rc_dupreq; | ||
684 | u32 n_rc_seqnak; | ||
685 | u32 port_cap_flags; | ||
686 | u32 pma_sample_start; | ||
687 | u32 pma_sample_interval; | ||
688 | __be16 pma_counter_select[5]; | ||
689 | u16 pma_tag; | ||
690 | u16 pkey_violations; | ||
691 | u16 qkey_violations; | ||
692 | u16 mkey_violations; | ||
693 | u16 mkey_lease_period; | ||
694 | u16 sm_lid; | ||
695 | u16 repress_traps; | ||
696 | u8 sm_sl; | ||
697 | u8 mkeyprot; | ||
698 | u8 subnet_timeout; | ||
699 | u8 vl_high_limit; | ||
700 | u8 sl_to_vl[16]; | ||
701 | |||
702 | struct qib_opcode_stats opstats[128]; | ||
703 | }; | ||
704 | |||
705 | struct qib_ibdev { | ||
706 | struct ib_device ibdev; | ||
707 | struct list_head pending_mmaps; | ||
708 | spinlock_t mmap_offset_lock; /* protect mmap_offset */ | ||
709 | u32 mmap_offset; | ||
710 | struct qib_mregion *dma_mr; | ||
711 | |||
712 | /* QP numbers are shared by all IB ports */ | ||
713 | struct qib_qpn_table qpn_table; | ||
714 | struct qib_lkey_table lk_table; | ||
715 | struct list_head piowait; /* list for wait PIO buf */ | ||
716 | struct list_head dmawait; /* list for wait DMA */ | ||
717 | struct list_head txwait; /* list for wait qib_verbs_txreq */ | ||
718 | struct list_head memwait; /* list for wait kernel memory */ | ||
719 | struct list_head txreq_free; | ||
720 | struct timer_list mem_timer; | ||
721 | struct qib_qp **qp_table; | ||
722 | struct qib_pio_header *pio_hdrs; | ||
723 | dma_addr_t pio_hdrs_phys; | ||
724 | /* list of QPs waiting for RNR timer */ | ||
725 | spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */ | ||
726 | unsigned qp_table_size; /* size of the hash table */ | ||
727 | spinlock_t qpt_lock; | ||
728 | |||
729 | u32 n_piowait; | ||
730 | u32 n_txwait; | ||
731 | |||
732 | u32 n_pds_allocated; /* number of PDs allocated for device */ | ||
733 | spinlock_t n_pds_lock; | ||
734 | u32 n_ahs_allocated; /* number of AHs allocated for device */ | ||
735 | spinlock_t n_ahs_lock; | ||
736 | u32 n_cqs_allocated; /* number of CQs allocated for device */ | ||
737 | spinlock_t n_cqs_lock; | ||
738 | u32 n_qps_allocated; /* number of QPs allocated for device */ | ||
739 | spinlock_t n_qps_lock; | ||
740 | u32 n_srqs_allocated; /* number of SRQs allocated for device */ | ||
741 | spinlock_t n_srqs_lock; | ||
742 | u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ | ||
743 | spinlock_t n_mcast_grps_lock; | ||
744 | }; | ||
745 | |||
746 | struct qib_verbs_counters { | ||
747 | u64 symbol_error_counter; | ||
748 | u64 link_error_recovery_counter; | ||
749 | u64 link_downed_counter; | ||
750 | u64 port_rcv_errors; | ||
751 | u64 port_rcv_remphys_errors; | ||
752 | u64 port_xmit_discards; | ||
753 | u64 port_xmit_data; | ||
754 | u64 port_rcv_data; | ||
755 | u64 port_xmit_packets; | ||
756 | u64 port_rcv_packets; | ||
757 | u32 local_link_integrity_errors; | ||
758 | u32 excessive_buffer_overrun_errors; | ||
759 | u32 vl15_dropped; | ||
760 | }; | ||
761 | |||
762 | static inline struct qib_mr *to_imr(struct ib_mr *ibmr) | ||
763 | { | ||
764 | return container_of(ibmr, struct qib_mr, ibmr); | ||
765 | } | ||
766 | |||
767 | static inline struct qib_pd *to_ipd(struct ib_pd *ibpd) | ||
768 | { | ||
769 | return container_of(ibpd, struct qib_pd, ibpd); | ||
770 | } | ||
771 | |||
772 | static inline struct qib_ah *to_iah(struct ib_ah *ibah) | ||
773 | { | ||
774 | return container_of(ibah, struct qib_ah, ibah); | ||
775 | } | ||
776 | |||
777 | static inline struct qib_cq *to_icq(struct ib_cq *ibcq) | ||
778 | { | ||
779 | return container_of(ibcq, struct qib_cq, ibcq); | ||
780 | } | ||
781 | |||
782 | static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq) | ||
783 | { | ||
784 | return container_of(ibsrq, struct qib_srq, ibsrq); | ||
785 | } | ||
786 | |||
787 | static inline struct qib_qp *to_iqp(struct ib_qp *ibqp) | ||
788 | { | ||
789 | return container_of(ibqp, struct qib_qp, ibqp); | ||
790 | } | ||
791 | |||
792 | static inline struct qib_ibdev *to_idev(struct ib_device *ibdev) | ||
793 | { | ||
794 | return container_of(ibdev, struct qib_ibdev, ibdev); | ||
795 | } | ||
796 | |||
797 | /* | ||
798 | * Send if not busy or waiting for I/O and either | ||
799 | * an RC response is pending or we can process send work requests. | ||
800 | */ | ||
801 | static inline int qib_send_ok(struct qib_qp *qp) | ||
802 | { | ||
803 | return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) && | ||
804 | (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) || | ||
805 | !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); | ||
806 | } | ||
807 | |||
808 | extern struct workqueue_struct *qib_wq; | ||
809 | extern struct workqueue_struct *qib_cq_wq; | ||
810 | |||
811 | /* | ||
812 | * This must be called with s_lock held. | ||
813 | */ | ||
814 | static inline void qib_schedule_send(struct qib_qp *qp) | ||
815 | { | ||
816 | if (qib_send_ok(qp)) { | ||
817 | if (qp->processor_id == smp_processor_id()) | ||
818 | queue_work(qib_wq, &qp->s_work); | ||
819 | else | ||
820 | queue_work_on(qp->processor_id, | ||
821 | qib_wq, &qp->s_work); | ||
822 | } | ||
823 | } | ||
824 | |||
825 | static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) | ||
826 | { | ||
827 | u16 p1 = pkey1 & 0x7FFF; | ||
828 | u16 p2 = pkey2 & 0x7FFF; | ||
829 | |||
830 | /* | ||
831 | * Low 15 bits must be non-zero and match, and | ||
832 | * one of the two must be a full member. | ||
833 | */ | ||
834 | return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0); | ||
835 | } | ||
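For concreteness, the check above accepts a pair of P_Keys only when the low 15 bits are non-zero and match, and at least one key has the full-membership bit (bit 15) set. A standalone sketch with example values; the helper below mirrors the logic of qib_pkey_ok() but is not the driver function itself:

#include <stdio.h>

static int pkey_ok(unsigned short pkey1, unsigned short pkey2)
{
	unsigned short p1 = pkey1 & 0x7FFF;
	unsigned short p2 = pkey2 & 0x7FFF;

	/* low 15 bits non-zero and equal, and one of the keys is a full member */
	return p1 && p1 == p2 && ((short)pkey1 < 0 || (short)pkey2 < 0);
}

int main(void)
{
	printf("%d\n", pkey_ok(0xFFFF, 0x7FFF)); /* 1: full vs limited member */
	printf("%d\n", pkey_ok(0x7FFF, 0x7FFF)); /* 0: both limited members */
	printf("%d\n", pkey_ok(0xFFFF, 0xFFFE)); /* 0: low 15 bits differ */
	return 0;
}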
836 | |||
837 | void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, | ||
838 | u32 qp1, u32 qp2, __be16 lid1, __be16 lid2); | ||
839 | void qib_cap_mask_chg(struct qib_ibport *ibp); | ||
840 | void qib_sys_guid_chg(struct qib_ibport *ibp); | ||
841 | void qib_node_desc_chg(struct qib_ibport *ibp); | ||
842 | int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, | ||
843 | struct ib_wc *in_wc, struct ib_grh *in_grh, | ||
844 | struct ib_mad *in_mad, struct ib_mad *out_mad); | ||
845 | int qib_create_agents(struct qib_ibdev *dev); | ||
846 | void qib_free_agents(struct qib_ibdev *dev); | ||
847 | |||
848 | /* | ||
849 | * Compare the lower 24 bits of the two values. | ||
850 | * Returns an integer <, ==, or > than zero. | ||
851 | */ | ||
852 | static inline int qib_cmp24(u32 a, u32 b) | ||
853 | { | ||
854 | return (((int) a) - ((int) b)) << 8; | ||
855 | } | ||
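qib_cmp24() compares packet sequence numbers modulo 2^24: shifting the difference left by 8 discards the top byte, so the sign of the result reflects the wrapped 24-bit distance. A user-space sketch with example values follows; the shift here is done on an unsigned quantity to keep the sketch portable, whereas the kernel version shifts the signed difference directly.

#include <stdint.h>
#include <stdio.h>

static int cmp24(uint32_t a, uint32_t b)
{
	/* sign of the result reflects the wrapped 24-bit distance */
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	printf("%d\n", cmp24(5, 0xFFFFFE) > 0);         /* 1: 5 is 7 ahead after wraparound */
	printf("%d\n", cmp24(0xFFFFFE, 5) < 0);         /* 1: reverse ordering */
	printf("%d\n", cmp24(0x123456, 0x123456) == 0); /* 1: equal PSNs */
	return 0;
}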
856 | |||
857 | struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid); | ||
858 | |||
859 | int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, | ||
860 | u64 *rwords, u64 *spkts, u64 *rpkts, | ||
861 | u64 *xmit_wait); | ||
862 | |||
863 | int qib_get_counters(struct qib_pportdata *ppd, | ||
864 | struct qib_verbs_counters *cntrs); | ||
865 | |||
866 | int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); | ||
867 | |||
868 | int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); | ||
869 | |||
870 | int qib_mcast_tree_empty(struct qib_ibport *ibp); | ||
871 | |||
872 | __be32 qib_compute_aeth(struct qib_qp *qp); | ||
873 | |||
874 | struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn); | ||
875 | |||
876 | struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | ||
877 | struct ib_qp_init_attr *init_attr, | ||
878 | struct ib_udata *udata); | ||
879 | |||
880 | int qib_destroy_qp(struct ib_qp *ibqp); | ||
881 | |||
882 | int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err); | ||
883 | |||
884 | int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
885 | int attr_mask, struct ib_udata *udata); | ||
886 | |||
887 | int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | ||
888 | int attr_mask, struct ib_qp_init_attr *init_attr); | ||
889 | |||
890 | unsigned qib_free_all_qps(struct qib_devdata *dd); | ||
891 | |||
892 | void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt); | ||
893 | |||
894 | void qib_free_qpn_table(struct qib_qpn_table *qpt); | ||
895 | |||
896 | void qib_get_credit(struct qib_qp *qp, u32 aeth); | ||
897 | |||
898 | unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult); | ||
899 | |||
900 | void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail); | ||
901 | |||
902 | void qib_put_txreq(struct qib_verbs_txreq *tx); | ||
903 | |||
904 | int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, | ||
905 | u32 hdrwords, struct qib_sge_state *ss, u32 len); | ||
906 | |||
907 | void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, | ||
908 | int release); | ||
909 | |||
910 | void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release); | ||
911 | |||
912 | void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | ||
913 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); | ||
914 | |||
915 | void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | ||
916 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); | ||
917 | |||
918 | int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); | ||
919 | |||
920 | void qib_rc_rnr_retry(unsigned long arg); | ||
921 | |||
922 | void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); | ||
923 | |||
924 | void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err); | ||
925 | |||
926 | int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); | ||
927 | |||
928 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | ||
929 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); | ||
930 | |||
931 | int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr); | ||
932 | |||
933 | int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr); | ||
934 | |||
935 | int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | ||
936 | struct qib_sge *isge, struct ib_sge *sge, int acc); | ||
937 | |||
938 | int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | ||
939 | u32 len, u64 vaddr, u32 rkey, int acc); | ||
940 | |||
941 | int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||
942 | struct ib_recv_wr **bad_wr); | ||
943 | |||
944 | struct ib_srq *qib_create_srq(struct ib_pd *ibpd, | ||
945 | struct ib_srq_init_attr *srq_init_attr, | ||
946 | struct ib_udata *udata); | ||
947 | |||
948 | int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | ||
949 | enum ib_srq_attr_mask attr_mask, | ||
950 | struct ib_udata *udata); | ||
951 | |||
952 | int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); | ||
953 | |||
954 | int qib_destroy_srq(struct ib_srq *ibsrq); | ||
955 | |||
956 | void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig); | ||
957 | |||
958 | int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); | ||
959 | |||
960 | struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, | ||
961 | int comp_vector, struct ib_ucontext *context, | ||
962 | struct ib_udata *udata); | ||
963 | |||
964 | int qib_destroy_cq(struct ib_cq *ibcq); | ||
965 | |||
966 | int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); | ||
967 | |||
968 | int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); | ||
969 | |||
970 | struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc); | ||
971 | |||
972 | struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, | ||
973 | struct ib_phys_buf *buffer_list, | ||
974 | int num_phys_buf, int acc, u64 *iova_start); | ||
975 | |||
976 | struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | ||
977 | u64 virt_addr, int mr_access_flags, | ||
978 | struct ib_udata *udata); | ||
979 | |||
980 | int qib_dereg_mr(struct ib_mr *ibmr); | ||
981 | |||
982 | struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len); | ||
983 | |||
984 | struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list( | ||
985 | struct ib_device *ibdev, int page_list_len); | ||
986 | |||
987 | void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl); | ||
988 | |||
989 | int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr); | ||
990 | |||
991 | struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | ||
992 | struct ib_fmr_attr *fmr_attr); | ||
993 | |||
994 | int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | ||
995 | int list_len, u64 iova); | ||
996 | |||
997 | int qib_unmap_fmr(struct list_head *fmr_list); | ||
998 | |||
999 | int qib_dealloc_fmr(struct ib_fmr *ibfmr); | ||
1000 | |||
1001 | void qib_release_mmap_info(struct kref *ref); | ||
1002 | |||
1003 | struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, | ||
1004 | struct ib_ucontext *context, | ||
1005 | void *obj); | ||
1006 | |||
1007 | void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip, | ||
1008 | u32 size, void *obj); | ||
1009 | |||
1010 | int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); | ||
1011 | |||
1012 | int qib_get_rwqe(struct qib_qp *qp, int wr_id_only); | ||
1013 | |||
1014 | void qib_migrate_qp(struct qib_qp *qp); | ||
1015 | |||
1016 | int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, | ||
1017 | int has_grh, struct qib_qp *qp, u32 bth0); | ||
1018 | |||
1019 | u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr, | ||
1020 | struct ib_global_route *grh, u32 hwords, u32 nwords); | ||
1021 | |||
1022 | void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, | ||
1023 | u32 bth0, u32 bth2); | ||
1024 | |||
1025 | void qib_do_send(struct work_struct *work); | ||
1026 | |||
1027 | void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, | ||
1028 | enum ib_wc_status status); | ||
1029 | |||
1030 | void qib_send_rc_ack(struct qib_qp *qp); | ||
1031 | |||
1032 | int qib_make_rc_req(struct qib_qp *qp); | ||
1033 | |||
1034 | int qib_make_uc_req(struct qib_qp *qp); | ||
1035 | |||
1036 | int qib_make_ud_req(struct qib_qp *qp); | ||
1037 | |||
1038 | int qib_register_ib_device(struct qib_devdata *); | ||
1039 | |||
1040 | void qib_unregister_ib_device(struct qib_devdata *); | ||
1041 | |||
1042 | void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32); | ||
1043 | |||
1044 | void qib_ib_piobufavail(struct qib_devdata *); | ||
1045 | |||
1046 | unsigned qib_get_npkeys(struct qib_devdata *); | ||
1047 | |||
1048 | unsigned qib_get_pkey(struct qib_ibport *, unsigned); | ||
1049 | |||
1050 | extern const enum ib_wc_opcode ib_qib_wc_opcode[]; | ||
1051 | |||
1052 | /* | ||
1053 | * Below are HCA-independent IB PhysPortState values, returned | ||
1054 | * by the f_ibphys_portstate() routine. | ||
1055 | */ | ||
1056 | #define IB_PHYSPORTSTATE_SLEEP 1 | ||
1057 | #define IB_PHYSPORTSTATE_POLL 2 | ||
1058 | #define IB_PHYSPORTSTATE_DISABLED 3 | ||
1059 | #define IB_PHYSPORTSTATE_CFG_TRAIN 4 | ||
1060 | #define IB_PHYSPORTSTATE_LINKUP 5 | ||
1061 | #define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6 | ||
1062 | #define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8 | ||
1063 | #define IB_PHYSPORTSTATE_CFG_IDLE 0xB | ||
1064 | #define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC | ||
1065 | #define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE | ||
1066 | #define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF | ||
1067 | #define IB_PHYSPORTSTATE_CFG_ENH 0x10 | ||
1068 | #define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13 | ||
1069 | |||
1070 | extern const int ib_qib_state_ops[]; | ||
1071 | |||
1072 | extern __be64 ib_qib_sys_image_guid; /* in network order */ | ||
1073 | |||
1074 | extern unsigned int ib_qib_lkey_table_size; | ||
1075 | |||
1076 | extern unsigned int ib_qib_max_cqes; | ||
1077 | |||
1078 | extern unsigned int ib_qib_max_cqs; | ||
1079 | |||
1080 | extern unsigned int ib_qib_max_qp_wrs; | ||
1081 | |||
1082 | extern unsigned int ib_qib_max_qps; | ||
1083 | |||
1084 | extern unsigned int ib_qib_max_sges; | ||
1085 | |||
1086 | extern unsigned int ib_qib_max_mcast_grps; | ||
1087 | |||
1088 | extern unsigned int ib_qib_max_mcast_qp_attached; | ||
1089 | |||
1090 | extern unsigned int ib_qib_max_srqs; | ||
1091 | |||
1092 | extern unsigned int ib_qib_max_srq_sges; | ||
1093 | |||
1094 | extern unsigned int ib_qib_max_srq_wrs; | ||
1095 | |||
1096 | extern const u32 ib_qib_rnr_table[]; | ||
1097 | |||
1098 | extern struct ib_dma_mapping_ops qib_dma_mapping_ops; | ||
1099 | |||
1100 | #endif /* QIB_VERBS_H */ | ||
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c new file mode 100644 index 000000000000..dabb697b1c2a --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c | |||
@@ -0,0 +1,368 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/rculist.h> | ||
35 | |||
36 | #include "qib.h" | ||
37 | |||
38 | /** | ||
39 | * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct | ||
40 | * @qp: the QP to link | ||
41 | */ | ||
42 | static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) | ||
43 | { | ||
44 | struct qib_mcast_qp *mqp; | ||
45 | |||
46 | mqp = kmalloc(sizeof *mqp, GFP_KERNEL); | ||
47 | if (!mqp) | ||
48 | goto bail; | ||
49 | |||
50 | mqp->qp = qp; | ||
51 | atomic_inc(&qp->refcount); | ||
52 | |||
53 | bail: | ||
54 | return mqp; | ||
55 | } | ||
56 | |||
57 | static void qib_mcast_qp_free(struct qib_mcast_qp *mqp) | ||
58 | { | ||
59 | struct qib_qp *qp = mqp->qp; | ||
60 | |||
61 | /* Notify qib_destroy_qp() if it is waiting. */ | ||
62 | if (atomic_dec_and_test(&qp->refcount)) | ||
63 | wake_up(&qp->wait); | ||
64 | |||
65 | kfree(mqp); | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * qib_mcast_alloc - allocate the multicast GID structure | ||
70 | * @mgid: the multicast GID | ||
71 | * | ||
72 | * A list of QPs will be attached to this structure. | ||
73 | */ | ||
74 | static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid) | ||
75 | { | ||
76 | struct qib_mcast *mcast; | ||
77 | |||
78 | mcast = kmalloc(sizeof *mcast, GFP_KERNEL); | ||
79 | if (!mcast) | ||
80 | goto bail; | ||
81 | |||
82 | mcast->mgid = *mgid; | ||
83 | INIT_LIST_HEAD(&mcast->qp_list); | ||
84 | init_waitqueue_head(&mcast->wait); | ||
85 | atomic_set(&mcast->refcount, 0); | ||
86 | mcast->n_attached = 0; | ||
87 | |||
88 | bail: | ||
89 | return mcast; | ||
90 | } | ||
91 | |||
92 | static void qib_mcast_free(struct qib_mcast *mcast) | ||
93 | { | ||
94 | struct qib_mcast_qp *p, *tmp; | ||
95 | |||
96 | list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) | ||
97 | qib_mcast_qp_free(p); | ||
98 | |||
99 | kfree(mcast); | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * qib_mcast_find - search the global table for the given multicast GID | ||
104 | * @ibp: the IB port structure | ||
105 | * @mgid: the multicast GID to search for | ||
106 | * | ||
107 | * Returns NULL if not found. | ||
108 | * | ||
109 | * The caller is responsible for decrementing the reference count if found. | ||
110 | */ | ||
111 | struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid) | ||
112 | { | ||
113 | struct rb_node *n; | ||
114 | unsigned long flags; | ||
115 | struct qib_mcast *mcast; | ||
116 | |||
117 | spin_lock_irqsave(&ibp->lock, flags); | ||
118 | n = ibp->mcast_tree.rb_node; | ||
119 | while (n) { | ||
120 | int ret; | ||
121 | |||
122 | mcast = rb_entry(n, struct qib_mcast, rb_node); | ||
123 | |||
124 | ret = memcmp(mgid->raw, mcast->mgid.raw, | ||
125 | sizeof(union ib_gid)); | ||
126 | if (ret < 0) | ||
127 | n = n->rb_left; | ||
128 | else if (ret > 0) | ||
129 | n = n->rb_right; | ||
130 | else { | ||
131 | atomic_inc(&mcast->refcount); | ||
132 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
133 | goto bail; | ||
134 | } | ||
135 | } | ||
136 | spin_unlock_irqrestore(&ibp->lock, flags); | ||
137 | |||
138 | mcast = NULL; | ||
139 | |||
140 | bail: | ||
141 | return mcast; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * qib_mcast_add - insert mcast GID into table and attach QP struct | ||
146 | * @mcast: the mcast GID structure to insert into the port's mcast table | ||
147 | * @mqp: the QP to attach | ||
148 | * | ||
149 | * Return zero if both were added. Return EEXIST if the GID was already in | ||
150 | * the table but the QP was added. Return ESRCH if the QP was already | ||
151 | * attached and neither structure was added. Return ENOMEM if a limit was hit. | ||
152 | */ | ||
153 | static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp, | ||
154 | struct qib_mcast *mcast, struct qib_mcast_qp *mqp) | ||
155 | { | ||
156 | struct rb_node **n = &ibp->mcast_tree.rb_node; | ||
157 | struct rb_node *pn = NULL; | ||
158 | int ret; | ||
159 | |||
160 | spin_lock_irq(&ibp->lock); | ||
161 | |||
162 | while (*n) { | ||
163 | struct qib_mcast *tmcast; | ||
164 | struct qib_mcast_qp *p; | ||
165 | |||
166 | pn = *n; | ||
167 | tmcast = rb_entry(pn, struct qib_mcast, rb_node); | ||
168 | |||
169 | ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw, | ||
170 | sizeof(union ib_gid)); | ||
171 | if (ret < 0) { | ||
172 | n = &pn->rb_left; | ||
173 | continue; | ||
174 | } | ||
175 | if (ret > 0) { | ||
176 | n = &pn->rb_right; | ||
177 | continue; | ||
178 | } | ||
179 | |||
180 | /* Search the QP list to see if this is already there. */ | ||
181 | list_for_each_entry_rcu(p, &tmcast->qp_list, list) { | ||
182 | if (p->qp == mqp->qp) { | ||
183 | ret = ESRCH; | ||
184 | goto bail; | ||
185 | } | ||
186 | } | ||
187 | if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) { | ||
188 | ret = ENOMEM; | ||
189 | goto bail; | ||
190 | } | ||
191 | |||
192 | tmcast->n_attached++; | ||
193 | |||
194 | list_add_tail_rcu(&mqp->list, &tmcast->qp_list); | ||
195 | ret = EEXIST; | ||
196 | goto bail; | ||
197 | } | ||
198 | |||
199 | spin_lock(&dev->n_mcast_grps_lock); | ||
200 | if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) { | ||
201 | spin_unlock(&dev->n_mcast_grps_lock); | ||
202 | ret = ENOMEM; | ||
203 | goto bail; | ||
204 | } | ||
205 | |||
206 | dev->n_mcast_grps_allocated++; | ||
207 | spin_unlock(&dev->n_mcast_grps_lock); | ||
208 | |||
209 | mcast->n_attached++; | ||
210 | |||
211 | list_add_tail_rcu(&mqp->list, &mcast->qp_list); | ||
212 | |||
213 | atomic_inc(&mcast->refcount); | ||
214 | rb_link_node(&mcast->rb_node, pn, n); | ||
215 | rb_insert_color(&mcast->rb_node, &ibp->mcast_tree); | ||
216 | |||
217 | ret = 0; | ||
218 | |||
219 | bail: | ||
220 | spin_unlock_irq(&ibp->lock); | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | |||
225 | int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
226 | { | ||
227 | struct qib_qp *qp = to_iqp(ibqp); | ||
228 | struct qib_ibdev *dev = to_idev(ibqp->device); | ||
229 | struct qib_ibport *ibp; | ||
230 | struct qib_mcast *mcast; | ||
231 | struct qib_mcast_qp *mqp; | ||
232 | int ret; | ||
233 | |||
234 | if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { | ||
235 | ret = -EINVAL; | ||
236 | goto bail; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * Allocate the data structures now, since it is better to do this | ||
241 | * outside of spin locks and they will most likely be needed anyway. | ||
242 | */ | ||
243 | mcast = qib_mcast_alloc(gid); | ||
244 | if (mcast == NULL) { | ||
245 | ret = -ENOMEM; | ||
246 | goto bail; | ||
247 | } | ||
248 | mqp = qib_mcast_qp_alloc(qp); | ||
249 | if (mqp == NULL) { | ||
250 | qib_mcast_free(mcast); | ||
251 | ret = -ENOMEM; | ||
252 | goto bail; | ||
253 | } | ||
254 | ibp = to_iport(ibqp->device, qp->port_num); | ||
255 | switch (qib_mcast_add(dev, ibp, mcast, mqp)) { | ||
256 | case ESRCH: | ||
257 | /* Neither was used: OK to attach the same QP twice. */ | ||
258 | qib_mcast_qp_free(mqp); | ||
259 | qib_mcast_free(mcast); | ||
260 | break; | ||
261 | |||
262 | case EEXIST: /* The mcast wasn't used */ | ||
263 | qib_mcast_free(mcast); | ||
264 | break; | ||
265 | |||
266 | case ENOMEM: | ||
267 | /* Exceeded the maximum number of mcast groups. */ | ||
268 | qib_mcast_qp_free(mqp); | ||
269 | qib_mcast_free(mcast); | ||
270 | ret = -ENOMEM; | ||
271 | goto bail; | ||
272 | |||
273 | default: | ||
274 | break; | ||
275 | } | ||
276 | |||
277 | ret = 0; | ||
278 | |||
279 | bail: | ||
280 | return ret; | ||
281 | } | ||
282 | |||
283 | int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
284 | { | ||
285 | struct qib_qp *qp = to_iqp(ibqp); | ||
286 | struct qib_ibdev *dev = to_idev(ibqp->device); | ||
287 | struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); | ||
288 | struct qib_mcast *mcast = NULL; | ||
289 | struct qib_mcast_qp *p, *tmp, *delp = NULL; | ||
290 | struct rb_node *n; | ||
291 | int last = 0; | ||
292 | int ret; | ||
293 | |||
294 | if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { | ||
295 | ret = -EINVAL; | ||
296 | goto bail; | ||
297 | } | ||
298 | |||
299 | spin_lock_irq(&ibp->lock); | ||
300 | |||
301 | /* Find the GID in the mcast table. */ | ||
302 | n = ibp->mcast_tree.rb_node; | ||
303 | while (1) { | ||
304 | if (n == NULL) { | ||
305 | spin_unlock_irq(&ibp->lock); | ||
306 | ret = -EINVAL; | ||
307 | goto bail; | ||
308 | } | ||
309 | |||
310 | mcast = rb_entry(n, struct qib_mcast, rb_node); | ||
311 | ret = memcmp(gid->raw, mcast->mgid.raw, | ||
312 | sizeof(union ib_gid)); | ||
313 | if (ret < 0) | ||
314 | n = n->rb_left; | ||
315 | else if (ret > 0) | ||
316 | n = n->rb_right; | ||
317 | else | ||
318 | break; | ||
319 | } | ||
320 | |||
321 | /* Search the QP list. */ | ||
322 | list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) { | ||
323 | if (p->qp != qp) | ||
324 | continue; | ||
325 | /* | ||
326 | * We found it, so remove it, but don't poison the forward | ||
327 | * link until we are sure there are no list walkers. | ||
328 | */ | ||
329 | list_del_rcu(&p->list); | ||
330 | mcast->n_attached--; | ||
331 | delp = p; | ||
332 | /* If this was the last attached QP, remove the GID too. */ | ||
333 | if (list_empty(&mcast->qp_list)) { | ||
334 | rb_erase(&mcast->rb_node, &ibp->mcast_tree); | ||
335 | last = 1; | ||
336 | } | ||
337 | break; | ||
338 | } | ||
339 | |||
340 | spin_unlock_irq(&ibp->lock); | ||
341 | |||
342 | if (delp) { | ||
343 | /* | ||
344 | * Wait for any list walkers to finish before freeing the | ||
345 | * list element. | ||
346 | */ | ||
347 | wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); | ||
348 | qib_mcast_qp_free(delp); | ||
349 | } | ||
350 | if (last) { | ||
351 | atomic_dec(&mcast->refcount); | ||
352 | wait_event(mcast->wait, !atomic_read(&mcast->refcount)); | ||
353 | qib_mcast_free(mcast); | ||
354 | spin_lock_irq(&dev->n_mcast_grps_lock); | ||
355 | dev->n_mcast_grps_allocated--; | ||
356 | spin_unlock_irq(&dev->n_mcast_grps_lock); | ||
357 | } | ||
358 | |||
359 | ret = 0; | ||
360 | |||
361 | bail: | ||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | int qib_mcast_tree_empty(struct qib_ibport *ibp) | ||
366 | { | ||
367 | return ibp->mcast_tree.rb_node == NULL; | ||
368 | } | ||
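For context, these two entry points are reached through the core verbs layer rather than called directly: the driver points the ib_device multicast methods at them when it registers its verbs, and consumers use ib_attach_mcast()/ib_detach_mcast(). A minimal sketch of that hookup (illustrative; the actual assignments are made elsewhere in the driver, not in this file):

	/* assumed hookup, done during the driver's verbs registration */
	ibdev->attach_mcast = qib_multicast_attach;
	ibdev->detach_mcast = qib_multicast_detach;

	/* consumer side: attach/detach a UD QP to/from a multicast group */
	ret = ib_attach_mcast(qp, &mgid, mlid);
	...
	ret = ib_detach_mcast(qp, &mgid, mlid);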
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/qib/qib_wc_ppc64.c index 74fa5cc5131d..673cf4c22ebd 100644 --- a/drivers/infiniband/hw/ipath/ipath_7220.h +++ b/drivers/infiniband/hw/qib/qib_wc_ppc64.c | |||
@@ -1,7 +1,5 @@ | |||
1 | #ifndef _IPATH_7220_H | ||
2 | #define _IPATH_7220_H | ||
3 | /* | 1 | /* |
4 | * Copyright (c) 2007 QLogic Corporation. All rights reserved. | 2 | * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. |
5 | * | 3 | * |
6 | * This software is available to you under a choice of one of two | 4 | * This software is available to you under a choice of one of two |
7 | * licenses. You may choose to be licensed under the terms of the GNU | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -33,25 +31,32 @@ | |||
33 | */ | 31 | */ |
34 | 32 | ||
35 | /* | 33 | /* |
36 | * This header file provides the declarations and common definitions | 34 | * This file is conditionally built on PowerPC only. Otherwise weak symbol |
37 | * for (mostly) manipulation of the SerDes blocks within the IBA7220. | 35 | * versions of the functions exported from here are used. |
38 | * the functions declared should only be called from within other | ||
39 | * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c. | ||
40 | */ | 36 | */ |
41 | int ipath_sd7220_presets(struct ipath_devdata *dd); | ||
42 | int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset); | ||
43 | int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img, | ||
44 | int len, int offset); | ||
45 | int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img, | ||
46 | int len, int offset); | ||
47 | /* | ||
48 | * Below used for sdnum parameter, selecting one of the two sections | ||
49 | * used for PCIe, or the single SerDes used for IB, which is the | ||
50 | * only one currently used | ||
51 | */ | ||
52 | #define IB_7220_SERDES 2 | ||
53 | 37 | ||
54 | int ipath_sd7220_ib_load(struct ipath_devdata *dd); | 38 | #include "qib.h" |
55 | int ipath_sd7220_ib_vfy(struct ipath_devdata *dd); | ||
56 | 39 | ||
57 | #endif /* _IPATH_7220_H */ | 40 | /** |
41 | * qib_enable_wc - enable write combining for MMIO writes to the device | ||
42 | * @dd: qlogic_ib device | ||
43 | * | ||
44 | * Nothing to do on PowerPC, so just return without error. | ||
45 | */ | ||
46 | int qib_enable_wc(struct qib_devdata *dd) | ||
47 | { | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | /** | ||
52 | * qib_unordered_wc - indicate whether write combining is unordered | ||
53 | * | ||
54 | * Because our performance depends on our ability to do write | ||
55 | * combining mmio writes in the most efficient way, we need to | ||
56 | * know if we are on a processor that may reorder stores when | ||
57 | * write combining. | ||
58 | */ | ||
59 | int qib_unordered_wc(void) | ||
60 | { | ||
61 | return 1; | ||
62 | } | ||
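On architectures that build neither this file nor the x86_64 version, the weak-symbol fallbacks mentioned in the comment above take over. Their exact form and location in the driver are assumptions here; a minimal sketch:

	/* assumed fallbacks, used when no arch-specific file is built */
	int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
	{
		return 0;		/* no write combining configured */
	}

	void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
	{
	}

	int __attribute__((weak)) qib_unordered_wc(void)
	{
		return 0;		/* treat WC stores as ordered by default */
	}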
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c new file mode 100644 index 000000000000..561b8bca4060 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c | |||
@@ -0,0 +1,171 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | /* | ||
35 | * This file is conditionally built on x86_64 only. Otherwise weak symbol | ||
36 | * versions of the functions exported from here are used. | ||
37 | */ | ||
38 | |||
39 | #include <linux/pci.h> | ||
40 | #include <asm/mtrr.h> | ||
41 | #include <asm/processor.h> | ||
42 | |||
43 | #include "qib.h" | ||
44 | |||
45 | /** | ||
46 | * qib_enable_wc - enable write combining for MMIO writes to the device | ||
47 | * @dd: qlogic_ib device | ||
48 | * | ||
49 | * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable | ||
50 | * write combining. | ||
51 | */ | ||
52 | int qib_enable_wc(struct qib_devdata *dd) | ||
53 | { | ||
54 | int ret = 0; | ||
55 | u64 pioaddr, piolen; | ||
56 | unsigned bits; | ||
57 | const unsigned long addr = pci_resource_start(dd->pcidev, 0); | ||
58 | const size_t len = pci_resource_len(dd->pcidev, 0); | ||
59 | |||
60 | /* | ||
61 | * Set the PIO buffers to write combining (WC), so we get HT bursts to | ||
62 | * the chip. Linux (possibly the hardware) requires the region to start | ||
63 | * on an address aligned to its length, and the length must be a power of 2. | ||
64 | * For rev1 that means the base address; for rev2 it will be just | ||
65 | * the PIO buffers themselves. | ||
66 | * For chips with two sets of buffers, the calculations are | ||
67 | * somewhat more complicated; we need to sum, and the piobufbase | ||
68 | * register has both offsets, 2K in low 32 bits, 4K in high 32 bits. | ||
69 | * The buffers are still packed, so a single range covers both. | ||
70 | */ | ||
71 | if (dd->piobcnt2k && dd->piobcnt4k) { | ||
72 | /* 2 sizes for chip */ | ||
73 | unsigned long pio2kbase, pio4kbase; | ||
74 | pio2kbase = dd->piobufbase & 0xffffffffUL; | ||
75 | pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL; | ||
76 | if (pio2kbase < pio4kbase) { | ||
77 | /* all current chips */ | ||
78 | pioaddr = addr + pio2kbase; | ||
79 | piolen = pio4kbase - pio2kbase + | ||
80 | dd->piobcnt4k * dd->align4k; | ||
81 | } else { | ||
82 | pioaddr = addr + pio4kbase; | ||
83 | piolen = pio2kbase - pio4kbase + | ||
84 | dd->piobcnt2k * dd->palign; | ||
85 | } | ||
86 | } else { /* single buffer size (2K, currently) */ | ||
87 | pioaddr = addr + dd->piobufbase; | ||
88 | piolen = dd->piobcnt2k * dd->palign + | ||
89 | dd->piobcnt4k * dd->align4k; | ||
90 | } | ||
91 | |||
92 | for (bits = 0; !(piolen & (1ULL << bits)); bits++) | ||
93 | /* do nothing */ ; | ||
94 | |||
95 | if (piolen != (1ULL << bits)) { | ||
96 | piolen >>= bits; | ||
97 | while (piolen >>= 1) | ||
98 | bits++; | ||
99 | piolen = 1ULL << (bits + 1); | ||
100 | } | ||
101 | if (pioaddr & (piolen - 1)) { | ||
102 | u64 atmp; | ||
103 | atmp = pioaddr & ~(piolen - 1); | ||
104 | if (atmp < addr || (atmp + piolen) > (addr + len)) { | ||
105 | qib_dev_err(dd, "No way to align address/size " | ||
106 | "(%llx/%llx), no WC mtrr\n", | ||
107 | (unsigned long long) atmp, | ||
108 | (unsigned long long) piolen << 1); | ||
109 | ret = -ENODEV; | ||
110 | } else { | ||
111 | pioaddr = atmp; | ||
112 | piolen <<= 1; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | if (!ret) { | ||
117 | int cookie; | ||
118 | |||
119 | cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0); | ||
120 | if (cookie < 0) { | ||
121 | qib_devinfo(dd->pcidev, | ||
122 | "mtrr_add() WC for PIO bufs " | ||
123 | "failed (%d)\n", | ||
124 | cookie); | ||
125 | ret = -EINVAL; | ||
128 | } else { | ||
129 | dd->wc_cookie = cookie; | ||
130 | dd->wc_base = (unsigned long) pioaddr; | ||
131 | dd->wc_len = (unsigned long) piolen; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | return ret; | ||
136 | } | ||
137 | |||
138 | /** | ||
139 | * qib_disable_wc - disable write combining for MMIO writes to the device | ||
140 | * @dd: qlogic_ib device | ||
141 | */ | ||
142 | void qib_disable_wc(struct qib_devdata *dd) | ||
143 | { | ||
144 | if (dd->wc_cookie) { | ||
145 | int r; | ||
146 | |||
147 | r = mtrr_del(dd->wc_cookie, dd->wc_base, | ||
148 | dd->wc_len); | ||
149 | if (r < 0) | ||
150 | qib_devinfo(dd->pcidev, | ||
151 | "mtrr_del(%lx, %lx, %lx) failed: %d\n", | ||
152 | dd->wc_cookie, dd->wc_base, | ||
153 | dd->wc_len, r); | ||
154 | dd->wc_cookie = 0; /* even on failure */ | ||
155 | } | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * qib_unordered_wc - indicate whether write combining is unordered | ||
160 | * | ||
161 | * Because our performance depends on our ability to do write combining mmio | ||
162 | * writes in the most efficient way, we need to know if we are on an Intel | ||
163 | * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in | ||
164 | * the order completed, and so no special flushing is required to get | ||
165 | * correct ordering. Intel processors, however, will flush write buffers | ||
166 | * out in "random" orders, and so explicit ordering is needed at times. | ||
167 | */ | ||
168 | int qib_unordered_wc(void) | ||
169 | { | ||
170 | return boot_cpu_data.x86_vendor != X86_VENDOR_AMD; | ||
171 | } | ||
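The distinction qib_unordered_wc() draws matters wherever the driver copies a packet into a WC-mapped PIO buffer and then writes a trigger word: on CPUs that reorder WC stores, the buffered data has to be pushed out first. A rough sketch of the caller-side pattern, with qib_flush_wc() assumed to be the driver's store-fence wrapper and the other names purely illustrative:

	/* illustrative only; buffer, length and trigger names are assumptions */
	memcpy_toio(piobuf, data, len);		/* body of the packet, WC stores */
	if (qib_unordered_wc())
		qib_flush_wc();			/* e.g. sfence on Intel x86_64 */
	writel(trigger, piobuf + trigger_offset);	/* tell the chip it is complete */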
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c index 57288ca1395f..b07e4dee80aa 100644 --- a/drivers/net/mlx4/icm.c +++ b/drivers/net/mlx4/icm.c | |||
@@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, | |||
163 | ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], | 163 | ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], |
164 | cur_order, gfp_mask); | 164 | cur_order, gfp_mask); |
165 | 165 | ||
166 | if (!ret) { | 166 | if (ret) { |
167 | ++chunk->npages; | 167 | if (--cur_order < 0) |
168 | 168 | goto fail; | |
169 | if (coherent) | 169 | else |
170 | ++chunk->nsg; | 170 | continue; |
171 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { | 171 | } |
172 | chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, | ||
173 | chunk->npages, | ||
174 | PCI_DMA_BIDIRECTIONAL); | ||
175 | 172 | ||
176 | if (chunk->nsg <= 0) | 173 | ++chunk->npages; |
177 | goto fail; | ||
178 | 174 | ||
179 | chunk = NULL; | 175 | if (coherent) |
180 | } | 176 | ++chunk->nsg; |
177 | else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { | ||
178 | chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, | ||
179 | chunk->npages, | ||
180 | PCI_DMA_BIDIRECTIONAL); | ||
181 | 181 | ||
182 | npages -= 1 << cur_order; | 182 | if (chunk->nsg <= 0) |
183 | } else { | ||
184 | --cur_order; | ||
185 | if (cur_order < 0) | ||
186 | goto fail; | 183 | goto fail; |
187 | } | 184 | } |
185 | |||
186 | if (chunk->npages == MLX4_ICM_CHUNK_LEN) | ||
187 | chunk = NULL; | ||
188 | |||
189 | npages -= 1 << cur_order; | ||
188 | } | 190 | } |
189 | 191 | ||
190 | if (!coherent && chunk) { | 192 | if (!coherent && chunk) { |
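The rework above keeps the same fallback behaviour but flattens the control flow: only the failure path stays inside the if, and the bookkeeping that used to be nested in the success branch now runs unconditionally afterwards. Simplified view of the resulting loop (illustrative, coherent-allocation and mapping details trimmed):

	while (npages > 0) {
		if (mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
					 cur_order, gfp_mask)) {
			if (--cur_order < 0)
				goto fail;	/* even order-0 pages failed */
			continue;		/* retry with a smaller order */
		}
		++chunk->npages;
		/* ... map the chunk / start a new one when it is full ... */
		npages -= 1 << cur_order;
	}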
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 310d31474034..f3e8f3c07725 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -1172,7 +1172,9 @@ struct ib_client { | |||
1172 | struct ib_device *ib_alloc_device(size_t size); | 1172 | struct ib_device *ib_alloc_device(size_t size); |
1173 | void ib_dealloc_device(struct ib_device *device); | 1173 | void ib_dealloc_device(struct ib_device *device); |
1174 | 1174 | ||
1175 | int ib_register_device (struct ib_device *device); | 1175 | int ib_register_device(struct ib_device *device, |
1176 | int (*port_callback)(struct ib_device *, | ||
1177 | u8, struct kobject *)); | ||
1176 | void ib_unregister_device(struct ib_device *device); | 1178 | void ib_unregister_device(struct ib_device *device); |
1177 | 1179 | ||
1178 | int ib_register_client (struct ib_client *client); | 1180 | int ib_register_client (struct ib_client *client); |
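The new second argument gives a low-level driver a hook for per-port sysfs entries: the core invokes it once for each port with that port's kobject, and drivers with nothing extra to add pass NULL. A minimal sketch (callback name and attribute group are illustrative, not taken from this patch):

	/* hypothetical per-port callback */
	static int example_create_port_files(struct ib_device *ibdev, u8 port_num,
					     struct kobject *kobj)
	{
		/* e.g. sysfs_create_group(kobj, &example_port_attr_group); */
		return 0;
	}

	...

	ret = ib_register_device(ibdev, example_create_port_files);
	/* drivers without per-port files: */
	ret = ib_register_device(ibdev, NULL);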