-rw-r--r--  drivers/infiniband/core/cm.c | 63
-rw-r--r--  drivers/infiniband/core/cma.c | 2
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 3
-rw-r--r--  drivers/infiniband/core/ucma.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs.h | 4
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 14
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 28
-rw-r--r--  drivers/infiniband/core/verbs.c | 14
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c | 80
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h | 16
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_ae.c | 10
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_alloc.c | 12
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c | 4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_intr.c | 6
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mm.c | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.c | 4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_mq.h | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c | 85
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c | 30
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c | 31
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_vq.c | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_wr.h | 212
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_dbg.c | 24
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c | 84
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_resource.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.h | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 166
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cq.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_mem.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 79
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c | 42
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_av.c | 31
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c | 19
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c | 129
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 19
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c | 42
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_pd.c | 11
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 51
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_tools.h | 16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c | 19
-rw-r--r--  drivers/infiniband/hw/ipath/Makefile | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_7220.h | 57
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_common.h | 54
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_debug.h | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_diag.c | 35
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 1041
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c | 428
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c | 176
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c | 51
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c | 203
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c | 2571
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c | 312
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 656
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h | 304
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c | 110
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c | 59
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c | 67
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_registers.h | 168
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c | 22
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sd7220.c | 1462
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sd7220_img.c | 1082
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c | 790
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_srq.c | 5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_stats.c | 33
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c | 104
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c | 7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.c | 879
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.h | 54
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 413
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 32
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 319
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 25
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 15
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 117
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h | 14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 28
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_wqe.h | 18
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 15
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 32
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 131
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h | 35
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 49
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 26
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 29
-rw-r--r--  drivers/infiniband/ulp/ipoib/Makefile | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 10
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 99
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 126
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 33
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 5
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 7
-rw-r--r--  drivers/net/mlx4/catas.c | 2
-rw-r--r--  drivers/net/mlx4/cmd.c | 3
-rw-r--r--  drivers/net/mlx4/cq.c | 72
-rw-r--r--  drivers/net/mlx4/eq.c | 5
-rw-r--r--  drivers/net/mlx4/fw.c | 13
-rw-r--r--  drivers/net/mlx4/fw.h | 1
-rw-r--r--  drivers/net/mlx4/intf.c | 8
-rw-r--r--  drivers/net/mlx4/main.c | 6
-rw-r--r--  drivers/net/mlx4/mcg.c | 12
-rw-r--r--  drivers/net/mlx4/mlx4.h | 4
-rw-r--r--  include/linux/mlx4/cmd.h | 2
-rw-r--r--  include/linux/mlx4/cq.h | 19
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mlx4/driver.h | 3
-rw-r--r--  include/linux/mlx4/qp.h | 15
-rw-r--r--  include/rdma/ib_user_verbs.h | 5
-rw-r--r--  include/rdma/ib_verbs.h | 35
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 1
131 files changed, 11740 insertions, 2288 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index faa7ce318a6d..a47fe64e5c39 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -467,6 +467,31 @@ static int cm_compare_private_data(u8 *private_data,
 	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
 }
 
+/*
+ * Trivial helpers to strip endian annotation and compare; the
+ * endianness doesn't actually matter since we just need a stable
+ * order for the RB tree.
+ */
+static int be32_lt(__be32 a, __be32 b)
+{
+	return (__force u32) a < (__force u32) b;
+}
+
+static int be32_gt(__be32 a, __be32 b)
+{
+	return (__force u32) a > (__force u32) b;
+}
+
+static int be64_lt(__be64 a, __be64 b)
+{
+	return (__force u64) a < (__force u64) b;
+}
+
+static int be64_gt(__be64 a, __be64 b)
+{
+	return (__force u64) a > (__force u64) b;
+}
+
 static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 {
 	struct rb_node **link = &cm.listen_service_table.rb_node;
@@ -492,9 +517,9 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 			link = &(*link)->rb_left;
 		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
 			link = &(*link)->rb_right;
-		else if (service_id < cur_cm_id_priv->id.service_id)
+		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
 			link = &(*link)->rb_left;
-		else if (service_id > cur_cm_id_priv->id.service_id)
+		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
 			link = &(*link)->rb_right;
 		else if (data_cmp < 0)
 			link = &(*link)->rb_left;
@@ -527,9 +552,9 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
 			node = node->rb_left;
 		else if (device > cm_id_priv->id.device)
 			node = node->rb_right;
-		else if (service_id < cm_id_priv->id.service_id)
+		else if (be64_lt(service_id, cm_id_priv->id.service_id))
 			node = node->rb_left;
-		else if (service_id > cm_id_priv->id.service_id)
+		else if (be64_gt(service_id, cm_id_priv->id.service_id))
 			node = node->rb_right;
 		else if (data_cmp < 0)
 			node = node->rb_left;
@@ -552,13 +577,13 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
 		parent = *link;
 		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
 					     remote_id_node);
-		if (remote_id < cur_timewait_info->work.remote_id)
+		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
 			link = &(*link)->rb_left;
-		else if (remote_id > cur_timewait_info->work.remote_id)
+		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
 			link = &(*link)->rb_right;
-		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
+		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 			link = &(*link)->rb_left;
-		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
+		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 			link = &(*link)->rb_right;
 		else
 			return cur_timewait_info;
@@ -578,13 +603,13 @@ static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
 	while (node) {
 		timewait_info = rb_entry(node, struct cm_timewait_info,
 					 remote_id_node);
-		if (remote_id < timewait_info->work.remote_id)
+		if (be32_lt(remote_id, timewait_info->work.remote_id))
 			node = node->rb_left;
-		else if (remote_id > timewait_info->work.remote_id)
+		else if (be32_gt(remote_id, timewait_info->work.remote_id))
 			node = node->rb_right;
-		else if (remote_ca_guid < timewait_info->remote_ca_guid)
+		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
 			node = node->rb_left;
-		else if (remote_ca_guid > timewait_info->remote_ca_guid)
+		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
 			node = node->rb_right;
 		else
 			return timewait_info;
@@ -605,13 +630,13 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
 		parent = *link;
 		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
 					     remote_qp_node);
-		if (remote_qpn < cur_timewait_info->remote_qpn)
+		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
 			link = &(*link)->rb_left;
-		else if (remote_qpn > cur_timewait_info->remote_qpn)
+		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
 			link = &(*link)->rb_right;
-		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
+		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 			link = &(*link)->rb_left;
-		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
+		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
 			link = &(*link)->rb_right;
 		else
 			return cur_timewait_info;
@@ -635,9 +660,9 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
 		parent = *link;
 		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
 					  sidr_id_node);
-		if (remote_id < cur_cm_id_priv->id.remote_id)
+		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
 			link = &(*link)->rb_left;
-		else if (remote_id > cur_cm_id_priv->id.remote_id)
+		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
 			link = &(*link)->rb_right;
 		else {
 			int cmp;
@@ -2848,7 +2873,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
 	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
 	sidr_req_msg->request_id = cm_id_priv->id.local_id;
-	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
+	sidr_req_msg->pkey = param->path->pkey;
 	sidr_req_msg->service_id = param->service_id;
 
 	if (param->private_data && param->private_data_len)
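
The be32/be64 helpers added above exist only to give sparse-clean comparisons: an RB tree needs some stable total order, not a numerically meaningful one, so comparing the raw big-endian bit patterns is enough and no byte swap is needed. A minimal standalone sketch of the same idea (the names and usage here are illustrative, not part of the patch):

	#include <linux/types.h>

	/* Compare two big-endian keys by raw bit pattern; any total
	 * order works for tree insert/lookup, so skip the byte swap.
	 * The __force cast only silences sparse's endianness check. */
	static int be64_lt(__be64 a, __be64 b)
	{
		return (__force u64) a < (__force u64) b;
	}

	static bool goes_left(__be64 key, __be64 node_key)
	{
		return be64_lt(key, node_key);
	}
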
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d81c156a22b4..671f13738054 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1289,7 +1289,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
 				   listen_id->id.context,
 				   RDMA_PS_TCP);
-	if (!new_cm_id) {
+	if (IS_ERR(new_cm_id)) {
 		ret = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 06d502c06a4d..1286dc1b98b2 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -158,8 +158,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 #endif
 	}
 
-	list_splice(&pool->dirty_list, &unmap_list);
-	INIT_LIST_HEAD(&pool->dirty_list);
+	list_splice_init(&pool->dirty_list, &unmap_list);
 	pool->dirty_len = 0;
 
 	spin_unlock_irq(&pool->pool_lock);
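
The fmr_pool change is a small idiom cleanup: list_splice_init() splices every entry of its first argument onto the second and reinitializes the source list in one call, which is exactly what the removed list_splice() + INIT_LIST_HEAD() pair did. A hedged sketch of the equivalence (the helper name is illustrative):

	#include <linux/list.h>

	static void drain_to(struct list_head *src, struct list_head *dst)
	{
		/* Equivalent to: list_splice(src, dst); INIT_LIST_HEAD(src); */
		list_splice_init(src, dst);
	}
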
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 15937eb38aae..ca4cf3a511ab 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -614,7 +614,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 	if (!ctx->cm_id->device)
 		goto out;
 
-	resp.node_guid = ctx->cm_id->device->node_guid;
+	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 	resp.port_num = ctx->cm_id->port_num;
 	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index c75eb6c9bd49..2cad8b4b5292 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -81,13 +81,13 @@ struct ib_uverbs_device {
 
 struct ib_uverbs_event_file {
 	struct kref ref;
-	struct file *file;
 	struct ib_uverbs_file *uverbs_file;
 	spinlock_t lock;
-	int is_async;
 	wait_queue_head_t poll_wait;
 	struct fasync_struct *async_queue;
 	struct list_head event_list;
+	int is_async;
+	int is_closed;
 };
 
 struct ib_uverbs_file {
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 495c803fb11d..2c3bff5fe867 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1065,6 +1065,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 	attr.srq = srq;
 	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
 	attr.qp_type = cmd.qp_type;
+	attr.create_flags = 0;
 
 	attr.cap.max_send_wr = cmd.max_send_wr;
 	attr.cap.max_recv_wr = cmd.max_recv_wr;
@@ -1462,7 +1463,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		next->num_sge = user_wr->num_sge;
 		next->opcode = user_wr->opcode;
 		next->send_flags = user_wr->send_flags;
-		next->imm_data = (__be32 __force) user_wr->imm_data;
 
 		if (is_ud) {
 			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
@@ -1475,14 +1475,24 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
 		} else {
 			switch (next->opcode) {
-			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
+				next->ex.imm_data =
+					(__be32 __force) user_wr->ex.imm_data;
+			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_READ:
 				next->wr.rdma.remote_addr =
 					user_wr->wr.rdma.remote_addr;
 				next->wr.rdma.rkey =
 					user_wr->wr.rdma.rkey;
 				break;
+			case IB_WR_SEND_WITH_IMM:
+				next->ex.imm_data =
+					(__be32 __force) user_wr->ex.imm_data;
+				break;
+			case IB_WR_SEND_WITH_INV:
+				next->ex.invalidate_rkey =
+					user_wr->ex.invalidate_rkey;
+				break;
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
 				next->wr.atomic.remote_addr =
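
Note the reordered cases above: IB_WR_RDMA_WRITE_WITH_IMM now comes first so it can copy ex.imm_data and then deliberately fall through into the shared RDMA handling for remote_addr/rkey, while plain writes and reads skip the immediate-data copy. A sketch of that control flow in isolation (assuming the ex union introduced by this series; the helper name and values are placeholders):

	#include <rdma/ib_verbs.h>

	static void fill_rdma_fields(struct ib_send_wr *wr, __be32 imm,
				     u64 remote_addr, u32 rkey)
	{
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = imm;	/* only the IMM variant */
			/* fall through */
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_READ:
			wr->wr.rdma.remote_addr = remote_addr;
			wr->wr.rdma.rkey = rkey;
			break;
		default:
			break;
		}
	}
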
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 7c2ac3905582..f49f94653a96 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -352,7 +352,7 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
 	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&file->lock);
-	file->file = NULL;
+	file->is_closed = 1;
 	list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
 		if (entry->counter)
 			list_del(&entry->obj_list);
@@ -390,7 +390,7 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 		return;
 
 	spin_lock_irqsave(&file->lock, flags);
-	if (!file->file) {
+	if (file->is_closed) {
 		spin_unlock_irqrestore(&file->lock, flags);
 		return;
 	}
@@ -423,7 +423,7 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
 	unsigned long flags;
 
 	spin_lock_irqsave(&file->async_file->lock, flags);
-	if (!file->async_file->file) {
+	if (!file->async_file->is_closed) {
 		spin_unlock_irqrestore(&file->async_file->lock, flags);
 		return;
 	}
@@ -509,6 +509,7 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
 	ev_file->uverbs_file = uverbs_file;
 	ev_file->async_queue = NULL;
 	ev_file->is_async = is_async;
+	ev_file->is_closed = 0;
 
 	*fd = get_unused_fd();
 	if (*fd < 0) {
@@ -516,25 +517,18 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
 		goto err;
 	}
 
-	filp = get_empty_filp();
-	if (!filp) {
-		ret = -ENFILE;
-		goto err_fd;
-	}
-
-	ev_file->file = filp;
-
 	/*
 	 * fops_get() can't fail here, because we're coming from a
 	 * system call on a uverbs file, which will already have a
 	 * module reference.
 	 */
-	filp->f_op = fops_get(&uverbs_event_fops);
-	filp->f_path.mnt = mntget(uverbs_event_mnt);
-	filp->f_path.dentry = dget(uverbs_event_mnt->mnt_root);
-	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
-	filp->f_flags = O_RDONLY;
-	filp->f_mode = FMODE_READ;
+	filp = alloc_file(uverbs_event_mnt, dget(uverbs_event_mnt->mnt_root),
+			  FMODE_READ, fops_get(&uverbs_event_fops));
+	if (!filp) {
+		ret = -ENFILE;
+		goto err_fd;
+	}
+
 	filp->private_data = ev_file;
 
 	return filp;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 86ed8af9c7e6..05042089de6e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -248,7 +248,9 @@ int ib_modify_srq(struct ib_srq *srq,
 		  struct ib_srq_attr *srq_attr,
 		  enum ib_srq_attr_mask srq_attr_mask)
 {
-	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
+	return srq->device->modify_srq ?
+		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
+		-ENOSYS;
 }
 EXPORT_SYMBOL(ib_modify_srq);
 
@@ -628,6 +630,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_create_cq);
 
+int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+	return cq->device->modify_cq ?
+		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
+}
+EXPORT_SYMBOL(ib_modify_cq);
+
 int ib_destroy_cq(struct ib_cq *cq)
 {
 	if (atomic_read(&cq->usecnt))
@@ -672,6 +681,9 @@ struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
 {
 	struct ib_mr *mr;
 
+	if (!pd->device->reg_phys_mr)
+		return ERR_PTR(-ENOSYS);
+
 	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
 				     mr_access_flags, iova_start);
 
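
All three verbs.c hunks apply the same pattern: optional device methods may be left NULL by a driver, so the core checks the pointer and returns -ENOSYS (or ERR_PTR(-ENOSYS)) instead of dereferencing it. From a caller's point of view, -ENOSYS then simply means "not supported"; a hedged sketch of that caller side (the moderation values are placeholders):

	#include <linux/kernel.h>
	#include <rdma/ib_verbs.h>

	static void try_cq_moderation(struct ib_cq *cq)
	{
		int ret = ib_modify_cq(cq, 16, 10); /* 16 completions / 10 us */

		if (ret == -ENOSYS)
			pr_info("CQ moderation not supported by this device\n");
		else if (ret)
			pr_err("ib_modify_cq failed: %d\n", ret);
	}
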
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index f283a9f0c23b..113f3c03c5b5 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -130,10 +130,10 @@ static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
 		tx_desc->status = 0;
 
 		/* Set TXP_HTXD_UNINIT */
-		__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+		__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
 			     (void __iomem *) txp_desc + C2_TXP_ADDR);
 		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
-		__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+		__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
 			     (void __iomem *) txp_desc + C2_TXP_FLAGS);
 
 		elem->skb = NULL;
@@ -179,13 +179,13 @@ static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
 		rx_desc->status = 0;
 
 		/* Set RXP_HRXD_UNINIT */
-		__raw_writew(cpu_to_be16(RXP_HRXD_OK),
+		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
 			     (void __iomem *) rxp_desc + C2_RXP_STATUS);
 		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
 		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
-		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
 			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
-		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
 			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);
 
 		elem->skb = NULL;
@@ -239,10 +239,11 @@ static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
 	rxp_hdr->flags = RXP_HRXD_READY;
 
 	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-	__raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
+	__raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
 		     elem->hw_desc + C2_RXP_LEN);
-	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
-	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+	__raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
+	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
+		     elem->hw_desc + C2_RXP_FLAGS);
 
 	elem->skb = skb;
 	elem->mapaddr = mapaddr;
@@ -290,9 +291,9 @@ static void c2_rx_clean(struct c2_port *c2_port)
 		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
 		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
 		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
-		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+		__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
 			     elem->hw_desc + C2_RXP_ADDR);
-		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
 			     elem->hw_desc + C2_RXP_FLAGS);
 
 		if (elem->skb) {
@@ -346,16 +347,16 @@ static void c2_tx_clean(struct c2_port *c2_port)
 					     elem->hw_desc + C2_TXP_LEN);
 				__raw_writeq(0,
 					     elem->hw_desc + C2_TXP_ADDR);
-				__raw_writew(cpu_to_be16(TXP_HTXD_DONE),
+				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
 					     elem->hw_desc + C2_TXP_FLAGS);
 				c2_port->netstats.tx_dropped++;
 				break;
 			} else {
 				__raw_writew(0,
 					     elem->hw_desc + C2_TXP_LEN);
-				__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+				__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
 					     elem->hw_desc + C2_TXP_ADDR);
-				__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+				__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
 					     elem->hw_desc + C2_TXP_FLAGS);
 			}
 
@@ -390,7 +391,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
 	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
 	     elem = elem->next) {
 		txp_htxd.flags =
-		    be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
+		    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
 
 		if (txp_htxd.flags != TXP_HTXD_DONE)
 			break;
@@ -398,7 +399,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
 		if (netif_msg_tx_done(c2_port)) {
 			/* PCI reads are expensive in fast path */
 			txp_htxd.len =
-			    be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
+			    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
 			pr_debug("%s: tx done slot %3Zu status 0x%x len "
 				 "%5u bytes\n",
 				 netdev->name, elem - tx_ring->start,
@@ -448,10 +449,12 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
 	/* Write the descriptor to the adapter's rx ring */
 	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
 	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-	__raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
+	__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
 		     elem->hw_desc + C2_RXP_LEN);
-	__raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
-	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+	__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
+		     elem->hw_desc + C2_RXP_ADDR);
+	__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
+		     elem->hw_desc + C2_RXP_FLAGS);
 
 	pr_debug("packet dropped\n");
 	c2_port->netstats.rx_dropped++;
@@ -653,7 +656,7 @@ static int c2_up(struct net_device *netdev)
 	     i++, elem++) {
 		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
 		rxp_hdr->flags = 0;
-		__raw_writew(cpu_to_be16(RXP_HRXD_READY),
+		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
 			     elem->hw_desc + C2_RXP_FLAGS);
 	}
 
@@ -787,9 +790,12 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	elem->maplen = maplen;
 
 	/* Tell HW to xmit */
-	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
-	__raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
-	__raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);
+	__raw_writeq((__force u64) cpu_to_be64(mapaddr),
+		     elem->hw_desc + C2_TXP_ADDR);
+	__raw_writew((__force u16) cpu_to_be16(maplen),
+		     elem->hw_desc + C2_TXP_LEN);
+	__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
+		     elem->hw_desc + C2_TXP_FLAGS);
 
 	c2_port->netstats.tx_packets++;
 	c2_port->netstats.tx_bytes += maplen;
@@ -810,11 +816,11 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 			elem->maplen = maplen;
 
 			/* Tell HW to xmit */
-			__raw_writeq(cpu_to_be64(mapaddr),
+			__raw_writeq((__force u64) cpu_to_be64(mapaddr),
 				     elem->hw_desc + C2_TXP_ADDR);
-			__raw_writew(cpu_to_be16(maplen),
+			__raw_writew((__force u16) cpu_to_be16(maplen),
 				     elem->hw_desc + C2_TXP_LEN);
-			__raw_writew(cpu_to_be16(TXP_HTXD_READY),
+			__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
 				     elem->hw_desc + C2_TXP_FLAGS);
 
 			c2_port->netstats.tx_packets++;
@@ -1005,7 +1011,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 	/* Remap the adapter PCI registers in BAR4 */
 	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
 				    sizeof(struct c2_adapter_pci_regs));
-	if (mmio_regs == 0UL) {
+	if (!mmio_regs) {
 		printk(KERN_ERR PFX
 			"Unable to remap adapter PCI registers in BAR4\n");
 		ret = -EIO;
@@ -1029,10 +1035,10 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 	}
 
 	/* Validate the adapter version */
-	if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
+	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
 		printk(KERN_ERR PFX "Version mismatch "
 			"[fw=%u, c2=%u], Adapter not claimed\n",
-			be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
+			be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
 			C2_VERSION);
 		ret = -EINVAL;
 		iounmap(mmio_regs);
@@ -1040,12 +1046,12 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 	}
 
 	/* Validate the adapter IVN */
-	if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
+	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
 		printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
 		       "the OpenIB device support kit. "
 		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
-		       be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
+		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
 		       C2_IVN);
 		ret = -EINVAL;
 		iounmap(mmio_regs);
 		goto bail2;
@@ -1068,7 +1074,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 
 	/* Get the last RX index */
 	c2dev->cur_rx =
-	    (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
+	    (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
 	     0xffffc000) / sizeof(struct c2_rxp_desc);
 
 	/* Request an interrupt line for the driver */
@@ -1090,7 +1096,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 	}
 
 	/* Save off the actual size prior to unmapping mmio_regs */
-	kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));
+	kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
 
 	/* Unmap the adapter PCI registers in BAR4 */
 	iounmap(mmio_regs);
@@ -1109,7 +1115,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 	/* Remap the adapter HRXDQ PA space to kernel VA space */
 	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
 					       C2_RXP_HRXDQ_SIZE);
-	if (c2dev->mmio_rxp_ring == 0UL) {
+	if (!c2dev->mmio_rxp_ring) {
 		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
 		ret = -EIO;
 		goto bail6;
@@ -1118,7 +1124,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 	/* Remap the adapter HTXDQ PA space to kernel VA space */
 	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
 					       C2_TXP_HTXDQ_SIZE);
-	if (c2dev->mmio_txp_ring == 0UL) {
+	if (!c2dev->mmio_txp_ring) {
 		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
 		ret = -EIO;
 		goto bail7;
@@ -1129,7 +1135,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 
 	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
 	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
-	if (c2dev->regs == 0UL) {
+	if (!c2dev->regs) {
 		printk(KERN_ERR PFX "Unable to remap BAR0\n");
 		ret = -EIO;
 		goto bail8;
@@ -1139,7 +1145,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
 	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
 				     kva_map_size);
-	if (c2dev->kva == 0UL) {
+	if (!c2dev->kva) {
 		printk(KERN_ERR PFX "Unable to remap BAR4\n");
 		ret = -EIO;
 		goto bail9;
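
Most of the c2.c churn is mechanical sparse annotation: __raw_writew()/__raw_writeq() are typed to take host-order integers, so the __be16/__be64 values produced by cpu_to_be16()/cpu_to_be64() need a __force cast to pass through without a warning; the bytes written to the device are unchanged. The pattern, reduced to a sketch (the helper name is illustrative):

	#include <linux/io.h>
	#include <linux/types.h>

	static void c2_write_be16(u16 host_val, void __iomem *addr)
	{
		/* Device expects big-endian; __force only placates sparse. */
		__raw_writew((__force u16) cpu_to_be16(host_val), addr);
	}
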
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index fa58200217a1..ed38ab8d9c0c 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -346,7 +346,7 @@ struct c2_dev {
 	// spinlock_t aeq_lock;
 	// spinlock_t rnic_lock;
 
-	u16 *hint_count;
+	__be16 *hint_count;
 	dma_addr_t hint_count_dma;
 	u16 hints_read;
 
@@ -425,10 +425,10 @@ static inline void __raw_writeq(u64 val, void __iomem * addr)
 #endif
 
 #define C2_SET_CUR_RX(c2dev, cur_rx) \
-	__raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
+	__raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
 
 #define C2_GET_CUR_RX(c2dev) \
-	be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092))
+	be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
 
 static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
 {
@@ -485,8 +485,8 @@ extern void c2_unregister_device(struct c2_dev *c2dev);
 extern int c2_rnic_init(struct c2_dev *c2dev);
 extern void c2_rnic_term(struct c2_dev *c2dev);
 extern void c2_rnic_interrupt(struct c2_dev *c2dev);
-extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
-extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
+extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
+extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
 
 /* QPs */
 extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
@@ -545,7 +545,7 @@ extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
 extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
 			     struct sp_chunk **root);
 extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-extern u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
 			  dma_addr_t *dma_addr, gfp_t gfp_mask);
-extern void c2_free_mqsp(u16 * mqsp);
+extern void c2_free_mqsp(__be16* mqsp);
 #endif
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index a31439bd3b67..62af74295dbe 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -61,7 +61,7 @@ static int c2_convert_cm_status(u32 c2_status)
 	default:
 		printk(KERN_ERR PFX
 		       "%s - Unable to convert CM status: %d\n",
-		       __FUNCTION__, c2_status);
+		       __func__, c2_status);
 		return -EIO;
 	}
 }
@@ -193,9 +193,9 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 	pr_debug("%s: event = %s, user_context=%llx, "
 		 "resource_type=%x, "
 		 "resource=%x, qp_state=%s\n",
-		 __FUNCTION__,
+		 __func__,
 		 to_event_str(event_id),
-		 (unsigned long long) be64_to_cpu(wr->ae.ae_generic.user_context),
+		 (unsigned long long) wr->ae.ae_generic.user_context,
 		 be32_to_cpu(wr->ae.ae_generic.resource_type),
 		 be32_to_cpu(wr->ae.ae_generic.resource),
 		 to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
@@ -259,7 +259,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 		BUG_ON(1);
 		pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
 			 "CM_ID=%p\n",
-			 __FUNCTION__, __LINE__,
+			 __func__, __LINE__,
 			 event_id, qp, cm_id);
 		break;
 	}
@@ -276,7 +276,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 		pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
 		if (event_id != CCAE_CONNECTION_REQUEST) {
 			pr_debug("%s: Invalid event_id: %d\n",
-				 __FUNCTION__, event_id);
+				 __func__, event_id);
 			break;
 		}
 		cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 0315f99e4191..e9110163aeff 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -87,8 +87,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 	}
 }
 
-u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-		   dma_addr_t *dma_addr, gfp_t gfp_mask)
+__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+		      dma_addr_t *dma_addr, gfp_t gfp_mask)
 {
 	u16 mqsp;
 
@@ -113,14 +113,14 @@ u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
 		*dma_addr = head->dma_addr +
 			    ((unsigned long) &(head->shared_ptr[mqsp]) -
 			     (unsigned long) head);
-		pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__,
+		pr_debug("%s addr %p dma_addr %llx\n", __func__,
 			 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
-		return &(head->shared_ptr[mqsp]);
+		return (__force __be16 *) &(head->shared_ptr[mqsp]);
 	}
 	return NULL;
 }
 
-void c2_free_mqsp(u16 * mqsp)
+void c2_free_mqsp(__be16 *mqsp)
 {
 	struct sp_chunk *head;
 	u16 idx;
@@ -129,7 +129,7 @@ void c2_free_mqsp(u16 * mqsp)
 	head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
 
 	/* Link head to new mqsp */
-	*mqsp = head->head;
+	*mqsp = (__force __be16) head->head;
 
 	/* Compute the shared_ptr index */
 	idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index d2b3366786d6..bb17cce3cb59 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -422,8 +422,8 @@ void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
 		goto bail1;
 
 	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-
-	vq_repbuf_free(c2dev, reply);
+	if (reply)
+		vq_repbuf_free(c2dev, reply);
 bail1:
 	vq_req_free(c2dev, vq_req);
 bail0:
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
index 0d0bc33ca30a..3b5095470cb3 100644
--- a/drivers/infiniband/hw/amso1100/c2_intr.c
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -174,7 +174,11 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
 		return;
 	}
 
-	err = c2_errno(reply_msg);
+	if (reply_msg)
+		err = c2_errno(reply_msg);
+	else
+		err = -ENOMEM;
+
 	if (!err) switch (req->event) {
 	case IW_CM_EVENT_ESTABLISHED:
 		c2_set_qp_state(req->qp,
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c
index 1e4f46493fcb..b506fe22b4d4 100644
--- a/drivers/infiniband/hw/amso1100/c2_mm.c
+++ b/drivers/infiniband/hw/amso1100/c2_mm.c
@@ -45,7 +45,7 @@
  * Reply buffer _is_ freed by this function.
  */
 static int
-send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
+send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
 		  unsigned long va, u32 pbl_depth,
 		  struct c2_vq_req *vq_req, int pbl_type)
 {
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/infiniband/hw/amso1100/c2_mq.c
index b88a75592102..0cddc49beae1 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.c
+++ b/drivers/infiniband/hw/amso1100/c2_mq.c
@@ -64,7 +64,7 @@ void c2_mq_produce(struct c2_mq *q)
 		q->priv = (q->priv + 1) % q->q_size;
 		q->hint_count++;
 		/* Update peer's offset. */
-		__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+		__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
 	}
 }
 
@@ -105,7 +105,7 @@ void c2_mq_free(struct c2_mq *q)
 #endif
 		q->priv = (q->priv + 1) % q->q_size;
 		/* Update peer's offset. */
-		__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+		__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
 	}
 }
 
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index 9185bbb21658..acede007b94a 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -75,7 +75,7 @@ struct c2_mq {
 	u16 hint_count;
 	u16 priv;
 	struct c2_mq_shared __iomem *peer;
-	u16 *shared;
+	__be16 *shared;
 	dma_addr_t shared_dma;
 	u32 q_size;
 	u32 msg_size;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 7a6cece6ea9d..e10d27a6e145 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -67,7 +67,7 @@ static int c2_query_device(struct ib_device *ibdev,
 {
 	struct c2_dev *c2dev = to_c2dev(ibdev);
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	*props = c2dev->props;
 	return 0;
@@ -76,7 +76,7 @@ static int c2_query_device(struct ib_device *ibdev,
 static int c2_query_port(struct ib_device *ibdev,
 			 u8 port, struct ib_port_attr *props)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	props->max_mtu = IB_MTU_4096;
 	props->lid = 0;
@@ -102,14 +102,14 @@ static int c2_modify_port(struct ib_device *ibdev,
 			  u8 port, int port_modify_mask,
 			  struct ib_port_modify *props)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return 0;
 }
 
 static int c2_query_pkey(struct ib_device *ibdev,
 			 u8 port, u16 index, u16 * pkey)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	*pkey = 0;
 	return 0;
 }
@@ -119,7 +119,7 @@ static int c2_query_gid(struct ib_device *ibdev, u8 port,
 {
 	struct c2_dev *c2dev = to_c2dev(ibdev);
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
 	memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
 
@@ -134,7 +134,7 @@ static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
 {
 	struct c2_ucontext *context;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	context = kmalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return ERR_PTR(-ENOMEM);
@@ -144,14 +144,14 @@ static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
 
 static int c2_dealloc_ucontext(struct ib_ucontext *context)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	kfree(context);
 	return 0;
 }
 
 static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
@@ -162,7 +162,7 @@ static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
 	struct c2_pd *pd;
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 
 	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -187,7 +187,7 @@ static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
 
 static int c2_dealloc_pd(struct ib_pd *pd)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
 	kfree(pd);
 
@@ -196,13 +196,13 @@ static int c2_dealloc_pd(struct ib_pd *pd)
 
 static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return ERR_PTR(-ENOSYS);
 }
 
 static int c2_ah_destroy(struct ib_ah *ah)
 {
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
 	return -ENOSYS;
 }
 
@@ -230,7 +230,7 @@ struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
 
 	qp = c2_find_qpn(c2dev, qpn);
 	pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
-		__FUNCTION__, qp, qpn, device,
+		__func__, qp, qpn, device,
 		(qp?atomic_read(&qp->refcount):0));
 
 	return (qp?&qp->ibqp:NULL);
@@ -243,13 +243,16 @@ static struct ib_qp *c2_create_qp(struct ib_pd *pd,
 	struct c2_qp *qp;
 	int err;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+	pr_debug("%s:%u\n", __func__, __LINE__);
+
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp) {
-			pr_debug("%s: Unable to allocate QP\n", __FUNCTION__);
+			pr_debug("%s: Unable to allocate QP\n", __func__);
 			return ERR_PTR(-ENOMEM);
 		}
 		spin_lock_init(&qp->lock);
@@ -266,7 +269,7 @@ static struct ib_qp *c2_create_qp(struct ib_pd *pd,
 
 		break;
 	default:
-		pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__,
+		pr_debug("%s: Invalid QP type: %d\n", __func__,
 			 init_attr->qp_type);
 		return ERR_PTR(-EINVAL);
 		break;
@@ -285,7 +288,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
285 struct c2_qp *qp = to_c2qp(ib_qp); 288 struct c2_qp *qp = to_c2qp(ib_qp);
286 289
287 pr_debug("%s:%u qp=%p,qp->state=%d\n", 290 pr_debug("%s:%u qp=%p,qp->state=%d\n",
288 __FUNCTION__, __LINE__,ib_qp,qp->state); 291 __func__, __LINE__, ib_qp, qp->state);
289 c2_free_qp(to_c2dev(ib_qp->device), qp); 292 c2_free_qp(to_c2dev(ib_qp->device), qp);
290 kfree(qp); 293 kfree(qp);
291 return 0; 294 return 0;
@@ -300,13 +303,13 @@ static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vect
300 303
301 cq = kmalloc(sizeof(*cq), GFP_KERNEL); 304 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
302 if (!cq) { 305 if (!cq) {
303 pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__); 306 pr_debug("%s: Unable to allocate CQ\n", __func__);
304 return ERR_PTR(-ENOMEM); 307 return ERR_PTR(-ENOMEM);
305 } 308 }
306 309
307 err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq); 310 err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
308 if (err) { 311 if (err) {
309 pr_debug("%s: error initializing CQ\n", __FUNCTION__); 312 pr_debug("%s: error initializing CQ\n", __func__);
310 kfree(cq); 313 kfree(cq);
311 return ERR_PTR(err); 314 return ERR_PTR(err);
312 } 315 }
@@ -318,7 +321,7 @@ static int c2_destroy_cq(struct ib_cq *ib_cq)
318{ 321{
319 struct c2_cq *cq = to_c2cq(ib_cq); 322 struct c2_cq *cq = to_c2cq(ib_cq);
320 323
321 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 324 pr_debug("%s:%u\n", __func__, __LINE__);
322 325
323 c2_free_cq(to_c2dev(ib_cq->device), cq); 326 c2_free_cq(to_c2dev(ib_cq->device), cq);
324 kfree(cq); 327 kfree(cq);
@@ -400,7 +403,7 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
400 mr->umem = NULL; 403 mr->umem = NULL;
401 pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, " 404 pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
402 "*iova_start %llx, first pa %llx, last pa %llx\n", 405 "*iova_start %llx, first pa %llx, last pa %llx\n",
403 __FUNCTION__, page_shift, pbl_depth, total_len, 406 __func__, page_shift, pbl_depth, total_len,
404 (unsigned long long) *iova_start, 407 (unsigned long long) *iova_start,
405 (unsigned long long) page_list[0], 408 (unsigned long long) page_list[0],
406 (unsigned long long) page_list[pbl_depth-1]); 409 (unsigned long long) page_list[pbl_depth-1]);
@@ -422,7 +425,7 @@ static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
422 struct ib_phys_buf bl; 425 struct ib_phys_buf bl;
423 u64 kva = 0; 426 u64 kva = 0;
424 427
425 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 428 pr_debug("%s:%u\n", __func__, __LINE__);
426 429
427 /* AMSO1100 limit */ 430 /* AMSO1100 limit */
428 bl.size = 0xffffffff; 431 bl.size = 0xffffffff;
@@ -442,7 +445,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
442 struct c2_pd *c2pd = to_c2pd(pd); 445 struct c2_pd *c2pd = to_c2pd(pd);
443 struct c2_mr *c2mr; 446 struct c2_mr *c2mr;
444 447
445 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 448 pr_debug("%s:%u\n", __func__, __LINE__);
446 449
447 c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL); 450 c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
448 if (!c2mr) 451 if (!c2mr)
@@ -506,7 +509,7 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
506 struct c2_mr *mr = to_c2mr(ib_mr); 509 struct c2_mr *mr = to_c2mr(ib_mr);
507 int err; 510 int err;
508 511
509 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 512 pr_debug("%s:%u\n", __func__, __LINE__);
510 513
511 err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey); 514 err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
512 if (err) 515 if (err)
@@ -523,14 +526,14 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
523static ssize_t show_rev(struct class_device *cdev, char *buf) 526static ssize_t show_rev(struct class_device *cdev, char *buf)
524{ 527{
525 struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev); 528 struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
526 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 529 pr_debug("%s:%u\n", __func__, __LINE__);
527 return sprintf(buf, "%x\n", dev->props.hw_ver); 530 return sprintf(buf, "%x\n", dev->props.hw_ver);
528} 531}
529 532
530static ssize_t show_fw_ver(struct class_device *cdev, char *buf) 533static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
531{ 534{
532 struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev); 535 struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
533 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 536 pr_debug("%s:%u\n", __func__, __LINE__);
534 return sprintf(buf, "%x.%x.%x\n", 537 return sprintf(buf, "%x.%x.%x\n",
535 (int) (dev->props.fw_ver >> 32), 538 (int) (dev->props.fw_ver >> 32),
536 (int) (dev->props.fw_ver >> 16) & 0xffff, 539 (int) (dev->props.fw_ver >> 16) & 0xffff,
@@ -539,13 +542,13 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
539 542
540static ssize_t show_hca(struct class_device *cdev, char *buf) 543static ssize_t show_hca(struct class_device *cdev, char *buf)
541{ 544{
542 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 545 pr_debug("%s:%u\n", __func__, __LINE__);
543 return sprintf(buf, "AMSO1100\n"); 546 return sprintf(buf, "AMSO1100\n");
544} 547}
545 548
546static ssize_t show_board(struct class_device *cdev, char *buf) 549static ssize_t show_board(struct class_device *cdev, char *buf)
547{ 550{
548 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 551 pr_debug("%s:%u\n", __func__, __LINE__);
549 return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID"); 552 return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
550} 553}
551 554
@@ -575,13 +578,13 @@ static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
575 578
576static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 579static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
577{ 580{
578 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 581 pr_debug("%s:%u\n", __func__, __LINE__);
579 return -ENOSYS; 582 return -ENOSYS;
580} 583}
581 584
582static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 585static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
583{ 586{
584 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 587 pr_debug("%s:%u\n", __func__, __LINE__);
585 return -ENOSYS; 588 return -ENOSYS;
586} 589}
587 590
@@ -592,13 +595,13 @@ static int c2_process_mad(struct ib_device *ibdev,
592 struct ib_grh *in_grh, 595 struct ib_grh *in_grh,
593 struct ib_mad *in_mad, struct ib_mad *out_mad) 596 struct ib_mad *in_mad, struct ib_mad *out_mad)
594{ 597{
595 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 598 pr_debug("%s:%u\n", __func__, __LINE__);
596 return -ENOSYS; 599 return -ENOSYS;
597} 600}
598 601
599static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) 602static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
600{ 603{
601 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 604 pr_debug("%s:%u\n", __func__, __LINE__);
602 605
603 /* Request a connection */ 606 /* Request a connection */
604 return c2_llp_connect(cm_id, iw_param); 607 return c2_llp_connect(cm_id, iw_param);
@@ -606,7 +609,7 @@ static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
606 609
607static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) 610static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
608{ 611{
609 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 612 pr_debug("%s:%u\n", __func__, __LINE__);
610 613
611 /* Accept the new connection */ 614 /* Accept the new connection */
612 return c2_llp_accept(cm_id, iw_param); 615 return c2_llp_accept(cm_id, iw_param);
@@ -616,7 +619,7 @@ static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
616{ 619{
617 int err; 620 int err;
618 621
619 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 622 pr_debug("%s:%u\n", __func__, __LINE__);
620 623
621 err = c2_llp_reject(cm_id, pdata, pdata_len); 624 err = c2_llp_reject(cm_id, pdata, pdata_len);
622 return err; 625 return err;
@@ -626,10 +629,10 @@ static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
626{ 629{
627 int err; 630 int err;
628 631
629 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 632 pr_debug("%s:%u\n", __func__, __LINE__);
630 err = c2_llp_service_create(cm_id, backlog); 633 err = c2_llp_service_create(cm_id, backlog);
631 pr_debug("%s:%u err=%d\n", 634 pr_debug("%s:%u err=%d\n",
632 __FUNCTION__, __LINE__, 635 __func__, __LINE__,
633 err); 636 err);
634 return err; 637 return err;
635} 638}
@@ -637,7 +640,7 @@ static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
637static int c2_service_destroy(struct iw_cm_id *cm_id) 640static int c2_service_destroy(struct iw_cm_id *cm_id)
638{ 641{
639 int err; 642 int err;
640 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 643 pr_debug("%s:%u\n", __func__, __LINE__);
641 644
642 err = c2_llp_service_destroy(cm_id); 645 err = c2_llp_service_destroy(cm_id);
643 646
@@ -743,7 +746,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
743 netdev = alloc_netdev(sizeof(*netdev), name, setup); 746 netdev = alloc_netdev(sizeof(*netdev), name, setup);
744 if (!netdev) { 747 if (!netdev) {
745 printk(KERN_ERR PFX "%s - etherdev alloc failed", 748 printk(KERN_ERR PFX "%s - etherdev alloc failed",
746 __FUNCTION__); 749 __func__);
747 return NULL; 750 return NULL;
748 } 751 }
749 752
@@ -780,7 +783,7 @@ int c2_register_device(struct c2_dev *dev)
780 if (ret) 783 if (ret)
781 goto out2; 784 goto out2;
782 785
783 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 786 pr_debug("%s:%u\n", __func__, __LINE__);
784 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); 787 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
785 dev->ibdev.owner = THIS_MODULE; 788 dev->ibdev.owner = THIS_MODULE;
786 dev->ibdev.uverbs_cmd_mask = 789 dev->ibdev.uverbs_cmd_mask =
@@ -873,13 +876,13 @@ out1:
873out2: 876out2:
874 free_netdev(dev->pseudo_netdev); 877 free_netdev(dev->pseudo_netdev);
875out3: 878out3:
876 pr_debug("%s:%u ret=%d\n", __FUNCTION__, __LINE__, ret); 879 pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
877 return ret; 880 return ret;
878} 881}
879 882
880void c2_unregister_device(struct c2_dev *dev) 883void c2_unregister_device(struct c2_dev *dev)
881{ 884{
882 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 885 pr_debug("%s:%u\n", __func__, __LINE__);
883 unregister_netdev(dev->pseudo_netdev); 886 unregister_netdev(dev->pseudo_netdev);
884 free_netdev(dev->pseudo_netdev); 887 free_netdev(dev->pseudo_netdev);
885 ib_unregister_device(&dev->ibdev); 888 ib_unregister_device(&dev->ibdev);
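The c2_provider.c hunks above are dominated by one mechanical conversion: the GCC-specific __FUNCTION__ is replaced by __func__, the predefined identifier that C99 mandates. A minimal userspace sketch of the idiom (illustration only, not driver code):

	#include <stdio.h>

	/* C99 defines __func__ as if each function body began with
	 *     static const char __func__[] = "function-name";
	 * __FUNCTION__ is the older GNU alias this patch retires. */
	static void demo(void)
	{
		printf("%s:%u\n", __func__, __LINE__);	/* prints "demo:<line>" */
	}

	int main(void)
	{
		demo();
		return 0;
	}

The same substitution repeats in every file below; the remaining hunks carry the substantive changes.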
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 01d07862ea86..a6d89440ad2c 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -121,7 +121,7 @@ void c2_set_qp_state(struct c2_qp *qp, int c2_state)
121 int new_state = to_ib_state(c2_state); 121 int new_state = to_ib_state(c2_state);
122 122
123 pr_debug("%s: qp[%p] state modify %s --> %s\n", 123 pr_debug("%s: qp[%p] state modify %s --> %s\n",
124 __FUNCTION__, 124 __func__,
125 qp, 125 qp,
126 to_ib_state_str(qp->state), 126 to_ib_state_str(qp->state),
127 to_ib_state_str(new_state)); 127 to_ib_state_str(new_state));
@@ -141,7 +141,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
141 int err; 141 int err;
142 142
143 pr_debug("%s:%d qp=%p, %s --> %s\n", 143 pr_debug("%s:%d qp=%p, %s --> %s\n",
144 __FUNCTION__, __LINE__, 144 __func__, __LINE__,
145 qp, 145 qp,
146 to_ib_state_str(qp->state), 146 to_ib_state_str(qp->state),
147 to_ib_state_str(attr->qp_state)); 147 to_ib_state_str(attr->qp_state));
@@ -224,7 +224,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
224 qp->state = next_state; 224 qp->state = next_state;
225#ifdef DEBUG 225#ifdef DEBUG
226 else 226 else
227 pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err); 227 pr_debug("%s: c2_errno=%d\n", __func__, err);
228#endif 228#endif
229 /* 229 /*
230 * If we're going to error and generating the event here, then 230 * If we're going to error and generating the event here, then
@@ -243,7 +243,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
243 vq_req_free(c2dev, vq_req); 243 vq_req_free(c2dev, vq_req);
244 244
245 pr_debug("%s:%d qp=%p, cur_state=%s\n", 245 pr_debug("%s:%d qp=%p, cur_state=%s\n",
246 __FUNCTION__, __LINE__, 246 __func__, __LINE__,
247 qp, 247 qp,
248 to_ib_state_str(qp->state)); 248 to_ib_state_str(qp->state));
249 return err; 249 return err;
@@ -811,16 +811,24 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
811 811
812 switch (ib_wr->opcode) { 812 switch (ib_wr->opcode) {
813 case IB_WR_SEND: 813 case IB_WR_SEND:
814 if (ib_wr->send_flags & IB_SEND_SOLICITED) { 814 case IB_WR_SEND_WITH_INV:
815 c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE); 815 if (ib_wr->opcode == IB_WR_SEND) {
816 msg_size = sizeof(struct c2wr_send_req); 816 if (ib_wr->send_flags & IB_SEND_SOLICITED)
817 c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
818 else
819 c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
820 wr.sqwr.send.remote_stag = 0;
817 } else { 821 } else {
818 c2_wr_set_id(&wr, C2_WR_TYPE_SEND); 822 if (ib_wr->send_flags & IB_SEND_SOLICITED)
819 msg_size = sizeof(struct c2wr_send_req); 823 c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
824 else
825 c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
826 wr.sqwr.send.remote_stag =
827 cpu_to_be32(ib_wr->ex.invalidate_rkey);
820 } 828 }
821 829
822 wr.sqwr.send.remote_stag = 0; 830 msg_size = sizeof(struct c2wr_send_req) +
823 msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge; 831 sizeof(struct c2_data_addr) * ib_wr->num_sge;
824 if (ib_wr->num_sge > qp->send_sgl_depth) { 832 if (ib_wr->num_sge > qp->send_sgl_depth) {
825 err = -EINVAL; 833 err = -EINVAL;
826 break; 834 break;
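The c2_post_send() hunk just above is the one behavioral change in c2_qp.c: IB_WR_SEND and the new IB_WR_SEND_WITH_INV now share a case, the adapter WR type is chosen from the opcode plus the solicited-event flag, and msg_size is computed once for both paths. A hedged sketch of the resulting four-way mapping (the enum and function names here are illustrative, not driver code):

	/* SE = solicited event.  The *_INV kinds additionally carry
	 * ib_wr->ex.invalidate_rkey, byte-swapped into remote_stag;
	 * plain sends zero remote_stag instead. */
	enum send_kind { SEND, SEND_SE, SEND_INV, SEND_SE_INV };

	static enum send_kind pick_send_kind(int with_inv, int solicited)
	{
		if (!with_inv)
			return solicited ? SEND_SE : SEND;
		return solicited ? SEND_SE_INV : SEND_INV;
	}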
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 1687c511cb2f..9a054c6941a4 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -208,7 +208,7 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
208/* 208/*
209 * Add an IP address to the RNIC interface 209 * Add an IP address to the RNIC interface
210 */ 210 */
211int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask) 211int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
212{ 212{
213 struct c2_vq_req *vq_req; 213 struct c2_vq_req *vq_req;
214 struct c2wr_rnic_setconfig_req *wr; 214 struct c2wr_rnic_setconfig_req *wr;
@@ -270,7 +270,7 @@ int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
270/* 270/*
271 * Delete an IP address from the RNIC interface 271 * Delete an IP address from the RNIC interface
272 */ 272 */
273int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask) 273int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
274{ 274{
275 struct c2_vq_req *vq_req; 275 struct c2_vq_req *vq_req;
276 struct c2wr_rnic_setconfig_req *wr; 276 struct c2wr_rnic_setconfig_req *wr;
@@ -455,7 +455,8 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
455 IB_DEVICE_CURR_QP_STATE_MOD | 455 IB_DEVICE_CURR_QP_STATE_MOD |
456 IB_DEVICE_SYS_IMAGE_GUID | 456 IB_DEVICE_SYS_IMAGE_GUID |
457 IB_DEVICE_ZERO_STAG | 457 IB_DEVICE_ZERO_STAG |
458 IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW); 458 IB_DEVICE_MEM_WINDOW |
459 IB_DEVICE_SEND_W_INV);
459 460
460 /* Allocate the qptr_array */ 461 /* Allocate the qptr_array */
461 c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *)); 462 c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
@@ -506,17 +507,17 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
506 mmio_regs = c2dev->kva; 507 mmio_regs = c2dev->kva;
507 /* Initialize the Verbs Request Queue */ 508 /* Initialize the Verbs Request Queue */
508 c2_mq_req_init(&c2dev->req_vq, 0, 509 c2_mq_req_init(&c2dev->req_vq, 0,
509 be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)), 510 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
510 be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)), 511 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
511 mmio_regs + 512 mmio_regs +
512 be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)), 513 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
513 mmio_regs + 514 mmio_regs +
514 be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)), 515 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
515 C2_MQ_ADAPTER_TARGET); 516 C2_MQ_ADAPTER_TARGET);
516 517
517 /* Initialize the Verbs Reply Queue */ 518 /* Initialize the Verbs Reply Queue */
518 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE)); 519 qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
519 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE)); 520 msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
520 q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize, 521 q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
521 &c2dev->rep_vq.host_dma, GFP_KERNEL); 522 &c2dev->rep_vq.host_dma, GFP_KERNEL);
522 if (!q1_pages) { 523 if (!q1_pages) {
@@ -524,7 +525,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
524 goto bail1; 525 goto bail1;
525 } 526 }
526 pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); 527 pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
527 pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages, 528 pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
528 (unsigned long long) c2dev->rep_vq.host_dma); 529 (unsigned long long) c2dev->rep_vq.host_dma);
529 c2_mq_rep_init(&c2dev->rep_vq, 530 c2_mq_rep_init(&c2dev->rep_vq,
530 1, 531 1,
@@ -532,12 +533,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
532 msgsize, 533 msgsize,
533 q1_pages, 534 q1_pages,
534 mmio_regs + 535 mmio_regs +
535 be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)), 536 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
536 C2_MQ_HOST_TARGET); 537 C2_MQ_HOST_TARGET);
537 538
538 /* Initialize the Asynchronous Event Queue */ 539 /* Initialize the Asynchronous Event Queue */
539 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE)); 540 qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
540 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE)); 541 msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
541 q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize, 542 q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
542 &c2dev->aeq.host_dma, GFP_KERNEL); 543 &c2dev->aeq.host_dma, GFP_KERNEL);
543 if (!q2_pages) { 544 if (!q2_pages) {
@@ -545,7 +546,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
545 goto bail2; 546 goto bail2;
546 } 547 }
547 pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); 548 pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
548 pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages, 549 pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
549 (unsigned long long) c2dev->aeq.host_dma); 550 (unsigned long long) c2dev->aeq.host_dma);
550 c2_mq_rep_init(&c2dev->aeq, 551 c2_mq_rep_init(&c2dev->aeq,
551 2, 552 2,
@@ -553,7 +554,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
553 msgsize, 554 msgsize,
554 q2_pages, 555 q2_pages,
555 mmio_regs + 556 mmio_regs +
556 be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)), 557 be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
557 C2_MQ_HOST_TARGET); 558 C2_MQ_HOST_TARGET);
558 559
559 /* Initialize the verbs request allocator */ 560 /* Initialize the verbs request allocator */
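The readl() changes in c2_rnic_init() above are annotation-only: these AMSO1100 queue registers hold big-endian values, so the u32 that readl() returns is reinterpreted as __be32 before the swap, and the __force cast tells sparse that the crossing of bitwise types is deliberate. The recurring idiom, as a sketch:

	/* Read a device register documented to hold a big-endian value;
	 * identical machine code to before, but now clean under sparse's
	 * endianness checking. */
	static u32 read_be32_reg(void __iomem *reg)
	{
		return be32_to_cpu((__force __be32) readl(reg));
	}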
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index cfdacb1ec279..9ce7819b7b2e 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -197,7 +197,7 @@ int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
197 */ 197 */
198 while (msg == NULL) { 198 while (msg == NULL) {
199 pr_debug("%s:%d no available msg in VQ, waiting...\n", 199 pr_debug("%s:%d no available msg in VQ, waiting...\n",
200 __FUNCTION__, __LINE__); 200 __func__, __LINE__);
201 init_waitqueue_entry(&__wait, current); 201 init_waitqueue_entry(&__wait, current);
202 add_wait_queue(&c2dev->req_vq_wo, &__wait); 202 add_wait_queue(&c2dev->req_vq_wo, &__wait);
203 spin_unlock(&c2dev->vqlock); 203 spin_unlock(&c2dev->vqlock);
diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/infiniband/hw/amso1100/c2_wr.h
index 3ec6c43bb0ef..c65fbdd6e469 100644
--- a/drivers/infiniband/hw/amso1100/c2_wr.h
+++ b/drivers/infiniband/hw/amso1100/c2_wr.h
@@ -180,8 +180,8 @@ enum c2_wr_type {
180}; 180};
181 181
182struct c2_netaddr { 182struct c2_netaddr {
183 u32 ip_addr; 183 __be32 ip_addr;
184 u32 netmask; 184 __be32 netmask;
185 u32 mtu; 185 u32 mtu;
186}; 186};
187 187
@@ -199,9 +199,9 @@ struct c2_route {
199 * A Scatter Gather Entry. 199 * A Scatter Gather Entry.
200 */ 200 */
201struct c2_data_addr { 201struct c2_data_addr {
202 u32 stag; 202 __be32 stag;
203 u32 length; 203 __be32 length;
204 u64 to; 204 __be64 to;
205}; 205};
206 206
207/* 207/*
@@ -274,7 +274,7 @@ struct c2wr_hdr {
274 * from the host to adapter by libccil, but we copy it anyway 274 * from the host to adapter by libccil, but we copy it anyway
275 * to make the memcpy to the adapter better aligned. 275 * to make the memcpy to the adapter better aligned.
276 */ 276 */
277 u32 wqe_count; 277 __be32 wqe_count;
278 278
279 /* Put these fields next so that later 32- and 64-bit 279 /* Put these fields next so that later 32- and 64-bit
280 * quantities are naturally aligned. 280 * quantities are naturally aligned.
@@ -316,8 +316,8 @@ enum c2_rnic_flags {
316struct c2wr_rnic_open_req { 316struct c2wr_rnic_open_req {
317 struct c2wr_hdr hdr; 317 struct c2wr_hdr hdr;
318 u64 user_context; 318 u64 user_context;
319 u16 flags; /* See enum c2_rnic_flags */ 319 __be16 flags; /* See enum c2_rnic_flags */
320 u16 port_num; 320 __be16 port_num;
321} __attribute__((packed)); 321} __attribute__((packed));
322 322
323struct c2wr_rnic_open_rep { 323struct c2wr_rnic_open_rep {
@@ -341,30 +341,30 @@ struct c2wr_rnic_query_req {
341struct c2wr_rnic_query_rep { 341struct c2wr_rnic_query_rep {
342 struct c2wr_hdr hdr; 342 struct c2wr_hdr hdr;
343 u64 user_context; 343 u64 user_context;
344 u32 vendor_id; 344 __be32 vendor_id;
345 u32 part_number; 345 __be32 part_number;
346 u32 hw_version; 346 __be32 hw_version;
347 u32 fw_ver_major; 347 __be32 fw_ver_major;
348 u32 fw_ver_minor; 348 __be32 fw_ver_minor;
349 u32 fw_ver_patch; 349 __be32 fw_ver_patch;
350 char fw_ver_build_str[WR_BUILD_STR_LEN]; 350 char fw_ver_build_str[WR_BUILD_STR_LEN];
351 u32 max_qps; 351 __be32 max_qps;
352 u32 max_qp_depth; 352 __be32 max_qp_depth;
353 u32 max_srq_depth; 353 u32 max_srq_depth;
354 u32 max_send_sgl_depth; 354 u32 max_send_sgl_depth;
355 u32 max_rdma_sgl_depth; 355 u32 max_rdma_sgl_depth;
356 u32 max_cqs; 356 __be32 max_cqs;
357 u32 max_cq_depth; 357 __be32 max_cq_depth;
358 u32 max_cq_event_handlers; 358 u32 max_cq_event_handlers;
359 u32 max_mrs; 359 __be32 max_mrs;
360 u32 max_pbl_depth; 360 u32 max_pbl_depth;
361 u32 max_pds; 361 __be32 max_pds;
362 u32 max_global_ird; 362 __be32 max_global_ird;
363 u32 max_global_ord; 363 u32 max_global_ord;
364 u32 max_qp_ird; 364 __be32 max_qp_ird;
365 u32 max_qp_ord; 365 __be32 max_qp_ord;
366 u32 flags; 366 u32 flags;
367 u32 max_mws; 367 __be32 max_mws;
368 u32 pbe_range_low; 368 u32 pbe_range_low;
369 u32 pbe_range_high; 369 u32 pbe_range_high;
370 u32 max_srqs; 370 u32 max_srqs;
@@ -405,7 +405,7 @@ union c2wr_rnic_getconfig {
405struct c2wr_rnic_setconfig_req { 405struct c2wr_rnic_setconfig_req {
406 struct c2wr_hdr hdr; 406 struct c2wr_hdr hdr;
407 u32 rnic_handle; 407 u32 rnic_handle;
408 u32 option; /* See c2_setconfig_cmd_t */ 408 __be32 option; /* See c2_setconfig_cmd_t */
409 /* variable data and pad. See c2_netaddr and c2_route */ 409 /* variable data and pad. See c2_netaddr and c2_route */
410 u8 data[0]; 410 u8 data[0];
411} __attribute__((packed)) ; 411} __attribute__((packed)) ;
@@ -441,18 +441,18 @@ union c2wr_rnic_close {
441 */ 441 */
442struct c2wr_cq_create_req { 442struct c2wr_cq_create_req {
443 struct c2wr_hdr hdr; 443 struct c2wr_hdr hdr;
444 u64 shared_ht; 444 __be64 shared_ht;
445 u64 user_context; 445 u64 user_context;
446 u64 msg_pool; 446 __be64 msg_pool;
447 u32 rnic_handle; 447 u32 rnic_handle;
448 u32 msg_size; 448 __be32 msg_size;
449 u32 depth; 449 __be32 depth;
450} __attribute__((packed)) ; 450} __attribute__((packed)) ;
451 451
452struct c2wr_cq_create_rep { 452struct c2wr_cq_create_rep {
453 struct c2wr_hdr hdr; 453 struct c2wr_hdr hdr;
454 u32 mq_index; 454 __be32 mq_index;
455 u32 adapter_shared; 455 __be32 adapter_shared;
456 u32 cq_handle; 456 u32 cq_handle;
457} __attribute__((packed)) ; 457} __attribute__((packed)) ;
458 458
@@ -585,40 +585,40 @@ enum c2wr_qp_flags {
585 585
586struct c2wr_qp_create_req { 586struct c2wr_qp_create_req {
587 struct c2wr_hdr hdr; 587 struct c2wr_hdr hdr;
588 u64 shared_sq_ht; 588 __be64 shared_sq_ht;
589 u64 shared_rq_ht; 589 __be64 shared_rq_ht;
590 u64 user_context; 590 u64 user_context;
591 u32 rnic_handle; 591 u32 rnic_handle;
592 u32 sq_cq_handle; 592 u32 sq_cq_handle;
593 u32 rq_cq_handle; 593 u32 rq_cq_handle;
594 u32 sq_depth; 594 __be32 sq_depth;
595 u32 rq_depth; 595 __be32 rq_depth;
596 u32 srq_handle; 596 u32 srq_handle;
597 u32 srq_limit; 597 u32 srq_limit;
598 u32 flags; /* see enum c2wr_qp_flags */ 598 __be32 flags; /* see enum c2wr_qp_flags */
599 u32 send_sgl_depth; 599 __be32 send_sgl_depth;
600 u32 recv_sgl_depth; 600 __be32 recv_sgl_depth;
601 u32 rdma_write_sgl_depth; 601 __be32 rdma_write_sgl_depth;
602 u32 ord; 602 __be32 ord;
603 u32 ird; 603 __be32 ird;
604 u32 pd_id; 604 u32 pd_id;
605} __attribute__((packed)) ; 605} __attribute__((packed)) ;
606 606
607struct c2wr_qp_create_rep { 607struct c2wr_qp_create_rep {
608 struct c2wr_hdr hdr; 608 struct c2wr_hdr hdr;
609 u32 sq_depth; 609 __be32 sq_depth;
610 u32 rq_depth; 610 __be32 rq_depth;
611 u32 send_sgl_depth; 611 u32 send_sgl_depth;
612 u32 recv_sgl_depth; 612 u32 recv_sgl_depth;
613 u32 rdma_write_sgl_depth; 613 u32 rdma_write_sgl_depth;
614 u32 ord; 614 u32 ord;
615 u32 ird; 615 u32 ird;
616 u32 sq_msg_size; 616 __be32 sq_msg_size;
617 u32 sq_mq_index; 617 __be32 sq_mq_index;
618 u32 sq_mq_start; 618 __be32 sq_mq_start;
619 u32 rq_msg_size; 619 __be32 rq_msg_size;
620 u32 rq_mq_index; 620 __be32 rq_mq_index;
621 u32 rq_mq_start; 621 __be32 rq_mq_start;
622 u32 qp_handle; 622 u32 qp_handle;
623} __attribute__((packed)) ; 623} __attribute__((packed)) ;
624 624
@@ -667,11 +667,11 @@ struct c2wr_qp_modify_req {
667 u32 stream_msg_length; 667 u32 stream_msg_length;
668 u32 rnic_handle; 668 u32 rnic_handle;
669 u32 qp_handle; 669 u32 qp_handle;
670 u32 next_qp_state; 670 __be32 next_qp_state;
671 u32 ord; 671 __be32 ord;
672 u32 ird; 672 __be32 ird;
673 u32 sq_depth; 673 __be32 sq_depth;
674 u32 rq_depth; 674 __be32 rq_depth;
675 u32 llp_ep_handle; 675 u32 llp_ep_handle;
676} __attribute__((packed)) ; 676} __attribute__((packed)) ;
677 677
@@ -721,10 +721,10 @@ struct c2wr_qp_connect_req {
721 struct c2wr_hdr hdr; 721 struct c2wr_hdr hdr;
722 u32 rnic_handle; 722 u32 rnic_handle;
723 u32 qp_handle; 723 u32 qp_handle;
724 u32 remote_addr; 724 __be32 remote_addr;
725 u16 remote_port; 725 __be16 remote_port;
726 u16 pad; 726 u16 pad;
727 u32 private_data_length; 727 __be32 private_data_length;
728 u8 private_data[0]; /* Private data in-line. */ 728 u8 private_data[0]; /* Private data in-line. */
729} __attribute__((packed)) ; 729} __attribute__((packed)) ;
730 730
@@ -759,25 +759,25 @@ union c2wr_nsmr_stag_alloc {
759 759
760struct c2wr_nsmr_register_req { 760struct c2wr_nsmr_register_req {
761 struct c2wr_hdr hdr; 761 struct c2wr_hdr hdr;
762 u64 va; 762 __be64 va;
763 u32 rnic_handle; 763 u32 rnic_handle;
764 u16 flags; 764 __be16 flags;
765 u8 stag_key; 765 u8 stag_key;
766 u8 pad; 766 u8 pad;
767 u32 pd_id; 767 u32 pd_id;
768 u32 pbl_depth; 768 __be32 pbl_depth;
769 u32 pbe_size; 769 __be32 pbe_size;
770 u32 fbo; 770 __be32 fbo;
771 u32 length; 771 __be32 length;
772 u32 addrs_length; 772 __be32 addrs_length;
773 /* array of paddrs (must be aligned on a 64bit boundary) */ 773 /* array of paddrs (must be aligned on a 64bit boundary) */
774 u64 paddrs[0]; 774 __be64 paddrs[0];
775} __attribute__((packed)) ; 775} __attribute__((packed)) ;
776 776
777struct c2wr_nsmr_register_rep { 777struct c2wr_nsmr_register_rep {
778 struct c2wr_hdr hdr; 778 struct c2wr_hdr hdr;
779 u32 pbl_depth; 779 u32 pbl_depth;
780 u32 stag_index; 780 __be32 stag_index;
781} __attribute__((packed)) ; 781} __attribute__((packed)) ;
782 782
783union c2wr_nsmr_register { 783union c2wr_nsmr_register {
@@ -788,11 +788,11 @@ union c2wr_nsmr_register {
788struct c2wr_nsmr_pbl_req { 788struct c2wr_nsmr_pbl_req {
789 struct c2wr_hdr hdr; 789 struct c2wr_hdr hdr;
790 u32 rnic_handle; 790 u32 rnic_handle;
791 u32 flags; 791 __be32 flags;
792 u32 stag_index; 792 __be32 stag_index;
793 u32 addrs_length; 793 __be32 addrs_length;
794 /* array of paddrs (must be aligned on a 64bit boundary) */ 794 /* array of paddrs (must be aligned on a 64bit boundary) */
795 u64 paddrs[0]; 795 __be64 paddrs[0];
796} __attribute__((packed)) ; 796} __attribute__((packed)) ;
797 797
798struct c2wr_nsmr_pbl_rep { 798struct c2wr_nsmr_pbl_rep {
@@ -847,7 +847,7 @@ union c2wr_mw_query {
847struct c2wr_stag_dealloc_req { 847struct c2wr_stag_dealloc_req {
848 struct c2wr_hdr hdr; 848 struct c2wr_hdr hdr;
849 u32 rnic_handle; 849 u32 rnic_handle;
850 u32 stag_index; 850 __be32 stag_index;
851} __attribute__((packed)) ; 851} __attribute__((packed)) ;
852 852
853struct c2wr_stag_dealloc_rep { 853struct c2wr_stag_dealloc_rep {
@@ -949,7 +949,7 @@ struct c2wr_ce {
949 u64 qp_user_context; /* c2_user_qp_t * */ 949 u64 qp_user_context; /* c2_user_qp_t * */
950 u32 qp_state; /* Current QP State */ 950 u32 qp_state; /* Current QP State */
951 u32 handle; /* QPID or EP Handle */ 951 u32 handle; /* QPID or EP Handle */
952 u32 bytes_rcvd; /* valid for RECV WCs */ 952 __be32 bytes_rcvd; /* valid for RECV WCs */
953 u32 stag; 953 u32 stag;
954} __attribute__((packed)) ; 954} __attribute__((packed)) ;
955 955
@@ -984,8 +984,8 @@ struct c2_rq_hdr {
984 */ 984 */
985struct c2wr_send_req { 985struct c2wr_send_req {
986 struct c2_sq_hdr sq_hdr; 986 struct c2_sq_hdr sq_hdr;
987 u32 sge_len; 987 __be32 sge_len;
988 u32 remote_stag; 988 __be32 remote_stag;
989 u8 data[0]; /* SGE array */ 989 u8 data[0]; /* SGE array */
990} __attribute__((packed)); 990} __attribute__((packed));
991 991
@@ -996,9 +996,9 @@ union c2wr_send {
996 996
997struct c2wr_rdma_write_req { 997struct c2wr_rdma_write_req {
998 struct c2_sq_hdr sq_hdr; 998 struct c2_sq_hdr sq_hdr;
999 u64 remote_to; 999 __be64 remote_to;
1000 u32 remote_stag; 1000 __be32 remote_stag;
1001 u32 sge_len; 1001 __be32 sge_len;
1002 u8 data[0]; /* SGE array */ 1002 u8 data[0]; /* SGE array */
1003} __attribute__((packed)); 1003} __attribute__((packed));
1004 1004
@@ -1009,11 +1009,11 @@ union c2wr_rdma_write {
1009 1009
1010struct c2wr_rdma_read_req { 1010struct c2wr_rdma_read_req {
1011 struct c2_sq_hdr sq_hdr; 1011 struct c2_sq_hdr sq_hdr;
1012 u64 local_to; 1012 __be64 local_to;
1013 u64 remote_to; 1013 __be64 remote_to;
1014 u32 local_stag; 1014 __be32 local_stag;
1015 u32 remote_stag; 1015 __be32 remote_stag;
1016 u32 length; 1016 __be32 length;
1017} __attribute__((packed)); 1017} __attribute__((packed));
1018 1018
1019union c2wr_rdma_read { 1019union c2wr_rdma_read {
@@ -1113,9 +1113,9 @@ union c2wr_recv {
1113struct c2wr_ae_hdr { 1113struct c2wr_ae_hdr {
1114 struct c2wr_hdr hdr; 1114 struct c2wr_hdr hdr;
1115 u64 user_context; /* user context for this res. */ 1115 u64 user_context; /* user context for this res. */
1116 u32 resource_type; /* see enum c2_resource_indicator */ 1116 __be32 resource_type; /* see enum c2_resource_indicator */
1117 u32 resource; /* handle for resource */ 1117 __be32 resource; /* handle for resource */
1118 u32 qp_state; /* current QP State */ 1118 __be32 qp_state; /* current QP State */
1119} __attribute__((packed)); 1119} __attribute__((packed));
1120 1120
1121/* 1121/*
@@ -1124,11 +1124,11 @@ struct c2wr_ae_hdr {
1124 */ 1124 */
1125struct c2wr_ae_active_connect_results { 1125struct c2wr_ae_active_connect_results {
1126 struct c2wr_ae_hdr ae_hdr; 1126 struct c2wr_ae_hdr ae_hdr;
1127 u32 laddr; 1127 __be32 laddr;
1128 u32 raddr; 1128 __be32 raddr;
1129 u16 lport; 1129 __be16 lport;
1130 u16 rport; 1130 __be16 rport;
1131 u32 private_data_length; 1131 __be32 private_data_length;
1132 u8 private_data[0]; /* data is in-line in the msg. */ 1132 u8 private_data[0]; /* data is in-line in the msg. */
1133} __attribute__((packed)); 1133} __attribute__((packed));
1134 1134
@@ -1142,11 +1142,11 @@ struct c2wr_ae_active_connect_results {
1142struct c2wr_ae_connection_request { 1142struct c2wr_ae_connection_request {
1143 struct c2wr_ae_hdr ae_hdr; 1143 struct c2wr_ae_hdr ae_hdr;
1144 u32 cr_handle; /* connreq handle (sock ptr) */ 1144 u32 cr_handle; /* connreq handle (sock ptr) */
1145 u32 laddr; 1145 __be32 laddr;
1146 u32 raddr; 1146 __be32 raddr;
1147 u16 lport; 1147 __be16 lport;
1148 u16 rport; 1148 __be16 rport;
1149 u32 private_data_length; 1149 __be32 private_data_length;
1150 u8 private_data[0]; /* data is in-line in the msg. */ 1150 u8 private_data[0]; /* data is in-line in the msg. */
1151} __attribute__((packed)); 1151} __attribute__((packed));
1152 1152
@@ -1158,12 +1158,12 @@ union c2wr_ae {
1158 1158
1159struct c2wr_init_req { 1159struct c2wr_init_req {
1160 struct c2wr_hdr hdr; 1160 struct c2wr_hdr hdr;
1161 u64 hint_count; 1161 __be64 hint_count;
1162 u64 q0_host_shared; 1162 __be64 q0_host_shared;
1163 u64 q1_host_shared; 1163 __be64 q1_host_shared;
1164 u64 q1_host_msg_pool; 1164 __be64 q1_host_msg_pool;
1165 u64 q2_host_shared; 1165 __be64 q2_host_shared;
1166 u64 q2_host_msg_pool; 1166 __be64 q2_host_msg_pool;
1167} __attribute__((packed)); 1167} __attribute__((packed));
1168 1168
1169struct c2wr_init_rep { 1169struct c2wr_init_rep {
@@ -1276,10 +1276,10 @@ struct c2wr_ep_listen_create_req {
1276 struct c2wr_hdr hdr; 1276 struct c2wr_hdr hdr;
1277 u64 user_context; /* returned in AEs. */ 1277 u64 user_context; /* returned in AEs. */
1278 u32 rnic_handle; 1278 u32 rnic_handle;
1279 u32 local_addr; /* local addr, or 0 */ 1279 __be32 local_addr; /* local addr, or 0 */
1280 u16 local_port; /* 0 means "pick one" */ 1280 __be16 local_port; /* 0 means "pick one" */
1281 u16 pad; 1281 u16 pad;
1282 u32 backlog; /* traditional tcp listen bl */ 1282 __be32 backlog; /* traditional tcp listen bl */
1283} __attribute__((packed)); 1283} __attribute__((packed));
1284 1284
1285struct c2wr_ep_listen_create_rep { 1285struct c2wr_ep_listen_create_rep {
@@ -1340,7 +1340,7 @@ struct c2wr_cr_accept_req {
1340 u32 rnic_handle; 1340 u32 rnic_handle;
1341 u32 qp_handle; /* QP to bind to this LLP conn */ 1341 u32 qp_handle; /* QP to bind to this LLP conn */
1342 u32 ep_handle; /* LLP handle to accept */ 1342 u32 ep_handle; /* LLP handle to accept */
1343 u32 private_data_length; 1343 __be32 private_data_length;
1344 u8 private_data[0]; /* data in-line in msg. */ 1344 u8 private_data[0]; /* data in-line in msg. */
1345} __attribute__((packed)); 1345} __attribute__((packed));
1346 1346
@@ -1508,7 +1508,7 @@ static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
1508{ 1508{
1509 ((struct c2wr_hdr *) wr)->sge_count = sge_count; 1509 ((struct c2wr_hdr *) wr)->sge_count = sge_count;
1510} 1510}
1511static __inline__ u32 c2_wr_get_wqe_count(void *wr) 1511static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
1512{ 1512{
1513 return ((struct c2wr_hdr *) wr)->wqe_count; 1513 return ((struct c2wr_hdr *) wr)->wqe_count;
1514} 1514}
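The c2_wr.h conversions above re-type the fields of the host/adapter message structures from u32/u64 to the bitwise __be16/__be32/__be64 types. The layouts are identical at runtime; the payoff is static: sparse (invoked with something like make C=2 CF=-D__CHECK_ENDIAN__, the usual incantation of the era) now warns whenever a field is accessed without an explicit cpu_to_be*/be*_to_cpu conversion. An illustrative struct in the same style, with hypothetical field names:

	struct example_wire_msg {		/* big-endian on the wire */
		__be32 length;
		__be16 port;
		u16    pad;			/* never interpreted; stays u16 */
	};

	static void fill_msg(struct example_wire_msg *m, u32 len, u16 port)
	{
		m->length = cpu_to_be32(len);	/* explicit conversion: OK */
		m->port = cpu_to_be16(port);
		/* m->length = len; would draw a sparse warning here */
	}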
diff --git a/drivers/infiniband/hw/cxgb3/cxio_dbg.c b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
index 75f7b16a271d..a8d24d53f307 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_dbg.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
@@ -45,16 +45,16 @@ void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
45 45
46 m = kmalloc(sizeof(*m) + size, GFP_ATOMIC); 46 m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
47 if (!m) { 47 if (!m) {
48 PDBG("%s couldn't allocate memory.\n", __FUNCTION__); 48 PDBG("%s couldn't allocate memory.\n", __func__);
49 return; 49 return;
50 } 50 }
51 m->mem_id = MEM_PMRX; 51 m->mem_id = MEM_PMRX;
52 m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base; 52 m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
53 m->len = size; 53 m->len = size;
54 PDBG("%s TPT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len); 54 PDBG("%s TPT addr 0x%x len %d\n", __func__, m->addr, m->len);
55 rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m); 55 rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
56 if (rc) { 56 if (rc) {
57 PDBG("%s toectl returned error %d\n", __FUNCTION__, rc); 57 PDBG("%s toectl returned error %d\n", __func__, rc);
58 kfree(m); 58 kfree(m);
59 return; 59 return;
60 } 60 }
@@ -82,17 +82,17 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
82 82
83 m = kmalloc(sizeof(*m) + size, GFP_ATOMIC); 83 m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
84 if (!m) { 84 if (!m) {
85 PDBG("%s couldn't allocate memory.\n", __FUNCTION__); 85 PDBG("%s couldn't allocate memory.\n", __func__);
86 return; 86 return;
87 } 87 }
88 m->mem_id = MEM_PMRX; 88 m->mem_id = MEM_PMRX;
89 m->addr = pbl_addr; 89 m->addr = pbl_addr;
90 m->len = size; 90 m->len = size;
91 PDBG("%s PBL addr 0x%x len %d depth %d\n", 91 PDBG("%s PBL addr 0x%x len %d depth %d\n",
92 __FUNCTION__, m->addr, m->len, npages); 92 __func__, m->addr, m->len, npages);
93 rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m); 93 rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
94 if (rc) { 94 if (rc) {
95 PDBG("%s toectl returned error %d\n", __FUNCTION__, rc); 95 PDBG("%s toectl returned error %d\n", __func__, rc);
96 kfree(m); 96 kfree(m);
97 return; 97 return;
98 } 98 }
@@ -144,16 +144,16 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
144 144
145 m = kmalloc(sizeof(*m) + size, GFP_ATOMIC); 145 m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
146 if (!m) { 146 if (!m) {
147 PDBG("%s couldn't allocate memory.\n", __FUNCTION__); 147 PDBG("%s couldn't allocate memory.\n", __func__);
148 return; 148 return;
149 } 149 }
150 m->mem_id = MEM_PMRX; 150 m->mem_id = MEM_PMRX;
151 m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base; 151 m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
152 m->len = size; 152 m->len = size;
153 PDBG("%s RQT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len); 153 PDBG("%s RQT addr 0x%x len %d\n", __func__, m->addr, m->len);
154 rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m); 154 rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
155 if (rc) { 155 if (rc) {
156 PDBG("%s toectl returned error %d\n", __FUNCTION__, rc); 156 PDBG("%s toectl returned error %d\n", __func__, rc);
157 kfree(m); 157 kfree(m);
158 return; 158 return;
159 } 159 }
@@ -177,16 +177,16 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
177 177
178 m = kmalloc(sizeof(*m) + size, GFP_ATOMIC); 178 m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
179 if (!m) { 179 if (!m) {
180 PDBG("%s couldn't allocate memory.\n", __FUNCTION__); 180 PDBG("%s couldn't allocate memory.\n", __func__);
181 return; 181 return;
182 } 182 }
183 m->mem_id = MEM_CM; 183 m->mem_id = MEM_CM;
184 m->addr = hwtid * size; 184 m->addr = hwtid * size;
185 m->len = size; 185 m->len = size;
186 PDBG("%s TCB %d len %d\n", __FUNCTION__, m->addr, m->len); 186 PDBG("%s TCB %d len %d\n", __func__, m->addr, m->len);
187 rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m); 187 rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
188 if (rc) { 188 if (rc) {
189 PDBG("%s toectl returned error %d\n", __FUNCTION__, rc); 189 PDBG("%s toectl returned error %d\n", __func__, rc);
190 kfree(m); 190 kfree(m);
191 return; 191 return;
192 } 192 }
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 03c5ff62889a..66eb7030aea8 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -140,7 +140,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
140 struct t3_modify_qp_wr *wqe; 140 struct t3_modify_qp_wr *wqe;
141 struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); 141 struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
142 if (!skb) { 142 if (!skb) {
143 PDBG("%s alloc_skb failed\n", __FUNCTION__); 143 PDBG("%s alloc_skb failed\n", __func__);
144 return -ENOMEM; 144 return -ENOMEM;
145 } 145 }
146 wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); 146 wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
@@ -225,7 +225,7 @@ static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
225 } 225 }
226out: 226out:
227 mutex_unlock(&uctx->lock); 227 mutex_unlock(&uctx->lock);
228 PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid); 228 PDBG("%s qpid 0x%x\n", __func__, qpid);
229 return qpid; 229 return qpid;
230} 230}
231 231
@@ -237,7 +237,7 @@ static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
237 entry = kmalloc(sizeof *entry, GFP_KERNEL); 237 entry = kmalloc(sizeof *entry, GFP_KERNEL);
238 if (!entry) 238 if (!entry)
239 return; 239 return;
240 PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid); 240 PDBG("%s qpid 0x%x\n", __func__, qpid);
241 entry->qpid = qpid; 241 entry->qpid = qpid;
242 mutex_lock(&uctx->lock); 242 mutex_lock(&uctx->lock);
243 list_add_tail(&entry->entry, &uctx->qpids); 243 list_add_tail(&entry->entry, &uctx->qpids);
@@ -300,7 +300,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
300 if (!kernel_domain) 300 if (!kernel_domain)
301 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase + 301 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
302 (wq->qpid << rdev_p->qpshift); 302 (wq->qpid << rdev_p->qpshift);
303 PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__, 303 PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
304 wq->qpid, wq->doorbell, (unsigned long long) wq->udb); 304 wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
305 return 0; 305 return 0;
306err4: 306err4:
@@ -345,7 +345,7 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
345{ 345{
346 struct t3_cqe cqe; 346 struct t3_cqe cqe;
347 347
348 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__, 348 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
349 wq, cq, cq->sw_rptr, cq->sw_wptr); 349 wq, cq, cq->sw_rptr, cq->sw_wptr);
350 memset(&cqe, 0, sizeof(cqe)); 350 memset(&cqe, 0, sizeof(cqe));
351 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | 351 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -363,10 +363,10 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
363{ 363{
364 u32 ptr; 364 u32 ptr;
365 365
366 PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq); 366 PDBG("%s wq %p cq %p\n", __func__, wq, cq);
367 367
368 /* flush RQ */ 368 /* flush RQ */
369 PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__, 369 PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
370 wq->rq_rptr, wq->rq_wptr, count); 370 wq->rq_rptr, wq->rq_wptr, count);
371 ptr = wq->rq_rptr + count; 371 ptr = wq->rq_rptr + count;
372 while (ptr++ != wq->rq_wptr) 372 while (ptr++ != wq->rq_wptr)
@@ -378,7 +378,7 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
378{ 378{
379 struct t3_cqe cqe; 379 struct t3_cqe cqe;
380 380
381 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__, 381 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
382 wq, cq, cq->sw_rptr, cq->sw_wptr); 382 wq, cq, cq->sw_rptr, cq->sw_wptr);
383 memset(&cqe, 0, sizeof(cqe)); 383 memset(&cqe, 0, sizeof(cqe));
384 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | 384 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@@ -415,11 +415,11 @@ void cxio_flush_hw_cq(struct t3_cq *cq)
415{ 415{
416 struct t3_cqe *cqe, *swcqe; 416 struct t3_cqe *cqe, *swcqe;
417 417
418 PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid); 418 PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
419 cqe = cxio_next_hw_cqe(cq); 419 cqe = cxio_next_hw_cqe(cq);
420 while (cqe) { 420 while (cqe) {
421 PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n", 421 PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
422 __FUNCTION__, cq->rptr, cq->sw_wptr); 422 __func__, cq->rptr, cq->sw_wptr);
423 swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2); 423 swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
424 *swcqe = *cqe; 424 *swcqe = *cqe;
425 swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1)); 425 swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
@@ -461,7 +461,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
461 (*count)++; 461 (*count)++;
462 ptr++; 462 ptr++;
463 } 463 }
464 PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count); 464 PDBG("%s cq %p count %d\n", __func__, cq, *count);
465} 465}
466 466
467void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count) 467void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
@@ -470,7 +470,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
470 u32 ptr; 470 u32 ptr;
471 471
472 *count = 0; 472 *count = 0;
473 PDBG("%s count zero %d\n", __FUNCTION__, *count); 473 PDBG("%s count zero %d\n", __func__, *count);
474 ptr = cq->sw_rptr; 474 ptr = cq->sw_rptr;
475 while (!Q_EMPTY(ptr, cq->sw_wptr)) { 475 while (!Q_EMPTY(ptr, cq->sw_wptr)) {
476 cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2)); 476 cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
@@ -479,7 +479,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
479 (*count)++; 479 (*count)++;
480 ptr++; 480 ptr++;
481 } 481 }
482 PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count); 482 PDBG("%s cq %p count %d\n", __func__, cq, *count);
483} 483}
484 484
485static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p) 485static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
@@ -506,12 +506,12 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
506 506
507 skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); 507 skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
508 if (!skb) { 508 if (!skb) {
509 PDBG("%s alloc_skb failed\n", __FUNCTION__); 509 PDBG("%s alloc_skb failed\n", __func__);
510 return -ENOMEM; 510 return -ENOMEM;
511 } 511 }
512 err = cxio_hal_init_ctrl_cq(rdev_p); 512 err = cxio_hal_init_ctrl_cq(rdev_p);
513 if (err) { 513 if (err) {
514 PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err); 514 PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
515 goto err; 515 goto err;
516 } 516 }
517 rdev_p->ctrl_qp.workq = dma_alloc_coherent( 517 rdev_p->ctrl_qp.workq = dma_alloc_coherent(
@@ -521,7 +521,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
521 &(rdev_p->ctrl_qp.dma_addr), 521 &(rdev_p->ctrl_qp.dma_addr),
522 GFP_KERNEL); 522 GFP_KERNEL);
523 if (!rdev_p->ctrl_qp.workq) { 523 if (!rdev_p->ctrl_qp.workq) {
524 PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__); 524 PDBG("%s dma_alloc_coherent failed\n", __func__);
525 err = -ENOMEM; 525 err = -ENOMEM;
526 goto err; 526 goto err;
527 } 527 }
@@ -591,25 +591,25 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
591 addr &= 0x7FFFFFF; 591 addr &= 0x7FFFFFF;
592 nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */ 592 nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
593 PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n", 593 PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
594 __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len, 594 __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
595 nr_wqe, data, addr); 595 nr_wqe, data, addr);
596 utx_len = 3; /* in 32B unit */ 596 utx_len = 3; /* in 32B unit */
597 for (i = 0; i < nr_wqe; i++) { 597 for (i = 0; i < nr_wqe; i++) {
598 if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr, 598 if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
599 T3_CTRL_QP_SIZE_LOG2)) { 599 T3_CTRL_QP_SIZE_LOG2)) {
600 PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, " 600 PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
601 "wait for more space i %d\n", __FUNCTION__, 601 "wait for more space i %d\n", __func__,
602 rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i); 602 rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
603 if (wait_event_interruptible(rdev_p->ctrl_qp.waitq, 603 if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
604 !Q_FULL(rdev_p->ctrl_qp.rptr, 604 !Q_FULL(rdev_p->ctrl_qp.rptr,
605 rdev_p->ctrl_qp.wptr, 605 rdev_p->ctrl_qp.wptr,
606 T3_CTRL_QP_SIZE_LOG2))) { 606 T3_CTRL_QP_SIZE_LOG2))) {
607 PDBG("%s ctrl_qp workq interrupted\n", 607 PDBG("%s ctrl_qp workq interrupted\n",
608 __FUNCTION__); 608 __func__);
609 return -ERESTARTSYS; 609 return -ERESTARTSYS;
610 } 610 }
611 PDBG("%s ctrl_qp wakeup, continue posting work request " 611 PDBG("%s ctrl_qp wakeup, continue posting work request "
612 "i %d\n", __FUNCTION__, i); 612 "i %d\n", __func__, i);
613 } 613 }
614 wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr % 614 wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
615 (1 << T3_CTRL_QP_SIZE_LOG2))); 615 (1 << T3_CTRL_QP_SIZE_LOG2)));
@@ -630,7 +630,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
630 if ((i != 0) && 630 if ((i != 0) &&
631 (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) { 631 (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
632 flag = T3_COMPLETION_FLAG; 632 flag = T3_COMPLETION_FLAG;
633 PDBG("%s force completion at i %d\n", __FUNCTION__, i); 633 PDBG("%s force completion at i %d\n", __func__, i);
634 } 634 }
635 635
636 /* build the utx mem command */ 636 /* build the utx mem command */
@@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
701 *stag = (stag_idx << 8) | ((*stag) & 0xFF); 701 *stag = (stag_idx << 8) | ((*stag) & 0xFF);
702 } 702 }
703 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", 703 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
704 __FUNCTION__, stag_state, type, pdid, stag_idx); 704 __func__, stag_state, type, pdid, stag_idx);
705 705
706 if (reset_tpt_entry) 706 if (reset_tpt_entry)
707 cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3); 707 cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
@@ -718,7 +718,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
718 if (pbl) { 718 if (pbl) {
719 719
720 PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n", 720 PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
721 __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base, 721 __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
722 *pbl_size); 722 *pbl_size);
723 err = cxio_hal_ctrl_qp_write_mem(rdev_p, 723 err = cxio_hal_ctrl_qp_write_mem(rdev_p,
724 (*pbl_addr >> 5), 724 (*pbl_addr >> 5),
@@ -814,7 +814,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
814 struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC); 814 struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
815 if (!skb) 815 if (!skb)
816 return -ENOMEM; 816 return -ENOMEM;
817 PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p); 817 PDBG("%s rdev_p %p\n", __func__, rdev_p);
818 wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe)); 818 wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
819 wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT)); 819 wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
820 wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) | 820 wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
@@ -856,7 +856,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
856 struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data; 856 struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
857 PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x" 857 PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
858 " se %0x notify %0x cqbranch %0x creditth %0x\n", 858 " se %0x notify %0x cqbranch %0x creditth %0x\n",
859 cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg), 859 cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
860 RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg), 860 RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
861 RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg), 861 RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
862 RSPQ_CREDIT_THRESH(rsp_msg)); 862 RSPQ_CREDIT_THRESH(rsp_msg));
@@ -868,7 +868,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
 		       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 	rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
 	if (!rdev_p) {
-		PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
+		PDBG("%s called by t3cdev %p with null ulp\n", __func__,
 		     t3cdev_p);
 		return 0;
 	}
@@ -908,13 +908,13 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 		strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
 			T3_MAX_DEV_NAME_LEN);
 	} else {
-		PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
+		PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
 		return -EINVAL;
 	}

 	list_add_tail(&rdev_p->entry, &rdev_list);

-	PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
+	PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
 	memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
 	if (!rdev_p->t3cdev_p)
 		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
@@ -923,14 +923,14 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 					 &(rdev_p->rnic_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		       __FUNCTION__, rdev_p->t3cdev_p, err);
+		       __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}
 	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
 				    &(rdev_p->port_info));
 	if (err) {
 		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
-		       __FUNCTION__, rdev_p->t3cdev_p, err);
+		       __func__, rdev_p->t3cdev_p, err);
 		goto err1;
 	}

@@ -947,7 +947,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
 	PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
 	     "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
-	     __FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
+	     __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
 	     rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
 	     rdev_p->rnic_info.pbl_base,
 	     rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
@@ -961,7 +961,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	err = cxio_hal_init_ctrl_qp(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err1;
 	}
 	err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
@@ -969,19 +969,19 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 				     T3_MAX_NUM_PD);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing hal resources.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err2;
 	}
 	err = cxio_hal_pblpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err3;
 	}
 	err = cxio_hal_rqtpool_create(rdev_p);
 	if (err) {
 		printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
-		       __FUNCTION__, err);
+		       __func__, err);
 		goto err4;
 	}
 	return 0;
@@ -1043,7 +1043,7 @@ static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
 			 * Insert this completed cqe into the swcq.
 			 */
 			PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
-			     __FUNCTION__, Q_PTR2IDX(ptr, wq->sq_size_log2),
+			     __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
 			     Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
 			sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
 			*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
@@ -1112,7 +1112,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,

 		PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
 		     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-		     __FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
+		     __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
 		     CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
 		     CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
 		     CQE_WRID_LOW(*hw_cqe));
@@ -1215,7 +1215,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 		struct t3_swsq *sqp;

 		PDBG("%s out of order completion going in swsq at idx %ld\n",
-		     __FUNCTION__,
+		     __func__,
 		     Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
 		sqp = wq->sq +
 		      Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
@@ -1234,13 +1234,13 @@ proc_cqe:
 	 */
 	if (SQ_TYPE(*hw_cqe)) {
 		wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
-		PDBG("%s completing sq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing sq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
 		*cookie = (wq->sq +
 			   Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
 		wq->sq_rptr++;
 	} else {
-		PDBG("%s completing rq idx %ld\n", __FUNCTION__,
+		PDBG("%s completing rq idx %ld\n", __func__,
 		     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		*cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
 		wq->rq_rptr++;
@@ -1255,11 +1255,11 @@ flush_wq:
 skip_cqe:
 	if (SW_CQE(*hw_cqe)) {
 		PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
+		     __func__, cq, cq->cqid, cq->sw_rptr);
 		++cq->sw_rptr;
 	} else {
 		PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
-		     __FUNCTION__, cq, cq->cqid, cq->rptr);
+		     __func__, cq, cq->cqid, cq->rptr);
 		++cq->rptr;

 		/*
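Note: every hunk in cxio_hal.c above makes the same one-token change, replacing the GCC-specific __FUNCTION__ with the C99-standard __func__ predefined identifier; no control flow is touched. A minimal standalone sketch of what the two spellings mean (the demo function below is illustrative, not from this driver):

	#include <stdio.h>

	/* C99 defines __func__ as if each function body contained
	 *     static const char __func__[] = "function-name";
	 * __FUNCTION__ is the older GCC alias for the same string, kept
	 * only for backward compatibility, hence this conversion.
	 */
	static void example(void)
	{
		printf("%s: entered\n", __func__);	/* "example: entered" */
	}

	int main(void)
	{
		example();
		return 0;
	}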
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index d3095ae5bc2e..45ed4f25ef78 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -206,13 +206,13 @@ void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
 u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
 {
 	u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	return qpid;
 }

 void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
 {
-	PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
+	PDBG("%s qpid 0x%x\n", __func__, qpid);
 	cxio_hal_put_resource(rscp->qpid_fifo, qpid);
 }

@@ -255,13 +255,13 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
 u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size);
+	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
 	return (u32)addr;
 }

 void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 {
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size);
+	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
 	gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
 }

@@ -292,13 +292,13 @@ void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
 u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size << 6);
+	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
 	return (u32)addr;
 }

 void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 {
-	PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size << 6);
+	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
 	gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
 }

diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 0315c9d9fce9..6ba4138c8ec3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,7 +65,7 @@ static DEFINE_MUTEX(dev_mutex);

 static void rnic_init(struct iwch_dev *rnicp)
 {
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, rnicp);
+	PDBG("%s iwch_dev %p\n", __func__, rnicp);
 	idr_init(&rnicp->cqidr);
 	idr_init(&rnicp->qpidr);
 	idr_init(&rnicp->mmidr);
@@ -106,7 +106,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
 	struct iwch_dev *rnicp;
 	static int vers_printed;

-	PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
+	PDBG("%s t3cdev %p\n", __func__, tdev);
 	if (!vers_printed++)
 		printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
 		       DRV_VERSION);
@@ -144,7 +144,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
 static void close_rnic_dev(struct t3cdev *tdev)
 {
 	struct iwch_dev *dev, *tmp;
-	PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
+	PDBG("%s t3cdev %p\n", __func__, tdev);
 	mutex_lock(&dev_mutex);
 	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
 		if (dev->rdev.t3cdev_p == tdev) {
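Note: close_rnic_dev() walks dev_list with list_for_each_entry_safe(), the iterator variant that tolerates removal of the current node. A minimal sketch of why the _safe form is required here; the types are illustrative:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_dev {
		struct list_head entry;
	};

	static LIST_HEAD(demo_list);

	static void demo_close_all(void)
	{
		struct demo_dev *dev, *tmp;

		/* 'tmp' caches the next node before the body runs, so
		 * list_del()/kfree() of 'dev' cannot break the walk;
		 * plain list_for_each_entry() would chase freed memory.
		 */
		list_for_each_entry_safe(dev, tmp, &demo_list, entry) {
			list_del(&dev->entry);
			kfree(dev);
		}
	}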
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index caf4e6007a44..9ad9b1e7c8c1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -147,7 +147,7 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
 				void *handle, u32 id)
 {
 	int ret;
-	u32 newid;
+	int newid;

 	do {
 		if (!idr_pre_get(idr, GFP_KERNEL)) {
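Note: the u32 -> int change for newid is the one non-cosmetic fix in this series: the idr allocation calls return the new id through an int *, so a u32 local would need a mismatched-pointer cast and can confuse the negative-errno convention. A sketch of the retry loop insert_handle() implements, assuming the historical two-step idr_pre_get()/idr_get_new_above() interface:

	#include <linux/idr.h>

	static int demo_insert(struct idr *idr, void *handle, u32 id)
	{
		int ret;
		int newid;	/* must be int: the idr API writes an int */

		do {
			if (!idr_pre_get(idr, GFP_KERNEL))
				return -ENOMEM;
			ret = idr_get_new_above(idr, handle, id, &newid);
			BUG_ON(!ret && newid != id);
		} while (ret == -EAGAIN);

		return ret;
	}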
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 99f2f2a46bf7..72ca360c3dbc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -110,9 +110,9 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status);

 static void start_ep_timer(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	if (timer_pending(&ep->timer)) {
-		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
+		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
 		del_timer_sync(&ep->timer);
 	} else
 		get_ep(&ep->com);
@@ -124,7 +124,7 @@ static void start_ep_timer(struct iwch_ep *ep)

 static void stop_ep_timer(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	del_timer_sync(&ep->timer);
 	put_ep(&ep->com);
 }
@@ -190,7 +190,7 @@ int iwch_resume_tid(struct iwch_ep *ep)

 static void set_emss(struct iwch_ep *ep, u16 opt)
 {
-	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
+	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
 	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
 	if (G_TCPOPT_TSTAMP(opt))
 		ep->emss -= 12;
@@ -220,7 +220,7 @@ static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
 	unsigned long flags;

 	spin_lock_irqsave(&epc->lock, flags);
-	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
+	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
 	__state_set(epc, new);
 	spin_unlock_irqrestore(&epc->lock, flags);
 	return;
@@ -236,7 +236,7 @@ static void *alloc_ep(int size, gfp_t gfp)
 		spin_lock_init(&epc->lock);
 		init_waitqueue_head(&epc->waitq);
 	}
-	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
+	PDBG("%s alloc ep %p\n", __func__, epc);
 	return epc;
 }

@@ -244,13 +244,13 @@ void __free_ep(struct kref *kref)
 {
 	struct iwch_ep_common *epc;
 	epc = container_of(kref, struct iwch_ep_common, kref);
-	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
+	PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]);
 	kfree(epc);
 }

 static void release_ep_resources(struct iwch_ep *ep)
 {
-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
 	dst_release(ep->dst);
 	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -349,7 +349,7 @@ static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)

 static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
 {
-	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
+	PDBG("%s t3cdev %p\n", __func__, dev);
 	kfree_skb(skb);
 }

@@ -370,7 +370,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req *req = cplhdr(skb);

-	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
+	PDBG("%s t3cdev %p\n", __func__, dev);
 	req->cmd = CPL_ABORT_NO_RST;
 	cxgb3_ofld_send(dev, skb);
 }
@@ -380,10 +380,10 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
 	struct cpl_close_con_req *req;
 	struct sk_buff *skb;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), gfp);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -400,11 +400,11 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
 	struct cpl_abort_req *req;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(skb, sizeof(*req), gfp);
 	if (!skb) {
 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __FUNCTION__);
+		       __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -426,12 +426,12 @@ static int send_connect(struct iwch_ep *ep)
 	unsigned int mtu_idx;
 	int wscale;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);

 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __FUNCTION__);
+		       __func__);
 		return -ENOMEM;
 	}
 	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
@@ -470,7 +470,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
 	struct mpa_message *mpa;
 	int len;

-	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);
+	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

 	BUG_ON(skb_cloned(skb));

@@ -530,13 +530,13 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
 	struct mpa_message *mpa;
 	struct sk_buff *skb;

-	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
+	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

 	mpalen = sizeof(*mpa) + plen;

 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	skb_reserve(skb, sizeof(*req));
@@ -580,13 +580,13 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
 	int len;
 	struct sk_buff *skb;

-	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
+	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

 	mpalen = sizeof(*mpa) + plen;

 	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 		return -ENOMEM;
 	}
 	skb->priority = CPL_PRIORITY_DATA;
@@ -630,7 +630,7 @@ static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_act_establish *req = cplhdr(skb);
 	unsigned int tid = GET_TID(req);

-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

 	dst_confirm(ep->dst);

@@ -663,7 +663,7 @@ static void close_complete_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	if (ep->com.cm_id) {
@@ -680,7 +680,7 @@ static void peer_close_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_DISCONNECT;
 	if (ep->com.cm_id) {
@@ -694,7 +694,7 @@ static void peer_abort_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
 	event.status = -ECONNRESET;
@@ -712,7 +712,7 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
 {
 	struct iw_cm_event event;

-	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
+	PDBG("%s ep %p status %d\n", __func__, ep, status);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REPLY;
 	event.status = status;
@@ -724,7 +724,7 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
 	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
+		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
 		     ep->hwtid, status);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 	}
@@ -739,7 +739,7 @@ static void connect_request_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;

-	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
 	event.local_addr = ep->com.local_addr;
@@ -759,11 +759,11 @@ static void established_upcall(struct iwch_ep *ep)
 {
 	struct iw_cm_event event;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_ESTABLISHED;
 	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
+		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 	}
 }
@@ -773,7 +773,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
 	struct cpl_rx_data_ack *req;
 	struct sk_buff *skb;

-	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
+	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
@@ -797,7 +797,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 	enum iwch_qp_attr_mask mask;
 	int err;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);

 	/*
 	 * Stop mpa timer. If it expired, then the state has
@@ -884,7 +884,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa_rev;
 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
-	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
+	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

@@ -915,7 +915,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	struct mpa_message *mpa;
 	u16 plen;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);

 	/*
 	 * Stop mpa timer. If it expired, then the state has
@@ -935,7 +935,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 		return;
 	}

-	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

 	/*
 	 * Copy the new data into our accumulation buffer.
@@ -950,7 +950,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
 		return;
-	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
+	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 	mpa = (struct mpa_message *) ep->mpa_pkt;

 	/*
@@ -1000,7 +1000,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa_rev;
 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
-	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
+	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

@@ -1017,7 +1017,7 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_rx_data *hdr = cplhdr(skb);
 	unsigned int dlen = ntohs(hdr->len);

-	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);
+	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
@@ -1037,7 +1037,7 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	default:
 		printk(KERN_ERR MOD "%s Unexpected streaming data."
 		       " ep %p state %d tid %d\n",
-		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);
+		       __func__, ep, state_read(&ep->com), ep->hwtid);

 		/*
 		 * The ep will timeout and inform the ULP of the failure.
@@ -1063,7 +1063,7 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);

-	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
+	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

 	if (credits == 0)
 		return CPL_RET_BUF_DONE;
@@ -1084,7 +1084,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep *ep = ctx;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);

 	/*
 	 * We get 2 abort replies from the HW. The first one must
@@ -1115,7 +1115,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_ep *ep = ctx;
 	struct cpl_act_open_rpl *rpl = cplhdr(skb);

-	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
+	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
 	     status2errno(rpl->status));
 	connect_reply_upcall(ep, status2errno(rpl->status));
 	state_set(&ep->com, DEAD);
@@ -1133,7 +1133,7 @@ static int listen_start(struct iwch_listen_ep *ep)
 	struct sk_buff *skb;
 	struct cpl_pass_open_req *req;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
 		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
@@ -1162,7 +1162,7 @@ static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_listen_ep *ep = ctx;
 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

-	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
+	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
 	ep->com.rpl_err = status2errno(rpl->status);
 	ep->com.rpl_done = 1;
@@ -1176,10 +1176,10 @@ static int listen_stop(struct iwch_listen_ep *ep)
 	struct sk_buff *skb;
 	struct cpl_close_listserv_req *req;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 		return -ENOMEM;
 	}
 	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
@@ -1197,7 +1197,7 @@ static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
 	struct iwch_listen_ep *ep = ctx;
 	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.rpl_err = status2errno(rpl->status);
 	ep->com.rpl_done = 1;
 	wake_up(&ep->com.waitq);
@@ -1211,7 +1211,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
 	u32 opt0h, opt0l, opt2;
 	int wscale;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(*rpl));
 	skb_get(skb);
@@ -1244,7 +1244,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
 static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 		      struct sk_buff *skb)
 {
-	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
+	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
 	     peer_ip);
 	BUG_ON(skb_cloned(skb));
 	skb_trim(skb, sizeof(struct cpl_tid_release));
@@ -1279,11 +1279,11 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct rtable *rt;
 	struct iff_mac tim;

-	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);
+	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

 	if (state_read(&parent_ep->com) != LISTEN) {
 		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
-		       __FUNCTION__);
+		       __func__);
 		goto reject;
 	}

@@ -1295,7 +1295,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
 		printk(KERN_ERR
 		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
-		       __FUNCTION__,
+		       __func__,
 		       req->dst_mac[0],
 		       req->dst_mac[1],
 		       req->dst_mac[2],
@@ -1313,21 +1313,21 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		       req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
 	if (!rt) {
 		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		goto reject;
 	}
 	dst = &rt->u.dst;
 	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
 	if (!l2t) {
 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		dst_release(dst);
 		goto reject;
 	}
 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
 	if (!child_ep) {
 		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
-		       __FUNCTION__);
+		       __func__);
 		l2t_release(L2DATA(tdev), l2t);
 		dst_release(dst);
 		goto reject;
@@ -1362,7 +1362,7 @@ static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct iwch_ep *ep = ctx;
 	struct cpl_pass_establish *req = cplhdr(skb);

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->snd_seq = ntohl(req->snd_isn);
 	ep->rcv_seq = ntohl(req->rcv_isn);

@@ -1383,7 +1383,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	int disconnect = 1;
 	int release = 0;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	dst_confirm(ep->dst);

 	spin_lock_irqsave(&ep->com.lock, flags);
@@ -1473,7 +1473,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	int state;

 	if (is_neg_adv_abort(req->status)) {
-		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
+		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
 		     ep->hwtid);
 		t3_l2t_send_event(ep->com.tdev, ep->l2t);
 		return CPL_RET_BUF_DONE;
@@ -1489,7 +1489,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	}

 	state = state_read(&ep->com);
-	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
+	PDBG("%s ep %p state %u\n", __func__, ep, state);
 	switch (state) {
 	case CONNECTING:
 		break;
@@ -1528,14 +1528,14 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 			if (ret)
 				printk(KERN_ERR MOD
 				       "%s - qp <- error failed!\n",
-				       __FUNCTION__);
+				       __func__);
 		}
 		peer_abort_upcall(ep);
 		break;
 	case ABORTING:
 		break;
 	case DEAD:
-		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
+		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
 		return CPL_RET_BUF_DONE;
 	default:
 		BUG_ON(1);
@@ -1546,7 +1546,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
 	if (!rpl_skb) {
 		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
-		       __FUNCTION__);
+		       __func__);
 		dst_release(ep->dst);
 		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
 		put_ep(&ep->com);
@@ -1573,7 +1573,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	unsigned long flags;
 	int release = 0;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	BUG_ON(!ep);

 	/* The cm_id may be null if we failed to connect */
@@ -1624,9 +1624,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep *ep = ctx;

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
-	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
+	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
 	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
 				  skb->len);
 	ep->com.qp->attr.terminate_msg_len = skb->len;
@@ -1639,13 +1639,13 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	struct cpl_rdma_ec_status *rep = cplhdr(skb);
 	struct iwch_ep *ep = ctx;

-	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
+	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
 	     rep->status);
 	if (rep->status) {
 		struct iwch_qp_attributes attrs;

 		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
-		       __FUNCTION__, ep->hwtid);
+		       __func__, ep->hwtid);
 		stop_ep_timer(ep);
 		attrs.next_state = IWCH_QP_STATE_ERROR;
 		iwch_modify_qp(ep->com.qp->rhp,
@@ -1663,7 +1663,7 @@ static void ep_timeout(unsigned long arg)
 	unsigned long flags;

 	spin_lock_irqsave(&ep->com.lock, flags);
-	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
+	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
@@ -1693,7 +1693,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
 	int err;
 	struct iwch_ep *ep = to_ep(cm_id);
-	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

 	if (state_read(&ep->com) == DEAD) {
 		put_ep(&ep->com);
@@ -1718,7 +1718,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct iwch_dev *h = to_iwch_dev(cm_id->device);
 	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

-	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
+	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	if (state_read(&ep->com) == DEAD)
 		return -ECONNRESET;

@@ -1739,7 +1739,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	ep->com.rpl_err = 0;
 	ep->ird = conn_param->ird;
 	ep->ord = conn_param->ord;
-	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);
+	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

 	get_ep(&ep->com);

@@ -1810,7 +1810,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)

 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
-		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
 		err = -ENOMEM;
 		goto out;
 	}
@@ -1827,7 +1827,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	ep->com.cm_id = cm_id;
 	ep->com.qp = get_qhp(h, conn_param->qpn);
 	BUG_ON(!ep->com.qp);
-	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
+	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
 	     ep->com.qp, cm_id);

 	/*
@@ -1835,7 +1835,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	 */
 	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
 	if (ep->atid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -1847,7 +1847,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 			  cm_id->local_addr.sin_port,
 			  cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
 	if (!rt) {
-		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
 		err = -EHOSTUNREACH;
 		goto fail3;
 	}
@@ -1857,7 +1857,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
 			     ep->dst->neighbour->dev);
 	if (!ep->l2t) {
-		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
 		err = -ENOMEM;
 		goto fail4;
 	}
@@ -1894,11 +1894,11 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)

 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
-		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
 		err = -ENOMEM;
 		goto fail1;
 	}
-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.tdev = h->rdev.t3cdev_p;
 	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
@@ -1910,7 +1910,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
 	 */
 	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
 	if (ep->stid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
+		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -1942,7 +1942,7 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id)
 	int err;
 	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

-	PDBG("%s ep %p\n", __FUNCTION__, ep);
+	PDBG("%s ep %p\n", __func__, ep);

 	might_sleep();
 	state_set(&ep->com, DEAD);
@@ -1965,11 +1965,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)

 	spin_lock_irqsave(&ep->com.lock, flags);

-	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
+	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
 	     states[ep->com.state], abrupt);

 	if (ep->com.state == DEAD) {
-		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
+		PDBG("%s already dead ep %p\n", __func__, ep);
 		goto out;
 	}

@@ -2020,7 +2020,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
 	if (ep->dst != old)
 		return 0;

-	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
+	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
 	     l2t);
 	dst_hold(new);
 	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 6107e7cd9b57..2bb7fbdb3ff4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -54,13 +54,13 @@
 #define MPA_FLAGS_MASK		0xE0

 #define put_ep(ep) { \
-	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __FUNCTION__, __LINE__, \
+	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
 	     ep, atomic_read(&((ep)->kref.refcount))); \
 	kref_put(&((ep)->kref), __free_ep); \
 }

 #define get_ep(ep) { \
-	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __FUNCTION__, __LINE__, \
+	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
 	     ep, atomic_read(&((ep)->kref.refcount))); \
 	kref_get(&((ep)->kref)); \
 }
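Note: put_ep()/get_ep() wrap kref counters in macros rather than functions precisely so that __func__ and __LINE__ expand at the call site, making the refcount trace attribute each get/put to its caller. A minimal sketch of the same pattern outside this driver, assuming the atomic_t-based kref of this era; all names are illustrative:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct kref kref;
	};

	static void demo_release(struct kref *kref)
	{
		kfree(container_of(kref, struct demo_obj, kref));
	}

	/* A macro, not a function, so __func__/__LINE__ name the caller. */
	#define demo_put(obj) {						\
		pr_debug("put (via %s:%u) refcnt %d\n", __func__, __LINE__, \
			 atomic_read(&(obj)->kref.refcount));		\
		kref_put(&(obj)->kref, demo_release);			\
	}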
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
index d7624c170ee7..4ee8ccd0a9e5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
@@ -67,7 +67,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
 	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
 			   &credit);
 	if (t3a_device(chp->rhp) && credit) {
-		PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
+		PDBG("%s updating %d cq credits on id %d\n", __func__,
 		     credit, chp->cq.cqid);
 		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
 	}
@@ -83,7 +83,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
 	wc->vendor_err = CQE_STATUS(cqe);

 	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
-	     "lo 0x%x cookie 0x%llx\n", __FUNCTION__,
+	     "lo 0x%x cookie 0x%llx\n", __func__,
 	     CQE_QPID(cqe), CQE_TYPE(cqe),
 	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
 	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index b40676662a8a..7b67a6771720 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -52,7 +52,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,

 	if (!qhp) {
 		printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
-		       __FUNCTION__, CQE_STATUS(rsp_msg->cqe),
+		       __func__, CQE_STATUS(rsp_msg->cqe),
 		       CQE_QPID(rsp_msg->cqe));
 		spin_unlock(&rnicp->lock);
 		return;
@@ -61,14 +61,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
 	    (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
 		PDBG("%s AE received after RTS - "
-		     "qp state %d qpid 0x%x status 0x%x\n", __FUNCTION__,
+		     "qp state %d qpid 0x%x status 0x%x\n", __func__,
 		     qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
 		spin_unlock(&rnicp->lock);
 		return;
 	}

 	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
 	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
 	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
 	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
@@ -132,10 +132,10 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
 		if (SQ_TYPE(rsp_msg->cqe)) {
 			PDBG("%s QPID 0x%x ep %p disconnecting\n",
-			     __FUNCTION__, qhp->wq.qpid, qhp->ep);
+			     __func__, qhp->wq.qpid, qhp->ep);
 			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
 		} else {
-			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __FUNCTION__,
+			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
 			     qhp->wq.qpid);
 			post_qp_event(rnicp, chp, rsp_msg,
 				      IB_EVENT_QP_REQ_ERR, 0);
@@ -180,7 +180,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	case TPT_ERR_INVALIDATE_SHARED_MR:
 	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
 		printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
-		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
+		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
 		       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
 		       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
 		       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index b8797c66676d..58c3d61bcd14 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -62,7 +62,7 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
 	return 0;
 }

@@ -96,7 +96,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
 	return 0;
 }

@@ -163,7 +163,7 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
 			 ((u64) j << *shift));

 	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
-	     __FUNCTION__, (unsigned long long) *iova_start,
+	     __func__, (unsigned long long) *iova_start,
 	     (unsigned long long) mask, *shift, (unsigned long long) *total_size,
 	     *npages);

diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b2ea9210467f..ca7265443c05 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -101,7 +101,7 @@ static int iwch_dealloc_ucontext(struct ib_ucontext *context)
 	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
 	struct iwch_mm_entry *mm, *tmp;
 
-	PDBG("%s context %p\n", __FUNCTION__, context);
+	PDBG("%s context %p\n", __func__, context);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -115,7 +115,7 @@ static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
 	struct iwch_ucontext *context;
 	struct iwch_dev *rhp = to_iwch_dev(ibdev);
 
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return ERR_PTR(-ENOMEM);
@@ -129,7 +129,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
 {
 	struct iwch_cq *chp;
 
-	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
+	PDBG("%s ib_cq %p\n", __func__, ib_cq);
 	chp = to_iwch_cq(ib_cq);
 
 	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -151,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 	struct iwch_create_cq_req ureq;
 	struct iwch_ucontext *ucontext = NULL;
 
-	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
+	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
 	rhp = to_iwch_dev(ibdev);
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
@@ -233,7 +233,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 	struct t3_cq oldcq, newcq;
 	int ret;
 
-	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);
+	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
 
 	/* We don't downsize... */
 	if (cqe <= cq->cqe)
@@ -281,7 +281,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
 	if (ret) {
 		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
-			__FUNCTION__, ret);
+			__func__, ret);
 	}
 
 	/* add user hooks here */
@@ -316,7 +316,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 		chp->cq.rptr = rptr;
 	} else
 		spin_lock_irqsave(&chp->lock, flag);
-	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
+	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
 	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
 	spin_unlock_irqrestore(&chp->lock, flag);
 	if (err < 0)
@@ -337,7 +337,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct iwch_ucontext *ucontext;
 	u64 addr;
 
-	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
+	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
 	     key, len);
 
 	if (vma->vm_start & (PAGE_SIZE-1)) {
@@ -390,7 +390,7 @@ static int iwch_deallocate_pd(struct ib_pd *pd)
 
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
-	PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
+	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
 	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
 	kfree(php);
 	return 0;
@@ -404,7 +404,7 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
 	u32 pdid;
 	struct iwch_dev *rhp;
 
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	rhp = (struct iwch_dev *) ibdev;
 	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
 	if (!pdid)
@@ -422,7 +422,7 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
 			return ERR_PTR(-EFAULT);
 		}
 	}
-	PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
+	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
 	return &php->ibpd;
 }
 
@@ -432,7 +432,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
 	struct iwch_mr *mhp;
 	u32 mmid;
 
-	PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
+	PDBG("%s ib_mr %p\n", __func__, ib_mr);
 	/* There can be no memory windows */
 	if (atomic_read(&ib_mr->usecnt))
 		return -EINVAL;
@@ -447,7 +447,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
 		kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
 		ib_umem_release(mhp->umem);
-	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
+	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
 	kfree(mhp);
 	return 0;
 }
@@ -467,7 +467,7 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
 	struct iwch_mr *mhp;
 	int ret;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
 
@@ -531,7 +531,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	int npages;
 	int ret;
 
-	PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);
+	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
 
 	/* There can be no memory windows */
 	if (atomic_read(&mr->usecnt))
@@ -594,7 +594,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct iwch_mr *mhp;
 	struct iwch_reg_user_mr_resp uresp;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
@@ -649,7 +649,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (udata && !t3a_device(rhp)) {
 		uresp.pbl_addr = (mhp->attr.pbl_addr -
 				  rhp->rdev.rnic_info.pbl_base) >> 3;
-		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
+		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
 		     uresp.pbl_addr);
 
 		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
@@ -673,7 +673,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
 	u64 kva;
 	struct ib_mr *ibmr;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 
 	/*
 	 * T3 only supports 32 bits of size.
@@ -710,7 +710,7 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
 	mhp->attr.stag = stag;
 	mmid = (stag) >> 8;
 	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
-	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
+	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
 	return &(mhp->ibmw);
 }
 
@@ -726,7 +726,7 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
 	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
 	remove_handle(rhp, &rhp->mmidr, mmid);
 	kfree(mhp);
-	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
+	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
 	return 0;
 }
 
@@ -754,7 +754,7 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
 	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 
-	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
+	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
 	     ib_qp, qhp->wq.qpid, qhp);
 	kfree(qhp);
 	return 0;
@@ -773,7 +773,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 	int wqsize, sqsize, rqsize;
 	struct iwch_ucontext *ucontext;
 
-	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
+	PDBG("%s ib_pd %p\n", __func__, pd);
 	if (attrs->qp_type != IB_QPT_RC)
 		return ERR_PTR(-EINVAL);
 	php = to_iwch_pd(pd);
@@ -805,7 +805,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 	 */
 	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
 	wqsize = roundup_pow_of_two(rqsize + sqsize);
-	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__,
+	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
 	     wqsize, sqsize, rqsize);
 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
 	if (!qhp)
@@ -898,7 +898,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 	init_timer(&(qhp->timer));
 	PDBG("%s sq_num_entries %d, rq_num_entries %d "
 	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
-	     __FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
+	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
 	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
 	     1 << qhp->wq.size_log2);
 	return &qhp->ibqp;
@@ -912,7 +912,7 @@ static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	enum iwch_qp_attr_mask mask = 0;
 	struct iwch_qp_attributes attrs;
 
-	PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp);
+	PDBG("%s ib_qp %p\n", __func__, ibqp);
 
 	/* iwarp does not support the RTR state */
 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -945,20 +945,20 @@ static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 void iwch_qp_add_ref(struct ib_qp *qp)
 {
-	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
+	PDBG("%s ib_qp %p\n", __func__, qp);
 	atomic_inc(&(to_iwch_qp(qp)->refcnt));
 }
 
 void iwch_qp_rem_ref(struct ib_qp *qp)
 {
-	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
+	PDBG("%s ib_qp %p\n", __func__, qp);
 	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
 		wake_up(&(to_iwch_qp(qp)->wait));
 }
 
 static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
 {
-	PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
+	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
 	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
 }
 
@@ -966,7 +966,7 @@ static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
 static int iwch_query_pkey(struct ib_device *ibdev,
 			   u8 port, u16 index, u16 * pkey)
 {
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	*pkey = 0;
 	return 0;
 }
@@ -977,7 +977,7 @@ static int iwch_query_gid(struct ib_device *ibdev, u8 port,
 	struct iwch_dev *dev;
 
 	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
-	     __FUNCTION__, ibdev, port, index, gid);
+	     __func__, ibdev, port, index, gid);
 	dev = to_iwch_dev(ibdev);
 	BUG_ON(port == 0 || port > 2);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@@ -990,7 +990,7 @@ static int iwch_query_device(struct ib_device *ibdev,
 {
 
 	struct iwch_dev *dev;
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 
 	dev = to_iwch_dev(ibdev);
 	memset(props, 0, sizeof *props);
@@ -1017,7 +1017,7 @@ static int iwch_query_device(struct ib_device *ibdev,
 static int iwch_query_port(struct ib_device *ibdev,
 			   u8 port, struct ib_port_attr *props)
 {
-	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
+	PDBG("%s ibdev %p\n", __func__, ibdev);
 	props->max_mtu = IB_MTU_4096;
 	props->lid = 0;
 	props->lmc = 0;
@@ -1045,7 +1045,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
 {
 	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
 					    ibdev.class_dev);
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
 }
 
@@ -1056,7 +1056,7 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
 
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	rtnl_unlock();
@@ -1070,7 +1070,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
 
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	PDBG("%s class dev 0x%p\n", __func__, cdev);
 	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	rtnl_unlock();
@@ -1081,7 +1081,7 @@ static ssize_t show_board(struct class_device *cdev, char *buf)
 {
 	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
 					    ibdev.class_dev);
-	PDBG("%s class dev 0x%p\n", __FUNCTION__, dev);
+	PDBG("%s class dev 0x%p\n", __func__, dev);
 	return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
 		       dev->rdev.rnic_info.pdev->device);
 }
@@ -1103,14 +1103,13 @@ int iwch_register_device(struct iwch_dev *dev)
 	int ret;
 	int i;
 
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
+	PDBG("%s iwch_dev %p\n", __func__, dev);
 	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
 	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
 	dev->ibdev.owner = THIS_MODULE;
 	dev->device_cap_flags =
-	    (IB_DEVICE_ZERO_STAG |
-	     IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
+	    (IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW);
 
 	dev->ibdev.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1207,7 +1206,7 @@ void iwch_unregister_device(struct iwch_dev *dev)
 {
 	int i;
 
-	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
+	PDBG("%s iwch_dev %p\n", __func__, dev);
 	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
 		class_device_remove_file(&dev->ibdev.class_dev,
 					 iwch_class_attributes[i]);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 48833f3f3bd0..61356f91109d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -213,7 +213,7 @@ static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
 		if (mm->key == key && mm->len == len) {
 			list_del_init(&mm->entry);
 			spin_unlock(&ucontext->mmap_lock);
-			PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
+			PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
 			     key, (unsigned long long) mm->addr, mm->len);
 			return mm;
 		}
@@ -226,7 +226,7 @@ static inline void insert_mmap(struct iwch_ucontext *ucontext,
 			       struct iwch_mm_entry *mm)
 {
 	spin_lock(&ucontext->mmap_lock);
-	PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
+	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
 	     mm->key, (unsigned long long) mm->addr, mm->len);
 	list_add_tail(&mm->entry, &ucontext->mmaps);
 	spin_unlock(&ucontext->mmap_lock);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index ea2cdd73dd85..8891c3b0a3d5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -72,7 +72,7 @@ static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
 	wqe->send.reserved[2] = 0;
 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
 		plen = 4;
-		wqe->send.sgl[0].stag = wr->imm_data;
+		wqe->send.sgl[0].stag = wr->ex.imm_data;
 		wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
 		wqe->send.num_sgle = __constant_cpu_to_be32(0);
 		*flit_cnt = 5;
@@ -112,7 +112,7 @@ static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
-		wqe->write.sgl[0].stag = wr->imm_data;
+		wqe->write.sgl[0].stag = wr->ex.imm_data;
 		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
 		wqe->write.num_sgle = __constant_cpu_to_be32(0);
 		*flit_cnt = 6;
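
(Aside: the two imm_data hunks above track a reorganization of struct ib_send_wr in the core verbs header, where the immediate-data word moved into a union named "ex" so the same slot can alternatively carry an invalidate rkey. A hedged, compilable sketch of that layout; only the ex.imm_data/invalidate_rkey pair is attested by this diff, the surrounding members and names are assumptions -- consult include/rdma/ib_verbs.h in this tree for the authoritative definition:

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;
typedef uint32_t __be32;	/* big-endian word; plain integer in this sketch */

enum ib_wr_opcode_sketch { IB_WR_SEND_WITH_IMM_SKETCH };

struct ib_send_wr_sketch {
	u64				wr_id;
	enum ib_wr_opcode_sketch	opcode;
	union {
		__be32	imm_data;	 /* the slot wr->ex.imm_data refers to */
		u32	invalidate_rkey; /* alternative use of the same slot */
	} ex;
	/* ... opcode-specific members would follow ... */
};

int main(void)
{
	struct ib_send_wr_sketch wr = {0};

	wr.ex.imm_data = 42;	/* formerly spelled wr.imm_data */
	return (int)wr.ex.imm_data - 42;
}

)
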
@@ -168,30 +168,30 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 
 		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
 		if (!mhp) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 		if (!mhp->attr.state) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 		if (mhp->attr.zbva) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EIO;
 		}
 
 		if (sg_list[i].addr < mhp->attr.va_fbo) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		if (sg_list[i].addr + ((u64) sg_list[i].length) <
 		    sg_list[i].addr) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		if (sg_list[i].addr + ((u64) sg_list[i].length) >
 		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
-			PDBG("%s %d\n", __FUNCTION__, __LINE__);
+			PDBG("%s %d\n", __func__, __LINE__);
 			return -EINVAL;
 		}
 		offset = sg_list[i].addr - mhp->attr.va_fbo;
@@ -290,7 +290,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			qhp->wq.oldest_read = sqp;
 			break;
 		default:
-			PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
+			PDBG("%s post of type=%d TBD!\n", __func__,
 			     wr->opcode);
 			err = -EINVAL;
 		}
@@ -309,7 +309,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
 			       0, t3_wr_flit_cnt);
 		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
-		     __FUNCTION__, (unsigned long long) wr->wr_id, idx,
+		     __func__, (unsigned long long) wr->wr_id, idx,
 		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
 		     sqp->opcode);
 		wr = wr->next;
@@ -361,7 +361,7 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
 			       0, sizeof(struct t3_receive_wr) >> 3);
 		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
-		     "wqe %p \n", __FUNCTION__, (unsigned long long) wr->wr_id,
+		     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
 		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
 		++(qhp->wq.rq_wptr);
 		++(qhp->wq.wptr);
@@ -407,7 +407,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 		return -ENOMEM;
 	}
 	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
-	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx,
+	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
 	     mw, mw_bind);
 	wqe = (union t3_wr *) (qhp->wq.queue + idx);
 
@@ -595,10 +595,10 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 	struct terminate_message *term;
 	struct sk_buff *skb;
 
-	PDBG("%s %d\n", __FUNCTION__, __LINE__);
+	PDBG("%s %d\n", __func__, __LINE__);
 	skb = alloc_skb(40, GFP_ATOMIC);
 	if (!skb) {
-		printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__);
+		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
 		return -ENOMEM;
 	}
 	wqe = (union t3_wr *)skb_put(skb, 40);
@@ -629,7 +629,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
 	schp = get_chp(qhp->rhp, qhp->attr.scq);
 
-	PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
+	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
 	spin_unlock_irqrestore(&qhp->lock, *flag);
@@ -720,11 +720,11 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
 	init_attr.irs = qhp->ep->rcv_seq;
 	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
-	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
+	     "flags 0x%x qpcaps 0x%x\n", __func__,
 	     init_attr.rq_addr, init_attr.rq_size,
 	     init_attr.flags, init_attr.qpcaps);
 	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
-	PDBG("%s ret %d\n", __FUNCTION__, ret);
+	PDBG("%s ret %d\n", __func__, ret);
 	return ret;
 }
 
@@ -742,7 +742,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	int free = 0;
 	struct iwch_ep *ep = NULL;
 
-	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
+	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
 	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
 	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
 
@@ -899,14 +899,14 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		break;
 	default:
 		printk(KERN_ERR "%s in a bad state %d\n",
-		       __FUNCTION__, qhp->attr.state);
+		       __func__, qhp->attr.state);
 		ret = -EINVAL;
 		goto err;
 		break;
 	}
 	goto out;
 err:
-	PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
+	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
 	     qhp->wq.qpid);
 
 	/* disassociate the LLP connection */
@@ -939,7 +939,7 @@ out:
 	if (free)
 		put_ep(&ep->com);
 
-	PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
+	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
 
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 194c1c30cf63..56735ea2fc57 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -41,9 +41,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-
-#include <asm/current.h>
-
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
@@ -170,17 +167,8 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 {
 	struct ehca_av *av;
 	struct ehca_ud_av new_ehca_av;
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
 					      ib_device);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
 
 	memset(&new_ehca_av, 0, sizeof(new_ehca_av));
 	new_ehca_av.sl = ah_attr->sl;
@@ -242,15 +230,6 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 {
 	struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
 
 	memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
 	       sizeof(ah_attr->grh.dgid));
@@ -273,16 +252,6 @@ int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 
 int ehca_destroy_ah(struct ib_ah *ah)
 {
-	struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
 
 	return 0;
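
(Aside: from here on, the ehca hunks delete one recurring guard: a comparison of the caller's current->tgid against an ownpid recorded when the object's PD was allocated. Reconstructed from the hunks above, the removed pattern was:

	/* Excerpt of the removed check; ownpid was set to current->tgid
	 * in ehca_alloc_pd(): */
	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    my_pd->ownpid != cur_pid) {
		ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		return -EINVAL;
	}

A per-tgid check like this rejects legitimate callers that share a verbs context across processes, for instance after fork(), which is presumably why the checks and the ownpid fields themselves (see the ehca_classes.h hunks below) are removed together.)
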
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 92cce8aacbb7..0d13fe0a260b 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -132,7 +132,6 @@ struct ehca_shca {
 struct ehca_pd {
 	struct ib_pd ib_pd;
 	struct ipz_pd fw_pd;
-	u32 ownpid;
 	/* small queue mgmt */
 	struct mutex lock;
 	struct list_head free[2];
@@ -215,7 +214,6 @@ struct ehca_cq {
 	atomic_t nr_events; /* #events seen */
 	wait_queue_head_t wait_completion;
 	spinlock_t task_lock;
-	u32 ownpid;
 	/* mmap counter for resources mapped into user space */
 	u32 mm_count_queue;
 	u32 mm_count_galpa;
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 0467c158d4a9..ec0cfcf3073f 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -43,8 +43,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
-
 #include "ehca_iverbs.h"
 #include "ehca_classes.h"
 #include "ehca_irq.h"
@@ -148,7 +146,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	spin_lock_init(&my_cq->task_lock);
 	atomic_set(&my_cq->nr_events, 0);
 	init_waitqueue_head(&my_cq->wait_completion);
-	my_cq->ownpid = current->tgid;
 
 	cq = &my_cq->ib_cq;
 
@@ -320,7 +317,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	struct ehca_shca *shca = container_of(device, struct ehca_shca,
 					      ib_device);
 	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-	u32 cur_pid = current->tgid;
 	unsigned long flags;
 
 	if (cq->uobject) {
@@ -329,12 +325,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
329 "user space cq_num=%x", my_cq->cq_number); 325 "user space cq_num=%x", my_cq->cq_number);
330 return -EINVAL; 326 return -EINVAL;
331 } 327 }
332 if (my_cq->ownpid != cur_pid) {
333 ehca_err(device, "Invalid caller pid=%x ownpid=%x "
334 "cq_num=%x",
335 cur_pid, my_cq->ownpid, my_cq->cq_number);
336 return -EINVAL;
337 }
338 } 328 }
339 329
340 /* 330 /*
@@ -374,15 +364,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
 
 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 {
-	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-	u32 cur_pid = current->tgid;
-
-	if (cq->uobject && my_cq->ownpid != cur_pid) {
-		ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_cq->ownpid);
-		return -EINVAL;
-	}
-
 	/* TODO: proper resize needs to be done */
 	ehca_err(cq->device, "not implemented yet");
 
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 5bd7b591987e..2515cbde7e65 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -43,6 +43,11 @@
43#include "ehca_iverbs.h" 43#include "ehca_iverbs.h"
44#include "hcp_if.h" 44#include "hcp_if.h"
45 45
46static unsigned int limit_uint(unsigned int value)
47{
48 return min_t(unsigned int, value, INT_MAX);
49}
50
46int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) 51int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
47{ 52{
48 int i, ret = 0; 53 int i, ret = 0;
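
(Aside: limit_uint() exists because struct ib_device_attr stores most limits as signed int while the firmware response block reports unsigned values; clamping to INT_MAX keeps a value with the top bit set from turning negative. A user-space rendering of the same idea, with the kernel's type-safe min_t() macro replaced by a plain comparison:

#include <limits.h>
#include <stdio.h>

static unsigned int limit_uint(unsigned int value)
{
	/* equivalent to min_t(unsigned int, value, INT_MAX) */
	return value < (unsigned int)INT_MAX ? value : (unsigned int)INT_MAX;
}

int main(void)
{
	unsigned int fw_count = 0xffffffffu;	/* hypothetical firmware value */
	int attr = (int)limit_uint(fw_count);	/* clamped before the cast */

	printf("%d\n", attr);	/* prints 2147483647 rather than -1 */
	return 0;
}

)
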
@@ -83,37 +88,40 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 	props->vendor_id = rblock->vendor_id >> 8;
 	props->vendor_part_id = rblock->vendor_part_id >> 16;
 	props->hw_ver = rblock->hw_ver;
-	props->max_qp = min_t(unsigned, rblock->max_qp, INT_MAX);
-	props->max_qp_wr = min_t(unsigned, rblock->max_wqes_wq, INT_MAX);
-	props->max_sge = min_t(unsigned, rblock->max_sge, INT_MAX);
-	props->max_sge_rd = min_t(unsigned, rblock->max_sge_rd, INT_MAX);
-	props->max_cq = min_t(unsigned, rblock->max_cq, INT_MAX);
-	props->max_cqe = min_t(unsigned, rblock->max_cqe, INT_MAX);
-	props->max_mr = min_t(unsigned, rblock->max_mr, INT_MAX);
-	props->max_mw = min_t(unsigned, rblock->max_mw, INT_MAX);
-	props->max_pd = min_t(unsigned, rblock->max_pd, INT_MAX);
-	props->max_ah = min_t(unsigned, rblock->max_ah, INT_MAX);
-	props->max_fmr = min_t(unsigned, rblock->max_mr, INT_MAX);
+	props->max_qp = limit_uint(rblock->max_qp);
+	props->max_qp_wr = limit_uint(rblock->max_wqes_wq);
+	props->max_sge = limit_uint(rblock->max_sge);
+	props->max_sge_rd = limit_uint(rblock->max_sge_rd);
+	props->max_cq = limit_uint(rblock->max_cq);
+	props->max_cqe = limit_uint(rblock->max_cqe);
+	props->max_mr = limit_uint(rblock->max_mr);
+	props->max_mw = limit_uint(rblock->max_mw);
+	props->max_pd = limit_uint(rblock->max_pd);
+	props->max_ah = limit_uint(rblock->max_ah);
+	props->max_ee = limit_uint(rblock->max_rd_ee_context);
+	props->max_rdd = limit_uint(rblock->max_rd_domain);
+	props->max_fmr = limit_uint(rblock->max_mr);
+	props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
+	props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
+	props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
+	props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
+	props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
+	props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
 
 	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
-		props->max_srq = props->max_qp;
-		props->max_srq_wr = props->max_qp_wr;
+		props->max_srq = limit_uint(props->max_qp);
+		props->max_srq_wr = limit_uint(props->max_qp_wr);
 		props->max_srq_sge = 3;
 	}
 
 	props->max_pkeys = 16;
-	props->local_ca_ack_delay
-		= rblock->local_ca_ack_delay;
-	props->max_raw_ipv6_qp
-		= min_t(unsigned, rblock->max_raw_ipv6_qp, INT_MAX);
-	props->max_raw_ethy_qp
-		= min_t(unsigned, rblock->max_raw_ethy_qp, INT_MAX);
-	props->max_mcast_grp
-		= min_t(unsigned, rblock->max_mcast_grp, INT_MAX);
-	props->max_mcast_qp_attach
-		= min_t(unsigned, rblock->max_mcast_qp_attach, INT_MAX);
+	props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
+	props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
+	props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
+	props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
+	props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
 	props->max_total_mcast_qp_attach
-		= min_t(unsigned, rblock->max_total_mcast_qp_attach, INT_MAX);
+		= limit_uint(rblock->max_total_mcast_qp_attach);
 
 	/* translate device capabilities */
 	props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
@@ -128,6 +136,46 @@ query_device1:
 	return ret;
 }
 
+static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
+{
+	switch (fw_mtu) {
+	case 0x1:
+		return IB_MTU_256;
+	case 0x2:
+		return IB_MTU_512;
+	case 0x3:
+		return IB_MTU_1024;
+	case 0x4:
+		return IB_MTU_2048;
+	case 0x5:
+		return IB_MTU_4096;
+	default:
+		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
+			 fw_mtu);
+		return 0;
+	}
+}
+
+static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
+{
+	switch (vl_cap) {
+	case 0x1:
+		return 1;
+	case 0x2:
+		return 2;
+	case 0x3:
+		return 4;
+	case 0x4:
+		return 8;
+	case 0x5:
+		return 15;
+	default:
+		ehca_err(&shca->ib_device, "invalid Vl Capability: %x.",
+			 vl_cap);
+		return 0;
+	}
+}
+
 int ehca_query_port(struct ib_device *ibdev,
 		    u8 port, struct ib_port_attr *props)
 {
@@ -152,31 +200,13 @@ int ehca_query_port(struct ib_device *ibdev,
 
 	memset(props, 0, sizeof(struct ib_port_attr));
 
-	switch (rblock->max_mtu) {
-	case 0x1:
-		props->active_mtu = props->max_mtu = IB_MTU_256;
-		break;
-	case 0x2:
-		props->active_mtu = props->max_mtu = IB_MTU_512;
-		break;
-	case 0x3:
-		props->active_mtu = props->max_mtu = IB_MTU_1024;
-		break;
-	case 0x4:
-		props->active_mtu = props->max_mtu = IB_MTU_2048;
-		break;
-	case 0x5:
-		props->active_mtu = props->max_mtu = IB_MTU_4096;
-		break;
-	default:
-		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
-			 rblock->max_mtu);
-		break;
-	}
-
+	props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
 	props->port_cap_flags = rblock->capability_mask;
 	props->gid_tbl_len = rblock->gid_tbl_len;
-	props->max_msg_sz = rblock->max_msg_sz;
+	if (rblock->max_msg_sz)
+		props->max_msg_sz = rblock->max_msg_sz;
+	else
+		props->max_msg_sz = 0x1 << 31;
 	props->bad_pkey_cntr = rblock->bad_pkey_cntr;
 	props->qkey_viol_cntr = rblock->qkey_viol_cntr;
 	props->pkey_tbl_len = rblock->pkey_tbl_len;
@@ -186,6 +216,7 @@ int ehca_query_port(struct ib_device *ibdev,
 	props->sm_sl = rblock->sm_sl;
 	props->subnet_timeout = rblock->subnet_timeout;
 	props->init_type_reply = rblock->init_type_reply;
+	props->max_vl_num = map_number_of_vls(shca, rblock->vl_cap);
 
 	if (rblock->state && rblock->phys_width) {
 		props->phys_state = rblock->phys_pstate;
@@ -314,7 +345,7 @@ query_gid1:
 	return ret;
 }
 
-const u32 allowed_port_caps = (
+static const u32 allowed_port_caps = (
 	IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
 	IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
 	IB_PORT_VENDOR_CLASS_SUP);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index a86ebcc79a95..65b3362cdb9b 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -57,16 +57,17 @@ MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
 MODULE_VERSION(HCAD_VERSION);
 
-int ehca_open_aqp1 = 0;
+static int ehca_open_aqp1 = 0;
+static int ehca_hw_level = 0;
+static int ehca_poll_all_eqs = 1;
+static int ehca_mr_largepage = 1;
+
 int ehca_debug_level = 0;
-int ehca_hw_level = 0;
 int ehca_nr_ports = 2;
 int ehca_use_hp_mr = 0;
 int ehca_port_act_time = 30;
-int ehca_poll_all_eqs = 1;
 int ehca_static_rate = -1;
 int ehca_scaling_code = 0;
-int ehca_mr_largepage = 1;
 int ehca_lock_hcalls = -1;
 
 module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO);
@@ -396,7 +397,7 @@ init_node_guid1:
 	return ret;
 }
 
-int ehca_init_device(struct ehca_shca *shca)
+static int ehca_init_device(struct ehca_shca *shca)
 {
 	int ret;
 
@@ -579,8 +580,8 @@ static ssize_t ehca_store_debug_level(struct device_driver *ddp,
 	return 1;
 }
 
-DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
-	    ehca_show_debug_level, ehca_store_debug_level);
+static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
+		   ehca_show_debug_level, ehca_store_debug_level);
 
 static struct attribute *ehca_drv_attrs[] = {
 	&driver_attr_debug_level.attr,
@@ -941,7 +942,7 @@ void ehca_poll_eqs(unsigned long data)
 	spin_unlock(&shca_list_lock);
 }
 
-int __init ehca_module_init(void)
+static int __init ehca_module_init(void)
 {
 	int ret;
 
@@ -988,7 +989,7 @@ module_init1:
 	return ret;
 };
 
-void __exit ehca_module_exit(void)
+static void __exit ehca_module_exit(void)
 {
 	if (ehca_poll_all_eqs == 1)
 		del_timer_sync(&poll_eqs_timer);
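
(Aside: the ehca_main.c hunks are pure linkage cleanup: variables and functions referenced only inside this file gain "static" and so internal linkage, while symbols other ehca files link against stay global. A compilable two-line sketch of the distinction, using names from the hunks above:

/* linkage.c -- sketch of internal vs. external linkage */
static int ehca_open_aqp1 = 0;	/* internal linkage: this file only */
int ehca_debug_level = 0;	/* external linkage: visible to other files */

int main(void)
{
	return ehca_open_aqp1 + ehca_debug_level;
}

)
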
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index e239bbf54da1..f26997fc00f8 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -40,8 +40,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
-
 #include <rdma/ib_umem.h>
 
 #include "ehca_iverbs.h"
@@ -419,7 +417,6 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 	struct ehca_shca *shca =
 		container_of(mr->device, struct ehca_shca, ib_device);
 	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 	u64 new_size;
 	u64 *new_start;
 	u32 new_acl;
@@ -429,15 +426,6 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 	u32 num_kpages = 0;
 	u32 num_hwpages = 0;
 	struct ehca_mr_pginfo pginfo;
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    (my_pd->ownpid != cur_pid)) {
-		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		ret = -EINVAL;
-		goto rereg_phys_mr_exit0;
-	}
 
 	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
 		/* TODO not supported, because PHYP rereg hCall needs pages */
@@ -577,19 +565,9 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
 	struct ehca_shca *shca =
 		container_of(mr->device, struct ehca_shca, ib_device);
 	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
 	unsigned long sl_flags;
 	struct ehca_mr_hipzout_parms hipzout;
 
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    (my_pd->ownpid != cur_pid)) {
-		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		ret = -EINVAL;
-		goto query_mr_exit0;
-	}
-
 	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
 		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
 			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
@@ -634,16 +612,6 @@ int ehca_dereg_mr(struct ib_mr *mr)
 	struct ehca_shca *shca =
 		container_of(mr->device, struct ehca_shca, ib_device);
 	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    (my_pd->ownpid != cur_pid)) {
-		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		ret = -EINVAL;
-		goto dereg_mr_exit0;
-	}
 
 	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
 		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
@@ -1952,9 +1920,8 @@ next_kpage:
 	return ret;
 }
 
-int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
-			  u32 number,
-			  u64 *kpage)
+static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
+				 u32 number, u64 *kpage)
 {
 	int ret = 0;
 	struct ib_phys_buf *pbuf;
@@ -2012,9 +1979,8 @@ int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
 	return ret;
 }
 
-int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
-			 u32 number,
-			 u64 *kpage)
+static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
+				u32 number, u64 *kpage)
 {
 	int ret = 0;
 	u64 *fmrlist;
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
index 43bcf085fcf2..2fe554855fa5 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -38,8 +38,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <asm/current.h>
-
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 
@@ -58,7 +56,6 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	pd->ownpid = current->tgid;
 	for (i = 0; i < 2; i++) {
 		INIT_LIST_HEAD(&pd->free[i]);
 		INIT_LIST_HEAD(&pd->full[i]);
@@ -85,18 +82,10 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
 
 int ehca_dealloc_pd(struct ib_pd *pd)
 {
-	u32 cur_pid = current->tgid;
 	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
 	int i, leftovers = 0;
 	struct ipz_small_queue_page *page, *tmp;
 
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	for (i = 0; i < 2; i++) {
 		list_splice(&my_pd->full[i], &my_pd->free[i]);
 		list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 1012f15a7140..3eb14a52cbf2 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -43,9 +43,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-
-#include <asm/current.h>
-
 #include "ehca_classes.h"
 #include "ehca_tools.h"
 #include "ehca_qes.h"
@@ -424,6 +421,9 @@ static struct ehca_qp *internal_create_qp(
 	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
 	unsigned long flags;
 
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
+
 	memset(&parms, 0, sizeof(parms));
 	qp_type = init_attr->qp_type;
 
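
(Aside: the new create_flags test in internal_create_qp() follows the defensive pattern expected of drivers once creation flags appeared in struct ib_qp_init_attr: a driver that implements no flags must fail a request that sets one rather than silently ignore it. Excerpt of the pattern as added above:

	/* Reject QP creation flags this driver does not implement. */
	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

)
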
@@ -1526,16 +1526,6 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
 					      ib_device);
 	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
-					     ib_pd);
-	u32 cur_pid = current->tgid;
-
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
 
 	/* The if-block below caches qp_attr to be modified for GSI and SMI
 	 * qps during the initialization by ib_mad. When the respective port
@@ -1636,23 +1626,13 @@ int ehca_query_qp(struct ib_qp *qp,
 		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
 {
 	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
-					     ib_pd);
 	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
 					      ib_device);
 	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
 	struct hcp_modify_qp_control_block *qpcb;
-	u32 cur_pid = current->tgid;
 	int cnt, ret = 0;
 	u64 h_ret;
 
-	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
-	    my_pd->ownpid != cur_pid) {
-		ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
-			 cur_pid, my_pd->ownpid);
-		return -EINVAL;
-	}
-
 	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
 		ehca_err(qp->device, "Invalid attribute mask "
 			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
@@ -1797,8 +1777,6 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 {
 	struct ehca_qp *my_qp =
 		container_of(ibsrq, struct ehca_qp, ib_srq);
-	struct ehca_pd *my_pd =
-		container_of(ibsrq->pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca =
 		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
 	struct hcp_modify_qp_control_block *mqpcb;
@@ -1806,14 +1784,6 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1806 u64 h_ret; 1784 u64 h_ret;
1807 int ret = 0; 1785 int ret = 0;
1808 1786
1809 u32 cur_pid = current->tgid;
1810 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1811 my_pd->ownpid != cur_pid) {
1812 ehca_err(ibsrq->pd->device, "Invalid caller pid=%x ownpid=%x",
1813 cur_pid, my_pd->ownpid);
1814 return -EINVAL;
1815 }
1816
1817 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); 1787 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1818 if (!mqpcb) { 1788 if (!mqpcb) {
1819 ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb " 1789 ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
@@ -1864,22 +1834,13 @@ modify_srq_exit0:
1864int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) 1834int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
1865{ 1835{
1866 struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq); 1836 struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
1867 struct ehca_pd *my_pd = container_of(srq->pd, struct ehca_pd, ib_pd);
1868 struct ehca_shca *shca = container_of(srq->device, struct ehca_shca, 1837 struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
1869 ib_device); 1838 ib_device);
1870 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle; 1839 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
1871 struct hcp_modify_qp_control_block *qpcb; 1840 struct hcp_modify_qp_control_block *qpcb;
1872 u32 cur_pid = current->tgid;
1873 int ret = 0; 1841 int ret = 0;
1874 u64 h_ret; 1842 u64 h_ret;
1875 1843
1876 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1877 my_pd->ownpid != cur_pid) {
1878 ehca_err(srq->device, "Invalid caller pid=%x ownpid=%x",
1879 cur_pid, my_pd->ownpid);
1880 return -EINVAL;
1881 }
1882
1883 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); 1844 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1884 if (!qpcb) { 1845 if (!qpcb) {
1885 ehca_err(srq->device, "Out of memory for qpcb " 1846 ehca_err(srq->device, "Out of memory for qpcb "
@@ -1919,7 +1880,6 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1919 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, 1880 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
1920 ib_pd); 1881 ib_pd);
1921 struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1]; 1882 struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
1922 u32 cur_pid = current->tgid;
1923 u32 qp_num = my_qp->real_qp_num; 1883 u32 qp_num = my_qp->real_qp_num;
1924 int ret; 1884 int ret;
1925 u64 h_ret; 1885 u64 h_ret;
@@ -1934,11 +1894,6 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1934 "user space qp_num=%x", qp_num); 1894 "user space qp_num=%x", qp_num);
1935 return -EINVAL; 1895 return -EINVAL;
1936 } 1896 }
1937 if (my_pd->ownpid != cur_pid) {
1938 ehca_err(dev, "Invalid caller pid=%x ownpid=%x",
1939 cur_pid, my_pd->ownpid);
1940 return -EINVAL;
1941 }
1942 } 1897 }
1943 1898
1944 if (my_qp->send_cq) { 1899 if (my_qp->send_cq) {
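
Every hunk in this file removes the same guard: a comparison of current->tgid against the PID cached in the ehca protection domain. A guard of that kind is fragile across fork(), and it duplicates ownership enforcement the uverbs layer already provides. The test that survives, visible in ehca_mmap() later in this patch, ties a user resource to the ucontext that created it instead. Condensed, the surviving pattern is roughly:

        /* Sketch: ownership by ucontext, not by PID. */
        static int check_owner(struct ib_uobject *uobject,
                               struct ib_ucontext *context)
        {
                if (!uobject || uobject->context != context)
                        return -EINVAL; /* created by a different context */
                return 0;
        }
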
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 2ce8cffb8664..a20bbf466188 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -188,7 +188,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
188 if (send_wr->opcode == IB_WR_SEND_WITH_IMM || 188 if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
189 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 189 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
190 /* this might not work as long as HW does not support it */ 190 /* this might not work as long as HW does not support it */
191 wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data); 191 wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
192 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT; 192 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
193 } 193 }
194 194
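
The one-line change above follows an ib_verbs API move: imm_data now lives inside a union named ex on struct ib_send_wr, so the same slot can carry other opcode-specific data (an invalidate rkey is the assumed sibling member in this kernel generation). Consumers select the member by opcode, along these lines:

        /* Sketch of the opcode-keyed union access; member names are
         * assumed from this era's struct ib_send_wr.
         */
        u32 rkey;

        switch (send_wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
                break;
        case IB_WR_SEND_WITH_INV:
                rkey = send_wr->ex.invalidate_rkey;     /* not handled by ehca */
                break;
        default:
                break;
        }
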
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 4a8346a2bc9e..ec950bf8c479 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -73,37 +73,37 @@ extern int ehca_debug_level;
73 if (unlikely(ehca_debug_level)) \ 73 if (unlikely(ehca_debug_level)) \
74 dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \ 74 dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
75 "PU%04x EHCA_DBG:%s " format "\n", \ 75 "PU%04x EHCA_DBG:%s " format "\n", \
76 raw_smp_processor_id(), __FUNCTION__, \ 76 raw_smp_processor_id(), __func__, \
77 ## arg); \ 77 ## arg); \
78 } while (0) 78 } while (0)
79 79
80#define ehca_info(ib_dev, format, arg...) \ 80#define ehca_info(ib_dev, format, arg...) \
81 dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \ 81 dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
82 raw_smp_processor_id(), __FUNCTION__, ## arg) 82 raw_smp_processor_id(), __func__, ## arg)
83 83
84#define ehca_warn(ib_dev, format, arg...) \ 84#define ehca_warn(ib_dev, format, arg...) \
85 dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \ 85 dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
86 raw_smp_processor_id(), __FUNCTION__, ## arg) 86 raw_smp_processor_id(), __func__, ## arg)
87 87
88#define ehca_err(ib_dev, format, arg...) \ 88#define ehca_err(ib_dev, format, arg...) \
89 dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \ 89 dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
90 raw_smp_processor_id(), __FUNCTION__, ## arg) 90 raw_smp_processor_id(), __func__, ## arg)
91 91
92/* use this one only if no ib_dev available */ 92/* use this one only if no ib_dev available */
93#define ehca_gen_dbg(format, arg...) \ 93#define ehca_gen_dbg(format, arg...) \
94 do { \ 94 do { \
95 if (unlikely(ehca_debug_level)) \ 95 if (unlikely(ehca_debug_level)) \
96 printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \ 96 printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
97 raw_smp_processor_id(), __FUNCTION__, ## arg); \ 97 raw_smp_processor_id(), __func__, ## arg); \
98 } while (0) 98 } while (0)
99 99
100#define ehca_gen_warn(format, arg...) \ 100#define ehca_gen_warn(format, arg...) \
101 printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \ 101 printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
102 raw_smp_processor_id(), __FUNCTION__, ## arg) 102 raw_smp_processor_id(), __func__, ## arg)
103 103
104#define ehca_gen_err(format, arg...) \ 104#define ehca_gen_err(format, arg...) \
105 printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \ 105 printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
106 raw_smp_processor_id(), __FUNCTION__, ## arg) 106 raw_smp_processor_id(), __func__, ## arg)
107 107
108/** 108/**
109 * ehca_dmp - printk a memory block, whose length is n*8 bytes. 109 * ehca_dmp - printk a memory block, whose length is n*8 bytes.
@@ -118,7 +118,7 @@ extern int ehca_debug_level;
118 for (x = 0; x < l; x += 16) { \ 118 for (x = 0; x < l; x += 16) { \
119 printk(KERN_INFO "EHCA_DMP:%s " format \ 119 printk(KERN_INFO "EHCA_DMP:%s " format \
120 " adr=%p ofs=%04x %016lx %016lx\n", \ 120 " adr=%p ofs=%04x %016lx %016lx\n", \
121 __FUNCTION__, ##args, deb, x, \ 121 __func__, ##args, deb, x, \
122 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ 122 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
123 deb += 16; \ 123 deb += 16; \
124 } \ 124 } \
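
The only change in this file is mechanical: __FUNCTION__ is a GCC-specific spelling, while __func__ is the C99 predefined identifier, so the conversion costs nothing at runtime. For reference, __func__ behaves as an implicitly declared static string, not a macro:

        void example(void)
        {
                /* equivalent to: static const char __func__[] = "example"; */
                printk(KERN_DEBUG "%s: called\n", __func__);
        }
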
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 5234d6c15c49..1b07f2beafaf 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -40,8 +40,6 @@
40 * POSSIBILITY OF SUCH DAMAGE. 40 * POSSIBILITY OF SUCH DAMAGE.
41 */ 41 */
42 42
43#include <asm/current.h>
44
45#include "ehca_classes.h" 43#include "ehca_classes.h"
46#include "ehca_iverbs.h" 44#include "ehca_iverbs.h"
47#include "ehca_mrmw.h" 45#include "ehca_mrmw.h"
@@ -253,11 +251,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
253 u32 idr_handle = fileoffset & 0x1FFFFFF; 251 u32 idr_handle = fileoffset & 0x1FFFFFF;
254 u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */ 252 u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */
255 u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */ 253 u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
256 u32 cur_pid = current->tgid;
257 u32 ret; 254 u32 ret;
258 struct ehca_cq *cq; 255 struct ehca_cq *cq;
259 struct ehca_qp *qp; 256 struct ehca_qp *qp;
260 struct ehca_pd *pd;
261 struct ib_uobject *uobject; 257 struct ib_uobject *uobject;
262 258
263 switch (q_type) { 259 switch (q_type) {
@@ -270,13 +266,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
270 if (!cq) 266 if (!cq)
271 return -EINVAL; 267 return -EINVAL;
272 268
273 if (cq->ownpid != cur_pid) {
274 ehca_err(cq->ib_cq.device,
275 "Invalid caller pid=%x ownpid=%x",
276 cur_pid, cq->ownpid);
277 return -ENOMEM;
278 }
279
280 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context) 269 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
281 return -EINVAL; 270 return -EINVAL;
282 271
@@ -298,14 +287,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
298 if (!qp) 287 if (!qp)
299 return -EINVAL; 288 return -EINVAL;
300 289
301 pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
302 if (pd->ownpid != cur_pid) {
303 ehca_err(qp->ib_qp.device,
304 "Invalid caller pid=%x ownpid=%x",
305 cur_pid, pd->ownpid);
306 return -ENOMEM;
307 }
308
309 uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject; 290 uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
310 if (!uobject || uobject->context != context) 291 if (!uobject || uobject->context != context)
311 return -EINVAL; 292 return -EINVAL;
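
With the PID checks gone, ehca_mmap() is left with the interesting part: the mmap offset doubles as a routing key. From the unpacking at the top of the function, the layout is bits 0-24 idr handle, bits 25-26 resource type (sq/rq/command window), and bit 27 queue type (CQ vs. QP). The matching encoder is not part of this patch; a hypothetical sketch mirroring the decode (user space would additionally scale this into an mmap() offset):

        /* Hypothetical encoder mirroring the decode in ehca_mmap(). */
        static u64 ehca_mmap_fileoffset(u32 idr_handle, u32 rsrc_type, u32 q_type)
        {
                return ((u64)(q_type     & 0x1)       << 27) |
                       ((u64)(rsrc_type  & 0x3)       << 25) |
                        (u64)(idr_handle & 0x1FFFFFF);
        }
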
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index fe6738826865..75a6c91944c4 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -20,17 +20,20 @@ ib_ipath-y := \
20 ipath_qp.o \ 20 ipath_qp.o \
21 ipath_rc.o \ 21 ipath_rc.o \
22 ipath_ruc.o \ 22 ipath_ruc.o \
23 ipath_sdma.o \
23 ipath_srq.o \ 24 ipath_srq.o \
24 ipath_stats.o \ 25 ipath_stats.o \
25 ipath_sysfs.o \ 26 ipath_sysfs.o \
26 ipath_uc.o \ 27 ipath_uc.o \
27 ipath_ud.o \ 28 ipath_ud.o \
28 ipath_user_pages.o \ 29 ipath_user_pages.o \
30 ipath_user_sdma.o \
29 ipath_verbs_mcast.o \ 31 ipath_verbs_mcast.o \
30 ipath_verbs.o 32 ipath_verbs.o
31 33
32ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o 34ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
33ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o 35ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
36ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba7220.o ipath_sd7220.o ipath_sd7220_img.o
34 37
35ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o 38ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
36ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o 39ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/ipath/ipath_7220.h
new file mode 100644
index 000000000000..74fa5cc5131d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_7220.h
@@ -0,0 +1,57 @@
1#ifndef _IPATH_7220_H
2#define _IPATH_7220_H
3/*
4 * Copyright (c) 2007 QLogic Corporation. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35/*
36 * This header file provides the declarations and common definitions
37 * for (mostly) manipulation of the SerDes blocks within the IBA7220.
38 * the functions declared should only be called from within other
39 * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
40 */
41int ipath_sd7220_presets(struct ipath_devdata *dd);
42int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
43int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
44 int len, int offset);
45int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
46 int len, int offset);
47/*
48 * Below used for sdnum parameter, selecting one of the two sections
49 * used for PCIe, or the single SerDes used for IB, which is the
50 * only one currently used
51 */
52#define IB_7220_SERDES 2
53
54int ipath_sd7220_ib_load(struct ipath_devdata *dd);
55int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
56
57#endif /* _IPATH_7220_H */
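
The new header is the entire public surface of the 7220 SerDes support: presets, init, and program/verify of the uC image over one of the SerDes blocks (IB_7220_SERDES selects the IB one). A plausible bring-up call, sketched from these declarations only:

        /* Sketch only: the real sequencing lives in ipath_iba7220.c and
         * ipath_sd7220.c, which may order these steps differently.
         */
        static int bringup_ib_serdes(struct ipath_devdata *dd, int was_reset)
        {
                int ret;

                ret = ipath_sd7220_init(dd, was_reset); /* load/verify uC image */
                if (ret)
                        ipath_dev_err(dd, "SerDes init failed: %d\n", ret);
                return ret;
        }
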
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 591901aab6b7..28cfe97cf1e9 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -80,6 +80,8 @@
80#define IPATH_IB_LINKDOWN_DISABLE 5 80#define IPATH_IB_LINKDOWN_DISABLE 5
81#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */ 81#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
82#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */ 82#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
83#define IPATH_IB_LINK_NO_HRTBT 8 /* disable Heartbeat, e.g. for loopback */
84#define IPATH_IB_LINK_HRTBT 9 /* enable heartbeat, normal, non-loopback */
83 85
84/* 86/*
85 * These 3 values (SDR and DDR may be ORed for auto-speed 87 * These 3 values (SDR and DDR may be ORed for auto-speed
@@ -198,7 +200,8 @@ typedef enum _ipath_ureg {
198#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4 200#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
199#define IPATH_RUNTIME_RCVHDR_COPY 0x8 201#define IPATH_RUNTIME_RCVHDR_COPY 0x8
200#define IPATH_RUNTIME_MASTER 0x10 202#define IPATH_RUNTIME_MASTER 0x10
201/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */ 203#define IPATH_RUNTIME_NODMA_RTAIL 0x80
204#define IPATH_RUNTIME_SDMA 0x200
202#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400 205#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
203#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800 206#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
204 207
@@ -444,8 +447,9 @@ struct ipath_user_info {
444#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */ 447#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
445#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */ 448#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
446#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */ 449#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
447 450/* 30 is unused */
448#define IPATH_CMD_MAX 29 451#define IPATH_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
452#define IPATH_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
449 453
 450/* 454/*
451 * Poll types 455 * Poll types
@@ -483,6 +487,17 @@ struct ipath_cmd {
483 union { 487 union {
484 struct ipath_tid_info tid_info; 488 struct ipath_tid_info tid_info;
485 struct ipath_user_info user_info; 489 struct ipath_user_info user_info;
490
491 /*
492 * address in userspace where we should put the sdma
493 * inflight counter
494 */
495 __u64 sdma_inflight;
496 /*
497 * address in userspace where we should put the sdma
498 * completion counter
499 */
500 __u64 sdma_complete;
486 /* address in userspace of struct ipath_port_info to 501 /* address in userspace of struct ipath_port_info to
487 write result to */ 502 write result to */
488 __u64 port_info; 503 __u64 port_info;
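
The two new commands are read-back hooks for user-level send DMA: the process hands the driver a userspace address, and the driver deposits the current inflight or completion counter there. Assuming the usual ipath write()-based command ABI (a type field plus this union; field names outside this hunk are assumed), the user side would be roughly:

        /* Hypothetical userspace sketch of IPATH_CMD_SDMA_INFLIGHT. */
        struct ipath_cmd cmd;
        __u32 counter = 0;

        memset(&cmd, 0, sizeof(cmd));
        cmd.type = IPATH_CMD_SDMA_INFLIGHT;
        cmd.cmd.sdma_inflight = (__u64) (unsigned long) &counter;
        if (write(fd, &cmd, sizeof(cmd)) < 0)
                return -1;      /* driver could not copy the counter out */
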
@@ -537,7 +552,7 @@ struct ipath_diag_pkt {
537 552
538/* The second diag_pkt struct is the expanded version that allows 553/* The second diag_pkt struct is the expanded version that allows
539 * more control over the packet, specifically, by allowing a custom 554 * more control over the packet, specifically, by allowing a custom
540 * pbc (+ extra) qword, so that special modes and deliberate 555 * pbc (+ static rate) qword, so that special modes and deliberate
541 * changes to CRCs can be used. The elements were also re-ordered 556 * changes to CRCs can be used. The elements were also re-ordered
542 * for better alignment and to avoid padding issues. 557 * for better alignment and to avoid padding issues.
543 */ 558 */
@@ -662,8 +677,12 @@ struct infinipath_counters {
662#define INFINIPATH_RHF_LENGTH_SHIFT 0 677#define INFINIPATH_RHF_LENGTH_SHIFT 0
663#define INFINIPATH_RHF_RCVTYPE_MASK 0x7 678#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
664#define INFINIPATH_RHF_RCVTYPE_SHIFT 11 679#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
665#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF 680#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
666#define INFINIPATH_RHF_EGRINDEX_SHIFT 16 681#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
682#define INFINIPATH_RHF_SEQ_MASK 0xF
683#define INFINIPATH_RHF_SEQ_SHIFT 0
684#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
685#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
667#define INFINIPATH_RHF_H_ICRCERR 0x80000000 686#define INFINIPATH_RHF_H_ICRCERR 0x80000000
668#define INFINIPATH_RHF_H_VCRCERR 0x40000000 687#define INFINIPATH_RHF_H_VCRCERR 0x40000000
669#define INFINIPATH_RHF_H_PARITYERR 0x20000000 688#define INFINIPATH_RHF_H_PARITYERR 0x20000000
@@ -673,6 +692,8 @@ struct infinipath_counters {
673#define INFINIPATH_RHF_H_TIDERR 0x02000000 692#define INFINIPATH_RHF_H_TIDERR 0x02000000
674#define INFINIPATH_RHF_H_MKERR 0x01000000 693#define INFINIPATH_RHF_H_MKERR 0x01000000
675#define INFINIPATH_RHF_H_IBERR 0x00800000 694#define INFINIPATH_RHF_H_IBERR 0x00800000
695#define INFINIPATH_RHF_H_ERR_MASK 0xFF800000
696#define INFINIPATH_RHF_L_USE_EGR 0x80000000
676#define INFINIPATH_RHF_L_SWA 0x00008000 697#define INFINIPATH_RHF_L_SWA 0x00008000
677#define INFINIPATH_RHF_L_SWB 0x00004000 698#define INFINIPATH_RHF_L_SWB 0x00004000
678 699
@@ -696,6 +717,7 @@ struct infinipath_counters {
696/* SendPIO per-buffer control */ 717/* SendPIO per-buffer control */
697#define INFINIPATH_SP_TEST 0x40 718#define INFINIPATH_SP_TEST 0x40
698#define INFINIPATH_SP_TESTEBP 0x20 719#define INFINIPATH_SP_TESTEBP 0x20
720#define INFINIPATH_SP_TRIGGER_SHIFT 15
699 721
700/* SendPIOAvail bits */ 722/* SendPIOAvail bits */
701#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1 723#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
@@ -762,6 +784,7 @@ struct ether_header {
762#define IPATH_MSN_MASK 0xFFFFFF 784#define IPATH_MSN_MASK 0xFFFFFF
763#define IPATH_QPN_MASK 0xFFFFFF 785#define IPATH_QPN_MASK 0xFFFFFF
764#define IPATH_MULTICAST_LID_BASE 0xC000 786#define IPATH_MULTICAST_LID_BASE 0xC000
787#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
765#define IPATH_MULTICAST_QPN 0xFFFFFF 788#define IPATH_MULTICAST_QPN 0xFFFFFF
766 789
767/* Receive Header Queue: receive type (from infinipath) */ 790/* Receive Header Queue: receive type (from infinipath) */
@@ -781,7 +804,7 @@ struct ether_header {
781 */ 804 */
782static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf) 805static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
783{ 806{
784 return __le32_to_cpu(rbuf[1]); 807 return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
785} 808}
786 809
787static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf) 810static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
@@ -802,6 +825,23 @@ static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
802 & INFINIPATH_RHF_EGRINDEX_MASK; 825 & INFINIPATH_RHF_EGRINDEX_MASK;
803} 826}
804 827
828static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
829{
830 return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
831 & INFINIPATH_RHF_SEQ_MASK;
832}
833
834static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
835{
836 return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
837 & INFINIPATH_RHF_HDRQ_OFFSET_MASK;
838}
839
840static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
841{
842 return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
843}
844
805static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword) 845static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
806{ 846{
807 return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT) 847 return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
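
The four new accessors make the receive-header flag word self-describing, and the sequence field is what enables the IPATH_RUNTIME_NODMA_RTAIL mode above: instead of reading a DMA'd tail index, the consumer checks whether the 4-bit sequence stamped into each rcvhdrq slot matches the value it expects next. A condensed polling sketch (the in-kernel loop, in ipath_driver.c below, wraps the expected value from 13 back to 1):

        /* Sketch: tail-less rcvhdrq polling keyed on the RHF seq field. */
        while (ipath_hdrget_seq(rhf_addr) == expected_seq) {
                consume_packet(rhf_addr);               /* hypothetical */
                if (++expected_seq > 13)
                        expected_seq = 1;               /* driver's wrap point */
                rhf_addr = advance_entry(rhf_addr);     /* hypothetical */
        }
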
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index d6f69532d83f..65926cd35759 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -66,6 +66,7 @@
66#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ 66#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
67#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */ 67#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
68#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */ 68#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
69#define __IPATH_LINKVERBDBG 0x200000 /* very verbose linkchange debug */
69 70
70#else /* _IPATH_DEBUGGING */ 71#else /* _IPATH_DEBUGGING */
71 72
@@ -89,6 +90,7 @@
89#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ 90#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
90#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ 91#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
91#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) packet dump on */ 92#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) packet dump on */
93#define __IPATH_LINKVERBDBG 0x0 /* very verbose linkchange debug */
92 94
93#endif /* _IPATH_DEBUGGING */ 95#endif /* _IPATH_DEBUGGING */
94 96
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 4137c7770f1b..6d49d2f18a88 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -330,13 +330,19 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
330 struct ipath_devdata *dd; 330 struct ipath_devdata *dd;
331 ssize_t ret = 0; 331 ssize_t ret = 0;
332 u64 val; 332 u64 val;
333 u32 l_state, lt_state; /* LinkState, LinkTrainingState */
333 334
334 if (count != sizeof(dp)) { 335 if (count < sizeof(odp)) {
335 ret = -EINVAL; 336 ret = -EINVAL;
336 goto bail; 337 goto bail;
337 } 338 }
338 339
339 if (copy_from_user(&dp, data, sizeof(dp))) { 340 if (count == sizeof(dp)) {
341 if (copy_from_user(&dp, data, sizeof(dp))) {
342 ret = -EFAULT;
343 goto bail;
344 }
345 } else if (copy_from_user(&odp, data, sizeof(odp))) {
340 ret = -EFAULT; 346 ret = -EFAULT;
341 goto bail; 347 goto bail;
342 } 348 }
@@ -396,10 +402,17 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
396 ret = -ENODEV; 402 ret = -ENODEV;
397 goto bail; 403 goto bail;
398 } 404 }
399 /* Check link state, but not if we have custom PBC */ 405 /*
400 val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; 406 * Want to skip check for l_state if using custom PBC,
401 if (!dp.pbc_wd && val != IPATH_IBSTATE_INIT && 407 * because we might be trying to force an SM packet out.
402 val != IPATH_IBSTATE_ARM && val != IPATH_IBSTATE_ACTIVE) { 408 * first-cut, skip _all_ state checking in that case.
409 */
410 val = ipath_ib_state(dd, dd->ipath_lastibcstat);
411 lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
412 l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
413 if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
414 (val != dd->ib_init && val != dd->ib_arm &&
415 val != dd->ib_active))) {
403 ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n", 416 ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
404 dd->ipath_unit, (unsigned long long) val); 417 dd->ipath_unit, (unsigned long long) val);
405 ret = -EINVAL; 418 ret = -EINVAL;
@@ -431,15 +444,17 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
431 goto bail; 444 goto bail;
432 } 445 }
433 446
434 piobuf = ipath_getpiobuf(dd, &pbufn); 447 plen >>= 2; /* in dwords */
448
449 piobuf = ipath_getpiobuf(dd, plen, &pbufn);
435 if (!piobuf) { 450 if (!piobuf) {
436 ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n", 451 ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
437 dd->ipath_unit); 452 dd->ipath_unit);
438 ret = -EBUSY; 453 ret = -EBUSY;
439 goto bail; 454 goto bail;
440 } 455 }
441 456 /* disarm it just to be extra sure */
442 plen >>= 2; /* in dwords */ 457 ipath_disarm_piobufs(dd, pbufn, 1);
443 458
444 if (ipath_debug & __IPATH_PKTDBG) 459 if (ipath_debug & __IPATH_PKTDBG)
445 ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n", 460 ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
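
Two independent fixes land in this file. The size check now implements a small write() ABI-versioning idiom, accepting both the original packet struct (odp, declared outside this hunk) and the expanded dp layout, keyed purely on count. And the buffer request now passes the dword length so the allocator can pick a large-enough PIO buffer, which is then disarmed defensively. The version-by-size idiom in isolation, as a sketch:

        /* Sketch: accept two generations of a write() command struct. */
        if (count == sizeof(newpkt)) {                  /* new layout */
                if (copy_from_user(&newpkt, data, sizeof(newpkt)))
                        return -EFAULT;
        } else if (count >= sizeof(oldpkt)) {           /* old layout */
                if (copy_from_user(&oldpkt, data, sizeof(oldpkt)))
                        return -EFAULT;
                upconvert(&oldpkt, &newpkt);            /* hypothetical */
        } else {
                return -EINVAL;                         /* too short for either */
        }
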
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index ca4d0acc6786..e0a64f070b97 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -41,7 +41,6 @@
41 41
42#include "ipath_kernel.h" 42#include "ipath_kernel.h"
43#include "ipath_verbs.h" 43#include "ipath_verbs.h"
44#include "ipath_common.h"
45 44
46static void ipath_update_pio_bufs(struct ipath_devdata *); 45static void ipath_update_pio_bufs(struct ipath_devdata *);
47 46
@@ -73,10 +72,27 @@ module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
73MODULE_PARM_DESC(debug, "mask for debug prints"); 72MODULE_PARM_DESC(debug, "mask for debug prints");
74EXPORT_SYMBOL_GPL(ipath_debug); 73EXPORT_SYMBOL_GPL(ipath_debug);
75 74
75unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
76module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
77MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
78
79static unsigned ipath_hol_timeout_ms = 13000;
80module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
81MODULE_PARM_DESC(hol_timeout_ms,
82 "duration of user app suspension after link failure");
83
84unsigned ipath_linkrecovery = 1;
85module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
86MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
87
76MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
77MODULE_AUTHOR("QLogic <support@pathscale.com>"); 89MODULE_AUTHOR("QLogic <support@qlogic.com>");
78MODULE_DESCRIPTION("QLogic InfiniPath driver"); 90MODULE_DESCRIPTION("QLogic InfiniPath driver");
79 91
92/*
93 * Table to translate the LINKTRAININGSTATE portion of
94 * IBCStatus to a human-readable form.
95 */
80const char *ipath_ibcstatus_str[] = { 96const char *ipath_ibcstatus_str[] = {
81 "Disabled", 97 "Disabled",
82 "LinkUp", 98 "LinkUp",
@@ -91,9 +107,20 @@ const char *ipath_ibcstatus_str[] = {
91 "CfgWaitRmt", 107 "CfgWaitRmt",
92 "CfgIdle", 108 "CfgIdle",
93 "RecovRetrain", 109 "RecovRetrain",
94 "LState0xD", /* unused */ 110 "CfgTxRevLane", /* unused before IBA7220 */
95 "RecovWaitRmt", 111 "RecovWaitRmt",
96 "RecovIdle", 112 "RecovIdle",
113 /* below were added for IBA7220 */
114 "CfgEnhanced",
115 "CfgTest",
116 "CfgWaitRmtTest",
117 "CfgWaitCfgEnhanced",
118 "SendTS_T",
119 "SendTstIdles",
120 "RcvTS_T",
121 "SendTst_TS1s",
122 "LTState18", "LTState19", "LTState1A", "LTState1B",
123 "LTState1C", "LTState1D", "LTState1E", "LTState1F"
97}; 124};
98 125
99static void __devexit ipath_remove_one(struct pci_dev *); 126static void __devexit ipath_remove_one(struct pci_dev *);
@@ -102,8 +129,10 @@ static int __devinit ipath_init_one(struct pci_dev *,
102 129
103/* Only needed for registration, nothing else needs this info */ 130/* Only needed for registration, nothing else needs this info */
104#define PCI_VENDOR_ID_PATHSCALE 0x1fc1 131#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
132#define PCI_VENDOR_ID_QLOGIC 0x1077
105#define PCI_DEVICE_ID_INFINIPATH_HT 0xd 133#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
106#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 134#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
135#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
107 136
108/* Number of seconds before our card status check... */ 137/* Number of seconds before our card status check... */
109#define STATUS_TIMEOUT 60 138#define STATUS_TIMEOUT 60
@@ -111,6 +140,7 @@ static int __devinit ipath_init_one(struct pci_dev *,
111static const struct pci_device_id ipath_pci_tbl[] = { 140static const struct pci_device_id ipath_pci_tbl[] = {
112 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) }, 141 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
113 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) }, 142 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
143 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
114 { 0, } 144 { 0, }
115}; 145};
116 146
@@ -126,19 +156,6 @@ static struct pci_driver ipath_driver = {
126 }, 156 },
127}; 157};
128 158
129static void ipath_check_status(struct work_struct *work)
130{
131 struct ipath_devdata *dd = container_of(work, struct ipath_devdata,
132 status_work.work);
133
134 /*
135 * If we don't have any interrupts, let the user know and
136 * don't bother checking again.
137 */
138 if (dd->ipath_int_counter == 0)
139 dev_err(&dd->pcidev->dev, "No interrupts detected.\n");
140}
141
142static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev, 159static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
143 u32 *bar0, u32 *bar1) 160 u32 *bar0, u32 *bar1)
144{ 161{
@@ -206,8 +223,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
206 dd->pcidev = pdev; 223 dd->pcidev = pdev;
207 pci_set_drvdata(pdev, dd); 224 pci_set_drvdata(pdev, dd);
208 225
209 INIT_DELAYED_WORK(&dd->status_work, ipath_check_status);
210
211 list_add(&dd->ipath_list, &ipath_dev_list); 226 list_add(&dd->ipath_list, &ipath_dev_list);
212 227
213bail_unlock: 228bail_unlock:
@@ -234,12 +249,12 @@ struct ipath_devdata *ipath_lookup(int unit)
234 return dd; 249 return dd;
235} 250}
236 251
237int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp) 252int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
238{ 253{
239 int nunits, npresent, nup; 254 int nunits, npresent, nup;
240 struct ipath_devdata *dd; 255 struct ipath_devdata *dd;
241 unsigned long flags; 256 unsigned long flags;
242 u32 maxports; 257 int maxports;
243 258
244 nunits = npresent = nup = maxports = 0; 259 nunits = npresent = nup = maxports = 0;
245 260
@@ -304,7 +319,7 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
304 u32 *addr; 319 u32 *addr;
305 u64 msecs, emsecs; 320 u64 msecs, emsecs;
306 321
307 piobuf = ipath_getpiobuf(dd, &pbnum); 322 piobuf = ipath_getpiobuf(dd, 0, &pbnum);
308 if (!piobuf) { 323 if (!piobuf) {
309 dev_info(&dd->pcidev->dev, 324 dev_info(&dd->pcidev->dev,
310 "No PIObufs for checking perf, skipping\n"); 325 "No PIObufs for checking perf, skipping\n");
@@ -336,7 +351,14 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
336 351
337 ipath_disable_armlaunch(dd); 352 ipath_disable_armlaunch(dd);
338 353
339 writeq(0, piobuf); /* length 0, no dwords actually sent */ 354 /*
355 * length 0, no dwords actually sent, and mark as VL15
356 * on chips where that may matter (due to IB flowcontrol)
357 */
358 if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
359 writeq(1UL << 63, piobuf);
360 else
361 writeq(0, piobuf);
340 ipath_flush_wc(); 362 ipath_flush_wc();
341 363
342 /* 364 /*
@@ -377,6 +399,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
377 struct ipath_devdata *dd; 399 struct ipath_devdata *dd;
378 unsigned long long addr; 400 unsigned long long addr;
379 u32 bar0 = 0, bar1 = 0; 401 u32 bar0 = 0, bar1 = 0;
402 u8 rev;
380 403
381 dd = ipath_alloc_devdata(pdev); 404 dd = ipath_alloc_devdata(pdev);
382 if (IS_ERR(dd)) { 405 if (IS_ERR(dd)) {
@@ -408,7 +431,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
408 } 431 }
409 addr = pci_resource_start(pdev, 0); 432 addr = pci_resource_start(pdev, 0);
410 len = pci_resource_len(pdev, 0); 433 len = pci_resource_len(pdev, 0);
411 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x " 434 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
412 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor, 435 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
413 ent->device, ent->driver_data); 436 ent->device, ent->driver_data);
414 437
@@ -512,6 +535,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
512 "CONFIG_PCI_MSI is not enabled\n", ent->device); 535 "CONFIG_PCI_MSI is not enabled\n", ent->device);
513 return -ENODEV; 536 return -ENODEV;
514#endif 537#endif
538 case PCI_DEVICE_ID_INFINIPATH_7220:
539#ifndef CONFIG_PCI_MSI
540 ipath_dbg("CONFIG_PCI_MSI is not enabled, "
541 "using IntX for unit %u\n", dd->ipath_unit);
542#endif
543 ipath_init_iba7220_funcs(dd);
544 break;
515 default: 545 default:
516 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " 546 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
517 "failing\n", ent->device); 547 "failing\n", ent->device);
@@ -533,7 +563,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
533 goto bail_regions; 563 goto bail_regions;
534 } 564 }
535 565
536 dd->ipath_pcirev = pdev->revision; 566 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
567 if (ret) {
568 ipath_dev_err(dd, "Failed to read PCI revision ID unit "
569 "%u: err %d\n", dd->ipath_unit, -ret);
570 goto bail_regions; /* shouldn't ever happen */
571 }
572 dd->ipath_pcirev = rev;
537 573
538#if defined(__powerpc__) 574#if defined(__powerpc__)
539 /* There isn't a generic way to specify writethrough mappings */ 575 /* There isn't a generic way to specify writethrough mappings */
@@ -556,14 +592,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
556 ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n", 592 ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
557 addr, dd->ipath_kregbase); 593 addr, dd->ipath_kregbase);
558 594
559 /*
560 * clear ipath_flags here instead of in ipath_init_chip as it is set
561 * by ipath_setup_htconfig.
562 */
563 dd->ipath_flags = 0;
564 dd->ipath_lli_counter = 0;
565 dd->ipath_lli_errors = 0;
566
567 if (dd->ipath_f_bus(dd, pdev)) 595 if (dd->ipath_f_bus(dd, pdev))
568 ipath_dev_err(dd, "Failed to setup config space; " 596 ipath_dev_err(dd, "Failed to setup config space; "
569 "continuing anyway\n"); 597 "continuing anyway\n");
@@ -608,13 +636,11 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
608 ipath_diag_add(dd); 636 ipath_diag_add(dd);
609 ipath_register_ib_device(dd); 637 ipath_register_ib_device(dd);
610 638
611 /* Check that card status in STATUS_TIMEOUT seconds. */
612 schedule_delayed_work(&dd->status_work, HZ * STATUS_TIMEOUT);
613
614 goto bail; 639 goto bail;
615 640
616bail_irqsetup: 641bail_irqsetup:
617 if (pdev->irq) free_irq(pdev->irq, dd); 642 if (pdev->irq)
643 free_irq(pdev->irq, dd);
618 644
619bail_iounmap: 645bail_iounmap:
620 iounmap((volatile void __iomem *) dd->ipath_kregbase); 646 iounmap((volatile void __iomem *) dd->ipath_kregbase);
@@ -654,6 +680,10 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
654 ipath_disable_wc(dd); 680 ipath_disable_wc(dd);
655 } 681 }
656 682
683 if (dd->ipath_spectriggerhit)
684 dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
685 dd->ipath_spectriggerhit);
686
657 if (dd->ipath_pioavailregs_dma) { 687 if (dd->ipath_pioavailregs_dma) {
658 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 688 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
659 (void *) dd->ipath_pioavailregs_dma, 689 (void *) dd->ipath_pioavailregs_dma,
@@ -706,6 +736,8 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
706 tmpp = dd->ipath_pageshadow; 736 tmpp = dd->ipath_pageshadow;
707 dd->ipath_pageshadow = NULL; 737 dd->ipath_pageshadow = NULL;
708 vfree(tmpp); 738 vfree(tmpp);
739
740 dd->ipath_egrtidbase = NULL;
709 } 741 }
710 742
711 /* 743 /*
@@ -738,7 +770,6 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
738 */ 770 */
739 ipath_shutdown_device(dd); 771 ipath_shutdown_device(dd);
740 772
741 cancel_delayed_work(&dd->status_work);
742 flush_scheduled_work(); 773 flush_scheduled_work();
743 774
744 if (dd->verbs_dev) 775 if (dd->verbs_dev)
@@ -823,20 +854,8 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
823 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 854 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
824 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 855 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
825 } 856 }
826 857 /* on some older chips, update may not happen after cancel */
827 /* 858 ipath_force_pio_avail_update(dd);
828 * Disable PIOAVAILUPD, then re-enable, reading scratch in
829 * between. This seems to avoid a chip timing race that causes
830 * pioavail updates to memory to stop. We xor as we don't
831 * know the state of the bit when we're called.
832 */
833 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
834 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
835 dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
836 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
837 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
838 dd->ipath_sendctrl);
839 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
840} 859}
841 860
842/** 861/**
@@ -873,18 +892,52 @@ int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
873 (unsigned long long) ipath_read_kreg64( 892 (unsigned long long) ipath_read_kreg64(
874 dd, dd->ipath_kregs->kr_ibcctrl), 893 dd, dd->ipath_kregs->kr_ibcctrl),
875 (unsigned long long) val, 894 (unsigned long long) val,
876 ipath_ibcstatus_str[val & 0xf]); 895 ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
877 } 896 }
878 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT; 897 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
879} 898}
880 899
900static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
901 char *buf, size_t blen)
902{
903 static const struct {
904 ipath_err_t err;
905 const char *msg;
906 } errs[] = {
907 { INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
908 { INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
909 { INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
910 { INFINIPATH_E_SDMABASE, "SDmaBase" },
911 { INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
912 { INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
913 { INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
914 { INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
915 { INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
916 { INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
917 { INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
918 { INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
919 };
920 int i;
921 int expected;
922 size_t bidx = 0;
923
924 for (i = 0; i < ARRAY_SIZE(errs); i++) {
925 expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
926 test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
927 if ((err & errs[i].err) && !expected)
928 bidx += snprintf(buf + bidx, blen - bidx,
929 "%s ", errs[i].msg);
930 }
931}
932
881/* 933/*
882 * Decode the error status into strings, deciding whether to always 934 * Decode the error status into strings, deciding whether to always
883 * print * it or not depending on "normal packet errors" vs everything 935 * print * it or not depending on "normal packet errors" vs everything
884 * else. Return 1 if "real" errors, otherwise 0 if only packet 936 * else. Return 1 if "real" errors, otherwise 0 if only packet
885 * errors, so caller can decide what to print with the string. 937 * errors, so caller can decide what to print with the string.
886 */ 938 */
887int ipath_decode_err(char *buf, size_t blen, ipath_err_t err) 939int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
940 ipath_err_t err)
888{ 941{
 889 int iserr = 1; 942 int iserr = 1;
890 *buf = '\0'; 943 *buf = '\0';
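
decode_sdma_errs() above replaces what would otherwise be a dozen copied if/strlcat pairs with a table walk, with one twist: SDmaDisabled is suppressed while an SDMA abort is already in progress, since it is expected then. The bare skeleton of the technique:

        /* Sketch: table-driven bitflag-to-string decoding. */
        struct bit_name {
                u64 bit;
                const char *msg;
        };

        static void decode_bits(u64 err, const struct bit_name *tbl, int n,
                                char *buf, size_t blen)
        {
                size_t used = 0;
                int i;

                for (i = 0; i < n && used < blen; i++)
                        if (err & tbl[i].bit)
                                used += scnprintf(buf + used, blen - used,
                                                  "%s ", tbl[i].msg);
        }
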
@@ -922,6 +975,8 @@ int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
922 strlcat(buf, "rbadversion ", blen); 975 strlcat(buf, "rbadversion ", blen);
923 if (err & INFINIPATH_E_RHDR) 976 if (err & INFINIPATH_E_RHDR)
924 strlcat(buf, "rhdr ", blen); 977 strlcat(buf, "rhdr ", blen);
978 if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
979 strlcat(buf, "sendspecialtrigger ", blen);
925 if (err & INFINIPATH_E_RLONGPKTLEN) 980 if (err & INFINIPATH_E_RLONGPKTLEN)
926 strlcat(buf, "rlongpktlen ", blen); 981 strlcat(buf, "rlongpktlen ", blen);
927 if (err & INFINIPATH_E_RMAXPKTLEN) 982 if (err & INFINIPATH_E_RMAXPKTLEN)
@@ -964,6 +1019,10 @@ int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
964 strlcat(buf, "hardware ", blen); 1019 strlcat(buf, "hardware ", blen);
965 if (err & INFINIPATH_E_RESET) 1020 if (err & INFINIPATH_E_RESET)
966 strlcat(buf, "reset ", blen); 1021 strlcat(buf, "reset ", blen);
1022 if (err & INFINIPATH_E_SDMAERRS)
1023 decode_sdma_errs(dd, err, buf, blen);
1024 if (err & INFINIPATH_E_INVALIDEEPCMD)
1025 strlcat(buf, "invalideepromcmd ", blen);
967done: 1026done:
968 return iserr; 1027 return iserr;
969} 1028}
@@ -1076,18 +1135,17 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
1076 u32 eflags, 1135 u32 eflags,
1077 u32 l, 1136 u32 l,
1078 u32 etail, 1137 u32 etail,
1079 u64 *rc) 1138 __le32 *rhf_addr,
1139 struct ipath_message_header *hdr)
1080{ 1140{
1081 char emsg[128]; 1141 char emsg[128];
1082 struct ipath_message_header *hdr;
1083 1142
1084 get_rhf_errstring(eflags, emsg, sizeof emsg); 1143 get_rhf_errstring(eflags, emsg, sizeof emsg);
1085 hdr = (struct ipath_message_header *)&rc[1];
1086 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u " 1144 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
1087 "tlen=%x opcode=%x egridx=%x: %s\n", 1145 "tlen=%x opcode=%x egridx=%x: %s\n",
1088 eflags, l, 1146 eflags, l,
1089 ipath_hdrget_rcv_type((__le32 *) rc), 1147 ipath_hdrget_rcv_type(rhf_addr),
1090 ipath_hdrget_length_in_bytes((__le32 *) rc), 1148 ipath_hdrget_length_in_bytes(rhf_addr),
1091 be32_to_cpu(hdr->bth[0]) >> 24, 1149 be32_to_cpu(hdr->bth[0]) >> 24,
1092 etail, emsg); 1150 etail, emsg);
1093 1151
@@ -1112,55 +1170,52 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
1112 */ 1170 */
1113void ipath_kreceive(struct ipath_portdata *pd) 1171void ipath_kreceive(struct ipath_portdata *pd)
1114{ 1172{
1115 u64 *rc;
1116 struct ipath_devdata *dd = pd->port_dd; 1173 struct ipath_devdata *dd = pd->port_dd;
1174 __le32 *rhf_addr;
1117 void *ebuf; 1175 void *ebuf;
1118 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */ 1176 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
1119 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */ 1177 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
1120 u32 etail = -1, l, hdrqtail; 1178 u32 etail = -1, l, hdrqtail;
1121 struct ipath_message_header *hdr; 1179 struct ipath_message_header *hdr;
1122 u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0; 1180 u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
1123 static u64 totcalls; /* stats, may eventually remove */ 1181 static u64 totcalls; /* stats, may eventually remove */
1124 1182 int last;
1125 if (!dd->ipath_hdrqtailptr) {
1126 ipath_dev_err(dd,
1127 "hdrqtailptr not set, can't do receives\n");
1128 goto bail;
1129 }
1130 1183
1131 l = pd->port_head; 1184 l = pd->port_head;
1132 hdrqtail = ipath_get_rcvhdrtail(pd); 1185 rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
1133 if (l == hdrqtail) 1186 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1134 goto bail; 1187 u32 seq = ipath_hdrget_seq(rhf_addr);
1135
1136reloop:
1137 for (i = 0; l != hdrqtail; i++) {
1138 u32 qp;
1139 u8 *bthbytes;
1140 1188
1141 rc = (u64 *) (pd->port_rcvhdrq + (l << 2)); 1189 if (seq != pd->port_seq_cnt)
1142 hdr = (struct ipath_message_header *)&rc[1]; 1190 goto bail;
1143 /* 1191 hdrqtail = 0;
1144 * could make a network order version of IPATH_KD_QP, and 1192 } else {
1145 * do the obvious shift before masking to speed this up. 1193 hdrqtail = ipath_get_rcvhdrtail(pd);
1146 */ 1194 if (l == hdrqtail)
1147 qp = ntohl(hdr->bth[1]) & 0xffffff; 1195 goto bail;
1148 bthbytes = (u8 *) hdr->bth; 1196 smp_rmb();
1197 }
1149 1198
1150 eflags = ipath_hdrget_err_flags((__le32 *) rc); 1199reloop:
1151 etype = ipath_hdrget_rcv_type((__le32 *) rc); 1200 for (last = 0, i = 1; !last; i++) {
1201 hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
1202 eflags = ipath_hdrget_err_flags(rhf_addr);
1203 etype = ipath_hdrget_rcv_type(rhf_addr);
1152 /* total length */ 1204 /* total length */
1153 tlen = ipath_hdrget_length_in_bytes((__le32 *) rc); 1205 tlen = ipath_hdrget_length_in_bytes(rhf_addr);
1154 ebuf = NULL; 1206 ebuf = NULL;
1155 if (etype != RCVHQ_RCV_TYPE_EXPECTED) { 1207 if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
1208 ipath_hdrget_use_egr_buf(rhf_addr) :
1209 (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
1156 /* 1210 /*
1157 * it turns out that the chips uses an eager buffer 1211 * It turns out that the chip uses an eager buffer
1158 * for all non-expected packets, whether it "needs" 1212 * for all non-expected packets, whether it "needs"
1159 * one or not. So always get the index, but don't 1213 * one or not. So always get the index, but don't
1160 * set ebuf (so we try to copy data) unless the 1214 * set ebuf (so we try to copy data) unless the
1161 * length requires it. 1215 * length requires it.
1162 */ 1216 */
1163 etail = ipath_hdrget_index((__le32 *) rc); 1217 etail = ipath_hdrget_index(rhf_addr);
1218 updegr = 1;
1164 if (tlen > sizeof(*hdr) || 1219 if (tlen > sizeof(*hdr) ||
1165 etype == RCVHQ_RCV_TYPE_NON_KD) 1220 etype == RCVHQ_RCV_TYPE_NON_KD)
1166 ebuf = ipath_get_egrbuf(dd, etail); 1221 ebuf = ipath_get_egrbuf(dd, etail);
@@ -1171,75 +1226,91 @@ reloop:
1171 * packets; only ipathhdrerr should be set. 1226 * packets; only ipathhdrerr should be set.
1172 */ 1227 */
1173 1228
1174 if (etype != RCVHQ_RCV_TYPE_NON_KD && etype != 1229 if (etype != RCVHQ_RCV_TYPE_NON_KD &&
1175 RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver( 1230 etype != RCVHQ_RCV_TYPE_ERROR &&
1176 hdr->iph.ver_port_tid_offset) != 1231 ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
1177 IPS_PROTO_VERSION) { 1232 IPS_PROTO_VERSION)
1178 ipath_cdbg(PKT, "Bad InfiniPath protocol version " 1233 ipath_cdbg(PKT, "Bad InfiniPath protocol version "
1179 "%x\n", etype); 1234 "%x\n", etype);
1180 }
1181 1235
1182 if (unlikely(eflags)) 1236 if (unlikely(eflags))
1183 ipath_rcv_hdrerr(dd, eflags, l, etail, rc); 1237 ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
1184 else if (etype == RCVHQ_RCV_TYPE_NON_KD) { 1238 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
1185 ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen); 1239 ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
1186 if (dd->ipath_lli_counter) 1240 if (dd->ipath_lli_counter)
1187 dd->ipath_lli_counter--; 1241 dd->ipath_lli_counter--;
1242 } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
1243 u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
1244 u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
1188 ipath_cdbg(PKT, "typ %x, opcode %x (eager, " 1245 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
1189 "qp=%x), len %x; ignored\n", 1246 "qp=%x), len %x; ignored\n",
1190 etype, bthbytes[0], qp, tlen); 1247 etype, opcode, qp, tlen);
1191 } 1248 }
1192 else if (etype == RCVHQ_RCV_TYPE_EAGER)
1193 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
1194 "qp=%x), len %x; ignored\n",
1195 etype, bthbytes[0], qp, tlen);
1196 else if (etype == RCVHQ_RCV_TYPE_EXPECTED) 1249 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
1197 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", 1250 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
1198 be32_to_cpu(hdr->bth[0]) & 0xff); 1251 be32_to_cpu(hdr->bth[0]) >> 24);
1199 else { 1252 else {
1200 /* 1253 /*
1201 * error packet, type of error unknown. 1254 * error packet, type of error unknown.
1202 * Probably type 3, but we don't know, so don't 1255 * Probably type 3, but we don't know, so don't
1203 * even try to print the opcode, etc. 1256 * even try to print the opcode, etc.
1257 * Usually caused by a "bad packet", that has no
1258 * BTH, when the LRH says it should.
1204 */ 1259 */
1205 ipath_dbg("Error Pkt, but no eflags! egrbuf %x, " 1260 ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
1206 "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; " 1261 " %x, len %x hdrq+%x rhf: %Lx\n",
1207 "hdr %llx %llx %llx %llx %llx\n", 1262 etail, tlen, l,
1208 etail, tlen, (unsigned long) rc, l, 1263 le64_to_cpu(*(__le64 *) rhf_addr));
1209 (unsigned long long) rc[0], 1264 if (ipath_debug & __IPATH_ERRPKTDBG) {
1210 (unsigned long long) rc[1], 1265 u32 j, *d, dw = rsize-2;
1211 (unsigned long long) rc[2], 1266 if (rsize > (tlen>>2))
1212 (unsigned long long) rc[3], 1267 dw = tlen>>2;
1213 (unsigned long long) rc[4], 1268 d = (u32 *)hdr;
1214 (unsigned long long) rc[5]); 1269 printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
1270 dw);
1271 for (j = 0; j < dw; j++)
1272 printk(KERN_DEBUG "%8x%s", d[j],
1273 (j%8) == 7 ? "\n" : " ");
1274 printk(KERN_DEBUG ".\n");
1275 }
1215 } 1276 }
1216 l += rsize; 1277 l += rsize;
1217 if (l >= maxcnt) 1278 if (l >= maxcnt)
1218 l = 0; 1279 l = 0;
1219 if (etype != RCVHQ_RCV_TYPE_EXPECTED) 1280 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1220 updegr = 1; 1281 l + dd->ipath_rhf_offset;
1282 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1283 u32 seq = ipath_hdrget_seq(rhf_addr);
1284
1285 if (++pd->port_seq_cnt > 13)
1286 pd->port_seq_cnt = 1;
1287 if (seq != pd->port_seq_cnt)
1288 last = 1;
1289 } else if (l == hdrqtail)
1290 last = 1;
1221 /* 1291 /*
1222 * update head regs on last packet, and every 16 packets. 1292 * update head regs on last packet, and every 16 packets.
1223 * Reduce bus traffic, while still trying to prevent 1293 * Reduce bus traffic, while still trying to prevent
1224 * rcvhdrq overflows, for when the queue is nearly full 1294 * rcvhdrq overflows, for when the queue is nearly full
1225 */ 1295 */
1226 if (l == hdrqtail || (i && !(i&0xf))) { 1296 if (last || !(i & 0xf)) {
1227 u64 lval; 1297 u64 lval = l;
1228 if (l == hdrqtail) 1298
1229 /* request IBA6120 interrupt only on last */ 1299 /* request IBA6120 and 7220 interrupt only on last */
1230 lval = dd->ipath_rhdrhead_intr_off | l; 1300 if (last)
1231 else 1301 lval |= dd->ipath_rhdrhead_intr_off;
1232 lval = l; 1302 ipath_write_ureg(dd, ur_rcvhdrhead, lval,
1233 (void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0); 1303 pd->port_port);
1234 if (updegr) { 1304 if (updegr) {
1235 (void)ipath_write_ureg(dd, ur_rcvegrindexhead, 1305 ipath_write_ureg(dd, ur_rcvegrindexhead,
1236 etail, 0); 1306 etail, pd->port_port);
1237 updegr = 0; 1307 updegr = 0;
1238 } 1308 }
1239 } 1309 }
1240 } 1310 }
1241 1311
1242 if (!dd->ipath_rhdrhead_intr_off && !reloop) { 1312 if (!dd->ipath_rhdrhead_intr_off && !reloop &&
1313 !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1243 /* IBA6110 workaround; we can have a race clearing chip 1314 /* IBA6110 workaround; we can have a race clearing chip
1244 * interrupt with another interrupt about to be delivered, 1315 * interrupt with another interrupt about to be delivered,
1245 * and can clear it before it is delivered on the GPIO 1316 * and can clear it before it is delivered on the GPIO
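
Buried in the receive-loop rewrite above is one subtle addition: the smp_rmb() after reading the DMA'd tail. The tail index and the header entries it guards are both written by the chip; without the read barrier, the CPU may satisfy the header reads with stale data even though it has already observed the new tail value. The pattern in isolation:

        /* Sketch: order the tail read before the entry reads it guards. */
        hdrqtail = ipath_get_rcvhdrtail(pd);    /* index DMA'd by the chip */
        if (l == hdrqtail)
                return;                         /* queue empty */
        smp_rmb();                              /* tail first, then entries */
        hdr = read_hdrq_entry(pd, l);           /* hypothetical accessor */
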
@@ -1301,7 +1372,6 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1301 * happens when all buffers are in use, so only cpu overhead, not 1372 * happens when all buffers are in use, so only cpu overhead, not
1302 * latency or bandwidth is affected. 1373 * latency or bandwidth is affected.
1303 */ 1374 */
1304#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
1305 if (!dd->ipath_pioavailregs_dma) { 1375 if (!dd->ipath_pioavailregs_dma) {
1306 ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n"); 1376 ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
1307 return; 1377 return;
@@ -1346,7 +1416,7 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1346 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]); 1416 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
1347 else 1417 else
1348 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]); 1418 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1349 pchg = _IPATH_ALL_CHECKBITS & 1419 pchg = dd->ipath_pioavailkernel[i] &
1350 ~(dd->ipath_pioavailshadow[i] ^ piov); 1420 ~(dd->ipath_pioavailshadow[i] ^ piov);
1351 pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT; 1421 pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
1352 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) { 1422 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
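
This hunk and the allocator rework below share one data structure: a shadow of the chip's PIOAvail register that keeps two bits per send buffer, a busy bit at 2*i+1 (INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT above) and a generation bit at 2*i, flipped on every claim so a stale DMA'd snapshot of the register is detectable. The change here is assumed to narrow the comparison to kernel-owned buffers via ipath_pioavailkernel. The claim step, reduced from the loop below:

        /* Sketch: claim send buffer i in the two-bits-per-buffer shadow.
         * Caller holds ipath_pioavail_lock, so the non-atomic __ ops are
         * safe, as in the reworked loop below.
         */
        static int claim_piobuf(unsigned long *shadow, unsigned i)
        {
                if (__test_and_set_bit(2 * i + 1, shadow))
                        return -EBUSY;          /* busy bit already set */
                __change_bit(2 * i, shadow);    /* flip generation bit */
                return 0;
        }
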
@@ -1397,27 +1467,63 @@ int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1397 return ret; 1467 return ret;
1398} 1468}
1399 1469
1400/** 1470/*
1401 * ipath_getpiobuf - find an available pio buffer 1471 * debugging code and stats updates if no pio buffers available.
1402 * @dd: the infinipath device 1472 */
1403 * @pbufnum: the buffer number is placed here 1473static noinline void no_pio_bufs(struct ipath_devdata *dd)
1474{
1475 unsigned long *shadow = dd->ipath_pioavailshadow;
1476 __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
1477
1478 dd->ipath_upd_pio_shadow = 1;
1479
1480 /*
1481 * not atomic, but if we lose a stat count in a while, that's OK
1482 */
1483 ipath_stats.sps_nopiobufs++;
1484 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1485 ipath_dbg("%u pio sends with no bufavail; dmacopy: "
1486 "%llx %llx %llx %llx; shadow: %lx %lx %lx %lx\n",
1487 dd->ipath_consec_nopiobuf,
1488 (unsigned long long) le64_to_cpu(dma[0]),
1489 (unsigned long long) le64_to_cpu(dma[1]),
1490 (unsigned long long) le64_to_cpu(dma[2]),
1491 (unsigned long long) le64_to_cpu(dma[3]),
1492 shadow[0], shadow[1], shadow[2], shadow[3]);
1493 /*
1494 * 4 buffers per byte, 4 registers above, cover rest
1495 * below
1496 */
1497 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1498 (sizeof(shadow[0]) * 4 * 4))
1499 ipath_dbg("2nd group: dmacopy: %llx %llx "
1500 "%llx %llx; shadow: %lx %lx %lx %lx\n",
1501 (unsigned long long)le64_to_cpu(dma[4]),
1502 (unsigned long long)le64_to_cpu(dma[5]),
1503 (unsigned long long)le64_to_cpu(dma[6]),
1504 (unsigned long long)le64_to_cpu(dma[7]),
1505 shadow[4], shadow[5], shadow[6],
1506 shadow[7]);
1507 }
1508}
1509
1510/*
1511 * common code for normal driver pio buffer allocation, and reserved
1512 * allocation.
1404 * 1513 *
1405 * do appropriate marking as busy, etc. 1514 * do appropriate marking as busy, etc.
1406 * returns buffer number if one found (>=0), negative number is error.	 1515 * returns the buffer address if one is found, NULL otherwise.
1407 * Used by ipath_layer_send
1408 */ 1516 */
1409u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum) 1517static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
1518 u32 *pbufnum, u32 first, u32 last, u32 firsti)
1410{ 1519{
1411 int i, j, starti, updated = 0; 1520 int i, j, updated = 0;
1412 unsigned piobcnt, iter; 1521 unsigned piobcnt;
1413 unsigned long flags; 1522 unsigned long flags;
1414 unsigned long *shadow = dd->ipath_pioavailshadow; 1523 unsigned long *shadow = dd->ipath_pioavailshadow;
1415 u32 __iomem *buf; 1524 u32 __iomem *buf;
1416 1525
1417 piobcnt = (unsigned)(dd->ipath_piobcnt2k 1526 piobcnt = last - first;
1418 + dd->ipath_piobcnt4k);
1419 starti = dd->ipath_lastport_piobuf;
1420 iter = piobcnt - starti;
1421 if (dd->ipath_upd_pio_shadow) { 1527 if (dd->ipath_upd_pio_shadow) {
1422 /* 1528 /*
1423 * Minor optimization. If we had no buffers on last call, 1529 * Minor optimization. If we had no buffers on last call,
@@ -1425,12 +1531,10 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
1425 * if no buffers were updated, to be paranoid 1531 * if no buffers were updated, to be paranoid
1426 */ 1532 */
1427 ipath_update_pio_bufs(dd); 1533 ipath_update_pio_bufs(dd);
1428 /* we scanned here, don't do it at end of scan */ 1534 updated++;
1429 updated = 1; 1535 i = first;
1430 i = starti;
1431 } else 1536 } else
1432 i = dd->ipath_lastpioindex; 1537 i = firsti;
1433
1434rescan: 1538rescan:
1435 /* 1539 /*
1436 * while test_and_set_bit() is atomic, we do that and then the 1540 * while test_and_set_bit() is atomic, we do that and then the
@@ -1438,104 +1542,141 @@ rescan:
1438 * of the remaining armlaunch errors. 1542 * of the remaining armlaunch errors.
1439 */ 1543 */
1440 spin_lock_irqsave(&ipath_pioavail_lock, flags); 1544 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1441 for (j = 0; j < iter; j++, i++) { 1545 for (j = 0; j < piobcnt; j++, i++) {
1442 if (i >= piobcnt) 1546 if (i >= last)
1443 i = starti; 1547 i = first;
1444 /* 1548 if (__test_and_set_bit((2 * i) + 1, shadow))
1445 * To avoid bus lock overhead, we first find a candidate
1446 * buffer, then do the test and set, and continue if that
1447 * fails.
1448 */
1449 if (test_bit((2 * i) + 1, shadow) ||
1450 test_and_set_bit((2 * i) + 1, shadow))
1451 continue; 1549 continue;
1452 /* flip generation bit */ 1550 /* flip generation bit */
1453 change_bit(2 * i, shadow); 1551 __change_bit(2 * i, shadow);
1454 break; 1552 break;
1455 } 1553 }
1456 spin_unlock_irqrestore(&ipath_pioavail_lock, flags); 1554 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1457 1555
1458 if (j == iter) { 1556 if (j == piobcnt) {
1459 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1460
1461 /*
1462 * first time through; shadow exhausted, but may be real
1463 * buffers available, so go see; if any updated, rescan
1464 * (once)
1465 */
1466 if (!updated) { 1557 if (!updated) {
1558 /*
 1559			 * first time through; shadow exhausted, but buffers may
 1560			 * be available; try an update and then rescan.
1561 */
1467 ipath_update_pio_bufs(dd); 1562 ipath_update_pio_bufs(dd);
1468 updated = 1; 1563 updated++;
1469 i = starti; 1564 i = first;
1470 goto rescan; 1565 goto rescan;
1471 } 1566 } else if (updated == 1 && piobcnt <=
1472 dd->ipath_upd_pio_shadow = 1; 1567 ((dd->ipath_sendctrl
1473 /* 1568 >> INFINIPATH_S_UPDTHRESH_SHIFT) &
1474 * not atomic, but if we lose one once in a while, that's OK 1569 INFINIPATH_S_UPDTHRESH_MASK)) {
1475 */
1476 ipath_stats.sps_nopiobufs++;
1477 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1478 ipath_dbg(
1479 "%u pio sends with no bufavail; dmacopy: "
1480 "%llx %llx %llx %llx; shadow: "
1481 "%lx %lx %lx %lx\n",
1482 dd->ipath_consec_nopiobuf,
1483 (unsigned long long) le64_to_cpu(dma[0]),
1484 (unsigned long long) le64_to_cpu(dma[1]),
1485 (unsigned long long) le64_to_cpu(dma[2]),
1486 (unsigned long long) le64_to_cpu(dma[3]),
1487 shadow[0], shadow[1], shadow[2],
1488 shadow[3]);
1489 /* 1570 /*
1490 * 4 buffers per byte, 4 registers above, cover rest 1571 * for chips supporting and using the update
1491 * below 1572 * threshold we need to force an update of the
1573 * in-memory copy if the count is less than the
 1574			 * threshold, then check one more time.
1492 */ 1575 */
1493 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 1576 ipath_force_pio_avail_update(dd);
1494 (sizeof(shadow[0]) * 4 * 4)) 1577 ipath_update_pio_bufs(dd);
1495 ipath_dbg("2nd group: dmacopy: %llx %llx " 1578 updated++;
1496 "%llx %llx; shadow: %lx %lx " 1579 i = first;
1497 "%lx %lx\n", 1580 goto rescan;
1498 (unsigned long long)
1499 le64_to_cpu(dma[4]),
1500 (unsigned long long)
1501 le64_to_cpu(dma[5]),
1502 (unsigned long long)
1503 le64_to_cpu(dma[6]),
1504 (unsigned long long)
1505 le64_to_cpu(dma[7]),
1506 shadow[4], shadow[5],
1507 shadow[6], shadow[7]);
1508 } 1581 }
1582
1583 no_pio_bufs(dd);
1509 buf = NULL; 1584 buf = NULL;
1510 goto bail; 1585 } else {
1586 if (i < dd->ipath_piobcnt2k)
1587 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1588 i * dd->ipath_palign);
1589 else
1590 buf = (u32 __iomem *)
1591 (dd->ipath_pio4kbase +
1592 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1593 if (pbufnum)
1594 *pbufnum = i;
1511 } 1595 }
1512 1596
1513 /* 1597 return buf;
1514 * set next starting place. Since it's just an optimization, 1598}
1515 * it doesn't matter who wins on this, so no locking
1516 */
1517 dd->ipath_lastpioindex = i + 1;
1518 if (dd->ipath_upd_pio_shadow)
1519 dd->ipath_upd_pio_shadow = 0;
1520 if (dd->ipath_consec_nopiobuf)
1521 dd->ipath_consec_nopiobuf = 0;
1522 if (i < dd->ipath_piobcnt2k)
1523 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1524 i * dd->ipath_palign);
1525 else
1526 buf = (u32 __iomem *)
1527 (dd->ipath_pio4kbase +
1528 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1529 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1530 i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1531 if (pbufnum)
1532 *pbufnum = i;
1533 1599
1534bail: 1600/**
1601 * ipath_getpiobuf - find an available pio buffer
1602 * @dd: the infinipath device
1603 * @plen: the size of the PIO buffer needed in 32-bit words
1604 * @pbufnum: the buffer number is placed here
1605 */
1606u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
1607{
1608 u32 __iomem *buf;
1609 u32 pnum, nbufs;
1610 u32 first, lasti;
1611
1612 if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
1613 first = dd->ipath_piobcnt2k;
1614 lasti = dd->ipath_lastpioindexl;
1615 } else {
1616 first = 0;
1617 lasti = dd->ipath_lastpioindex;
1618 }
1619 nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
1620 buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
1621
1622 if (buf) {
1623 /*
1624 * Set next starting place. It's just an optimization,
1625 * it doesn't matter who wins on this, so no locking
1626 */
1627 if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
1628 dd->ipath_lastpioindexl = pnum + 1;
1629 else
1630 dd->ipath_lastpioindex = pnum + 1;
1631 if (dd->ipath_upd_pio_shadow)
1632 dd->ipath_upd_pio_shadow = 0;
1633 if (dd->ipath_consec_nopiobuf)
1634 dd->ipath_consec_nopiobuf = 0;
1635 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1636 pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1637 if (pbufnum)
1638 *pbufnum = pnum;
1639
1640 }
1535 return buf; 1641 return buf;
1536} 1642}
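
The allocator above leans on a shadow bitmap that packs two bits per send buffer: an odd "busy" bit that is test-and-set to claim the buffer, and an even "generation" bit that is flipped on every claim so stale in-memory copies can be detected. The 2K and 4K buffers share one numbering space and are simply scanned over disjoint [first, last) ranges. A minimal user-space sketch of that claim logic (names and sizes here are illustrative, and the kernel's spinlock is elided):

	#include <stdint.h>
	#include <stdio.h>

	#define NBUFS 16
	static uint64_t shadow;      /* 2 bits per buffer: gen, busy */

	static int claim_buf(unsigned first, unsigned last, unsigned start)
	{
		unsigned i = start, n;

		for (n = 0; n < last - first; n++, i++) {
			if (i >= last)
				i = first;                   /* wrap within range */
			if (shadow & (1ULL << (2 * i + 1)))
				continue;                    /* busy, skip */
			shadow |= 1ULL << (2 * i + 1);       /* mark busy */
			shadow ^= 1ULL << (2 * i);           /* flip generation */
			return (int)i;
		}
		return -1;          /* shadow exhausted: caller rescans */
	}

	int main(void)
	{
		printf("claimed %d\n", claim_buf(0, NBUFS, 5));   /* 5 */
		printf("claimed %d\n", claim_buf(0, NBUFS, 5));   /* 6 */
		return 0;
	}
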
1537 1643
1538/** 1644/**
1645 * ipath_chg_pioavailkernel - change which send buffers are available for kernel
1646 * @dd: the infinipath device
1647 * @start: the starting send buffer number
1648 * @len: the number of send buffers
1649 * @avail: true if the buffers are available for kernel use, false otherwise
1650 */
1651void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1652 unsigned len, int avail)
1653{
1654 unsigned long flags;
1655 unsigned end;
1656
1657 /* There are two bits per send buffer (busy and generation) */
1658 start *= 2;
1659 len *= 2;
1660 end = start + len;
1661
1662 /* Set or clear the generation bits. */
1663 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1664 while (start < end) {
1665 if (avail) {
1666 __clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
1667 dd->ipath_pioavailshadow);
1668 __set_bit(start, dd->ipath_pioavailkernel);
1669 } else {
1670 __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
1671 dd->ipath_pioavailshadow);
1672 __clear_bit(start, dd->ipath_pioavailkernel);
1673 }
1674 start += 2;
1675 }
1676 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1677}
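
ipath_chg_pioavailkernel walks that same two-bit stride: buffer n owns bits 2n and 2n+1, which is why start and len are doubled before the loop. Making a buffer unavailable to the kernel sets its busy bit in the shadow and clears its bit in the parallel pioavailkernel map (and vice versa); the hunk at new line 1419 then masks pchg against ipath_pioavailkernel so user-owned buffers never look claimable. A condensed sketch of the range marking, assuming a busy-bit offset of 1 like INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT:

	#include <stdint.h>

	#define BUSY_SHIFT 1	/* mirrors INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT */

	static void chg_avail(uint64_t *shadow, uint64_t *kernmap,
			      unsigned start, unsigned len, int avail)
	{
		unsigned bit = start * 2, end = bit + len * 2;

		for (; bit < end; bit += 2) {
			if (avail) {
				*shadow &= ~(1ULL << (bit + BUSY_SHIFT));
				*kernmap |= 1ULL << bit;
			} else {
				*shadow |= 1ULL << (bit + BUSY_SHIFT);
				*kernmap &= ~(1ULL << bit);
			}
		}
	}
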
1678
1679/**
1539 * ipath_create_rcvhdrq - create a receive header queue 1680 * ipath_create_rcvhdrq - create a receive header queue
1540 * @dd: the infinipath device 1681 * @dd: the infinipath device
1541 * @pd: the port data 1682 * @pd: the port data
@@ -1566,19 +1707,27 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1566 ret = -ENOMEM; 1707 ret = -ENOMEM;
1567 goto bail; 1708 goto bail;
1568 } 1709 }
1569 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent( 1710
1570 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL); 1711 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1571 if (!pd->port_rcvhdrtail_kvaddr) { 1712 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
1572 ipath_dev_err(dd, "attempt to allocate 1 page " 1713 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1573 "for port %u rcvhdrqtailaddr failed\n", 1714 GFP_KERNEL);
1574 pd->port_port); 1715 if (!pd->port_rcvhdrtail_kvaddr) {
1575 ret = -ENOMEM; 1716 ipath_dev_err(dd, "attempt to allocate 1 page "
1576 dma_free_coherent(&dd->pcidev->dev, amt, 1717 "for port %u rcvhdrqtailaddr "
1577 pd->port_rcvhdrq, pd->port_rcvhdrq_phys); 1718 "failed\n", pd->port_port);
1578 pd->port_rcvhdrq = NULL; 1719 ret = -ENOMEM;
1579 goto bail; 1720 dma_free_coherent(&dd->pcidev->dev, amt,
1721 pd->port_rcvhdrq,
1722 pd->port_rcvhdrq_phys);
1723 pd->port_rcvhdrq = NULL;
1724 goto bail;
1725 }
1726 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
1727 ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
1728 "physical\n", pd->port_port,
1729 (unsigned long long) phys_hdrqtail);
1580 } 1730 }
1581 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
1582 1731
1583 pd->port_rcvhdrq_size = amt; 1732 pd->port_rcvhdrq_size = amt;
1584 1733
@@ -1588,10 +1737,6 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1588 (unsigned long) pd->port_rcvhdrq_phys, 1737 (unsigned long) pd->port_rcvhdrq_phys,
1589 (unsigned long) pd->port_rcvhdrq_size, 1738 (unsigned long) pd->port_rcvhdrq_size,
1590 pd->port_port); 1739 pd->port_port);
1591
1592 ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
1593 pd->port_port,
1594 (unsigned long long) phys_hdrqtail);
1595 } 1740 }
1596 else 1741 else
1597 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; " 1742 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
@@ -1615,7 +1760,6 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1615 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr, 1760 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1616 pd->port_port, pd->port_rcvhdrq_phys); 1761 pd->port_port, pd->port_rcvhdrq_phys);
1617 1762
1618 ret = 0;
1619bail: 1763bail:
1620 return ret; 1764 return ret;
1621} 1765}
@@ -1632,52 +1776,149 @@ bail:
1632 */ 1776 */
1633void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl) 1777void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
1634{ 1778{
1779 unsigned long flags;
1780
1781 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
1782 ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
1783 goto bail;
1784 }
1785 /*
1786 * If we have SDMA, and it's not disabled, we have to kick off the
1787 * abort state machine, provided we aren't already aborting.
1788 * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
 1789	 * we skip the rest of this routine; it is already "in progress".
1790 */
1791 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
1792 int skip_cancel;
1793 u64 *statp = &dd->ipath_sdma_status;
1794
1795 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1796 skip_cancel =
1797 !test_bit(IPATH_SDMA_DISABLED, statp) &&
1798 test_and_set_bit(IPATH_SDMA_ABORTING, statp);
1799 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1800 if (skip_cancel)
1801 goto bail;
1802 }
1803
1635 ipath_dbg("Cancelling all in-progress send buffers\n"); 1804 ipath_dbg("Cancelling all in-progress send buffers\n");
1636 dd->ipath_lastcancel = jiffies+HZ/2; /* skip armlaunch errs a bit */ 1805
1806 /* skip armlaunch errs for a while */
1807 dd->ipath_lastcancel = jiffies + HZ / 2;
1808
1637 /* 1809 /*
1638 * the abort bit is auto-clearing. We read scratch to be sure 1810 * The abort bit is auto-clearing. We also don't want pioavail
1639 * that cancels and the abort have taken effect in the chip. 1811 * update happening during this, and we don't want any other
1812 * sends going out, so turn those off for the duration. We read
1813 * the scratch register to be sure that cancels and the abort
 1640	 */	 1814	 * have taken effect in the chip. Otherwise the rest is the same
 1815	 * as ipath_force_pio_avail_update().
1640 */ 1816 */
1817 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1818 dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
1819 | INFINIPATH_S_PIOENABLE);
1641 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1820 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1642 INFINIPATH_S_ABORT); 1821 dd->ipath_sendctrl | INFINIPATH_S_ABORT);
1643 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1822 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1823 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1824
1825 /* disarm all send buffers */
1644 ipath_disarm_piobufs(dd, 0, 1826 ipath_disarm_piobufs(dd, 0,
1645 (unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k)); 1827 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1646 if (restore_sendctrl) /* else done by caller later */ 1828
1829 if (restore_sendctrl) {
1830 /* else done by caller later if needed */
1831 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1832 dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
1833 INFINIPATH_S_PIOENABLE;
1647 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1834 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1648 dd->ipath_sendctrl); 1835 dd->ipath_sendctrl);
1836 /* and again, be sure all have hit the chip */
1837 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1838 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1839 }
1649 1840
1650 /* and again, be sure all have hit the chip */ 1841 if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
1651 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1842 !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
1843 test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
1844 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1845 /* only wait so long for intr */
1846 dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
1847 dd->ipath_sdma_reset_wait = 200;
1848 __set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
1849 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
1850 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
1851 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1852 }
1853bail:;
1652} 1854}
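
The cancel sequence above is order-sensitive, and each register write is pushed to the chip with a scratch read before the next step depends on it. A condensed outline (register and flag names as in the code, logic abbreviated):

	/*
	 * 1. sendctrl &= ~(PIOBUFAVAILUPD | PIOENABLE)   (under sendctrl_lock)
	 * 2. write kr_sendctrl with S_ABORT or'd in      (ABORT self-clears)
	 * 3. read kr_scratch                             (flush posted write)
	 * 4. disarm all 2k + 4k buffers
	 * 5. if restore_sendctrl: or the bits back in, write, scratch-read
	 * 6. if SDMA is running: mark DISARMED, schedule the abort tasklet
	 */
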
1653 1855
1856/*
1857 * Force an update of in-memory copy of the pioavail registers, when
1858 * needed for any of a variety of reasons. We read the scratch register
1859 * to make it highly likely that the update will have happened by the
1860 * time we return. If already off (as in cancel_sends above), this
1861 * routine is a nop, on the assumption that the caller will "do the
1862 * right thing".
1863 */
1864void ipath_force_pio_avail_update(struct ipath_devdata *dd)
1865{
1866 unsigned long flags;
1867
1868 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1869 if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
1870 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1871 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
1872 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1873 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1874 dd->ipath_sendctrl);
1875 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1876 }
1877 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1878}
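
The kr_scratch readback after each write is the standard defense against PCI posted writes: the write is only guaranteed to have reached the device once a read from it completes. A generic sketch of the toggle-and-flush idiom; the MMIO pointers here are placeholders, not driver names:

	#include <stdint.h>

	static inline void toggle_and_flush(volatile uint64_t *ctrl,
					    volatile uint64_t *scratch,
					    uint64_t bit)
	{
		uint64_t v = *ctrl;

		*ctrl = v & ~bit;   /* clear: chip re-arms the update */
		(void)*scratch;     /* read forces the write to the device */
		*ctrl = v;          /* restore */
		(void)*scratch;
	}
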
1654 1879
1655static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) 1880static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
1881 int linitcmd)
1656{ 1882{
1883 u64 mod_wd;
1657 static const char *what[4] = { 1884 static const char *what[4] = {
1658 [0] = "NOP", 1885 [0] = "NOP",
1659 [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN", 1886 [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
1660 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED", 1887 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
1661 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE" 1888 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
1662 }; 1889 };
1663 int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) & 1890
1664 INFINIPATH_IBCC_LINKCMD_MASK; 1891 if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
1665 1892 /*
1666 ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate " 1893 * If we are told to disable, note that so link-recovery
1667 "is %s\n", dd->ipath_unit, 1894 * code does not attempt to bring us back up.
1668 what[linkcmd], 1895 */
1669 ipath_ibcstatus_str[ 1896 preempt_disable();
1670 (ipath_read_kreg64 1897 dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
1671 (dd, dd->ipath_kregs->kr_ibcstatus) >> 1898 preempt_enable();
1672 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & 1899 } else if (linitcmd) {
1673 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); 1900 /*
1674 /* flush all queued sends when going to DOWN to be sure that 1901 * Any other linkinitcmd will lead to LINKDOWN and then
1675 * they don't block MAD packets */ 1902 * to INIT (if all is well), so clear flag to let
1676 if (linkcmd == INFINIPATH_IBCC_LINKCMD_DOWN) 1903 * link-recovery code attempt to bring us back up.
1677 ipath_cancel_sends(dd, 1); 1904 */
1905 preempt_disable();
1906 dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
1907 preempt_enable();
1908 }
1909
1910 mod_wd = (linkcmd << dd->ibcc_lc_shift) |
1911 (linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1912 ipath_cdbg(VERBOSE,
1913 "Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
1914 dd->ipath_unit, what[linkcmd], linitcmd,
1915 ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
1916 ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
1678 1917
1679 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 1918 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1680 dd->ipath_ibcctrl | which); 1919 dd->ipath_ibcctrl | mod_wd);
1920 /* read from chip so write is flushed */
1921 (void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
1681} 1922}
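
linkcmd and linitcmd are packed into a single kr_ibcctrl write on top of the cached control word. A tiny sketch of the packing; the shift values below are invented for illustration (the real ones are the chip-specific ibcc_lc_shift and INFINIPATH_IBCC_LINKINITCMD_SHIFT):

	#include <stdint.h>

	#define LC_SHIFT       16   /* hypothetical */
	#define LINITCMD_SHIFT 18   /* hypothetical */

	static uint64_t ibcc_word(uint64_t cached_ibcctrl,
				  unsigned linkcmd, unsigned linitcmd)
	{
		return cached_ibcctrl |
		       ((uint64_t)linkcmd << LC_SHIFT) |
		       ((uint64_t)linitcmd << LINITCMD_SHIFT);
	}
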
1682 1923
1683int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate) 1924int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
@@ -1687,30 +1928,28 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1687 1928
1688 switch (newstate) { 1929 switch (newstate) {
1689 case IPATH_IB_LINKDOWN_ONLY: 1930 case IPATH_IB_LINKDOWN_ONLY:
1690 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN << 1931 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
1691 INFINIPATH_IBCC_LINKCMD_SHIFT);
1692 /* don't wait */ 1932 /* don't wait */
1693 ret = 0; 1933 ret = 0;
1694 goto bail; 1934 goto bail;
1695 1935
1696 case IPATH_IB_LINKDOWN: 1936 case IPATH_IB_LINKDOWN:
1697 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL << 1937 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
1698 INFINIPATH_IBCC_LINKINITCMD_SHIFT); 1938 INFINIPATH_IBCC_LINKINITCMD_POLL);
1699 /* don't wait */ 1939 /* don't wait */
1700 ret = 0; 1940 ret = 0;
1701 goto bail; 1941 goto bail;
1702 1942
1703 case IPATH_IB_LINKDOWN_SLEEP: 1943 case IPATH_IB_LINKDOWN_SLEEP:
1704 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP << 1944 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
1705 INFINIPATH_IBCC_LINKINITCMD_SHIFT); 1945 INFINIPATH_IBCC_LINKINITCMD_SLEEP);
1706 /* don't wait */ 1946 /* don't wait */
1707 ret = 0; 1947 ret = 0;
1708 goto bail; 1948 goto bail;
1709 1949
1710 case IPATH_IB_LINKDOWN_DISABLE: 1950 case IPATH_IB_LINKDOWN_DISABLE:
1711 ipath_set_ib_lstate(dd, 1951 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
1712 INFINIPATH_IBCC_LINKINITCMD_DISABLE << 1952 INFINIPATH_IBCC_LINKINITCMD_DISABLE);
1713 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1714 /* don't wait */ 1953 /* don't wait */
1715 ret = 0; 1954 ret = 0;
1716 goto bail; 1955 goto bail;
@@ -1725,8 +1964,8 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1725 ret = -EINVAL; 1964 ret = -EINVAL;
1726 goto bail; 1965 goto bail;
1727 } 1966 }
1728 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED << 1967 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
1729 INFINIPATH_IBCC_LINKCMD_SHIFT); 1968
1730 /* 1969 /*
1731 * Since the port can transition to ACTIVE by receiving 1970 * Since the port can transition to ACTIVE by receiving
1732 * a non VL 15 packet, wait for either state. 1971 * a non VL 15 packet, wait for either state.
@@ -1743,8 +1982,7 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1743 ret = -EINVAL; 1982 ret = -EINVAL;
1744 goto bail; 1983 goto bail;
1745 } 1984 }
1746 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE << 1985 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
1747 INFINIPATH_IBCC_LINKCMD_SHIFT);
1748 lstate = IPATH_LINKACTIVE; 1986 lstate = IPATH_LINKACTIVE;
1749 break; 1987 break;
1750 1988
@@ -1753,16 +1991,41 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1753 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK; 1991 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
1754 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 1992 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1755 dd->ipath_ibcctrl); 1993 dd->ipath_ibcctrl);
1994
1995 /* turn heartbeat off, as it causes loopback to fail */
1996 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
1997 IPATH_IB_HRTBT_OFF);
1998 /* don't wait */
1756 ret = 0; 1999 ret = 0;
1757 goto bail; // no state change to wait for 2000 goto bail;
1758 2001
1759 case IPATH_IB_LINK_EXTERNAL: 2002 case IPATH_IB_LINK_EXTERNAL:
1760 dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n"); 2003 dev_info(&dd->pcidev->dev,
2004 "Disabling IB local loopback (normal)\n");
2005 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2006 IPATH_IB_HRTBT_ON);
1761 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK; 2007 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
1762 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 2008 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1763 dd->ipath_ibcctrl); 2009 dd->ipath_ibcctrl);
2010 /* don't wait */
1764 ret = 0; 2011 ret = 0;
1765 goto bail; // no state change to wait for 2012 goto bail;
2013
2014 /*
2015 * Heartbeat can be explicitly enabled by the user via
2016 * "hrtbt_enable" "file", and if disabled, trying to enable here
2017 * will have no effect. Implicit changes (heartbeat off when
2018 * loopback on, and vice versa) are included to ease testing.
2019 */
2020 case IPATH_IB_LINK_HRTBT:
2021 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2022 IPATH_IB_HRTBT_ON);
2023 goto bail;
2024
2025 case IPATH_IB_LINK_NO_HRTBT:
2026 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2027 IPATH_IB_HRTBT_OFF);
2028 goto bail;
1766 2029
1767 default: 2030 default:
1768 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); 2031 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
@@ -1785,7 +2048,7 @@ bail:
1785 * sanity checking on this, and we don't deal with what happens to 2048 * sanity checking on this, and we don't deal with what happens to
1786 * programs that are already running when the size changes. 2049 * programs that are already running when the size changes.
1787 * NOTE: changing the MTU will usually cause the IBC to go back to 2050 * NOTE: changing the MTU will usually cause the IBC to go back to
1788 * link initialize (IPATH_IBSTATE_INIT) state... 2051 * link INIT state...
1789 */ 2052 */
1790int ipath_set_mtu(struct ipath_devdata *dd, u16 arg) 2053int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1791{ 2054{
@@ -1800,7 +2063,7 @@ int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1800 * piosize). We check that it's one of the valid IB sizes. 2063 * piosize). We check that it's one of the valid IB sizes.
1801 */ 2064 */
1802 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 && 2065 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
1803 arg != 4096) { 2066 (arg != 4096 || !ipath_mtu4096)) {
1804 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg); 2067 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
1805 ret = -EINVAL; 2068 ret = -EINVAL;
1806 goto bail; 2069 goto bail;
@@ -1816,6 +2079,8 @@ int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1816 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) { 2079 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
1817 /* Only if it's not the initial value (or reset to it) */ 2080 /* Only if it's not the initial value (or reset to it) */
1818 if (piosize != dd->ipath_init_ibmaxlen) { 2081 if (piosize != dd->ipath_init_ibmaxlen) {
2082 if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
2083 piosize = dd->ipath_init_ibmaxlen;
1819 dd->ipath_ibmaxlen = piosize; 2084 dd->ipath_ibmaxlen = piosize;
1820 changed = 1; 2085 changed = 1;
1821 } 2086 }
@@ -1829,24 +2094,17 @@ int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1829 } 2094 }
1830 2095
1831 if (changed) { 2096 if (changed) {
2097 u64 ibc = dd->ipath_ibcctrl, ibdw;
1832 /* 2098 /*
1833 * set the IBC maxpktlength to the size of our pio 2099 * update our housekeeping variables, and set IBC max
1834 * buffers in words 2100 * size, same as init code; max IBC is max we allow in
2101 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
1835 */ 2102 */
1836 u64 ibc = dd->ipath_ibcctrl; 2103 dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
2104 ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
1837 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK << 2105 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
1838 INFINIPATH_IBCC_MAXPKTLEN_SHIFT); 2106 dd->ibcc_mpl_shift);
1839 2107 ibc |= ibdw << dd->ibcc_mpl_shift;
1840 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
1841 dd->ipath_ibmaxlen = piosize;
1842 piosize /= sizeof(u32); /* in words */
1843 /*
1844 * for ICRC, which we only send in diag test pkt mode, and
1845 * we don't need to worry about that for mtu
1846 */
1847 piosize += 1;
1848
1849 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
1850 dd->ipath_ibcctrl = ibc; 2108 dd->ipath_ibcctrl = ibc;
1851 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, 2109 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1852 dd->ipath_ibcctrl); 2110 dd->ipath_ibcctrl);
@@ -1859,11 +2117,16 @@ bail:
1859 return ret; 2117 return ret;
1860} 2118}
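
A worked example of the new maxpktlen computation, assuming a 2048-byte PIO buffer:

	piosize   = 2048 bytes
	ibmaxlen  = 2048 - 2*sizeof(u32) = 2040   (drop the qword PBC)
	ibdw      = (2040 >> 2) + 1 = 511         (dwords, +1 for the ICRC)
	ibcctrl   = (old & ~(MAXPKTLEN_MASK << mpl_shift)) | (511 << mpl_shift)
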
1861 2119
1862int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc) 2120int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
1863{ 2121{
1864 dd->ipath_lid = arg; 2122 dd->ipath_lid = lid;
1865 dd->ipath_lmc = lmc; 2123 dd->ipath_lmc = lmc;
1866 2124
2125 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
2126 (~((1U << lmc) - 1)) << 16);
2127
2128 dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
2129
1867 return 0; 2130 return 0;
1868} 2131}
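
The value handed to the LIDLMC hook carries the LID in the low 16 bits and, in the high 16 bits, the complement mask of the LMC-varied bits. Worked example for lmc = 2 (only the low 16 bits of the complement survive the 32-bit shift):

	(1U << 2) - 1       = 0x0003        bits allowed to vary
	~0x0003             = 0xFFFFFFFC
	(~0x0003) << 16     = 0xFFFC0000    (32-bit truncation)
	arg = lid | 0xFFFC0000              LID in [15:0], mask in [31:16]
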
1869 2132
@@ -1925,10 +2188,8 @@ static void ipath_run_led_override(unsigned long opaque)
1925 * but leave that to per-chip functions. 2188 * but leave that to per-chip functions.
1926 */ 2189 */
1927 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); 2190 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
1928 ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & 2191 ltstate = ipath_ib_linktrstate(dd, val);
1929 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK; 2192 lstate = ipath_ib_linkstate(dd, val);
1930 lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
1931 INFINIPATH_IBCS_LINKSTATE_MASK;
1932 2193
1933 dd->ipath_f_setextled(dd, lstate, ltstate); 2194 dd->ipath_f_setextled(dd, lstate, ltstate);
1934 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff); 2195 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
@@ -1969,9 +2230,8 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
1969 dd->ipath_led_override_timer.data = (unsigned long) dd; 2230 dd->ipath_led_override_timer.data = (unsigned long) dd;
1970 dd->ipath_led_override_timer.expires = jiffies + 1; 2231 dd->ipath_led_override_timer.expires = jiffies + 1;
1971 add_timer(&dd->ipath_led_override_timer); 2232 add_timer(&dd->ipath_led_override_timer);
1972 } else { 2233 } else
1973 atomic_dec(&dd->ipath_led_override_timer_active); 2234 atomic_dec(&dd->ipath_led_override_timer_active);
1974 }
1975} 2235}
1976 2236
1977/** 2237/**
@@ -1989,6 +2249,8 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1989 2249
1990 ipath_dbg("Shutting down the device\n"); 2250 ipath_dbg("Shutting down the device\n");
1991 2251
2252 ipath_hol_up(dd); /* make sure user processes aren't suspended */
2253
1992 dd->ipath_flags |= IPATH_LINKUNK; 2254 dd->ipath_flags |= IPATH_LINKUNK;
1993 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN | 2255 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
1994 IPATH_LINKINIT | IPATH_LINKARMED | 2256 IPATH_LINKINIT | IPATH_LINKARMED |
@@ -2003,6 +2265,9 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
2003 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 2265 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
2004 dd->ipath_rcvctrl); 2266 dd->ipath_rcvctrl);
2005 2267
2268 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2269 teardown_sdma(dd);
2270
2006 /* 2271 /*
2007 * gracefully stop all sends allowing any in progress to trickle out 2272 * gracefully stop all sends allowing any in progress to trickle out
2008 * first. 2273 * first.
@@ -2020,10 +2285,16 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
2020 */ 2285 */
2021 udelay(5); 2286 udelay(5);
2022 2287
2023 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE << 2288 dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
2024 INFINIPATH_IBCC_LINKINITCMD_SHIFT); 2289
2290 ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
2025 ipath_cancel_sends(dd, 0); 2291 ipath_cancel_sends(dd, 0);
2026 2292
2293 /*
2294 * we are shutting down, so tell components that care. We don't do
 2295	 * this on just a link state change; much as with ethernet, a cable
 2296	 * unplug or the like does not change driver state
2297 */
2027 signal_ib_event(dd, IB_EVENT_PORT_ERR); 2298 signal_ib_event(dd, IB_EVENT_PORT_ERR);
2028 2299
2029 /* disable IBC */ 2300 /* disable IBC */
@@ -2038,10 +2309,20 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
2038 */ 2309 */
2039 dd->ipath_f_quiet_serdes(dd); 2310 dd->ipath_f_quiet_serdes(dd);
2040 2311
2312 /* stop all the timers that might still be running */
2313 del_timer_sync(&dd->ipath_hol_timer);
2041 if (dd->ipath_stats_timer_active) { 2314 if (dd->ipath_stats_timer_active) {
2042 del_timer_sync(&dd->ipath_stats_timer); 2315 del_timer_sync(&dd->ipath_stats_timer);
2043 dd->ipath_stats_timer_active = 0; 2316 dd->ipath_stats_timer_active = 0;
2044 } 2317 }
2318 if (dd->ipath_intrchk_timer.data) {
2319 del_timer_sync(&dd->ipath_intrchk_timer);
2320 dd->ipath_intrchk_timer.data = 0;
2321 }
2322 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2323 del_timer_sync(&dd->ipath_led_override_timer);
2324 atomic_set(&dd->ipath_led_override_timer_active, 0);
2325 }
2045 2326
2046 /* 2327 /*
2047 * clear all interrupts and errors, so that the next time the driver 2328 * clear all interrupts and errors, so that the next time the driver
@@ -2115,13 +2396,13 @@ void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
2115 ipath_cdbg(VERBOSE, "free closed port %d " 2396 ipath_cdbg(VERBOSE, "free closed port %d "
2116 "ipath_port0_skbinfo @ %p\n", pd->port_port, 2397 "ipath_port0_skbinfo @ %p\n", pd->port_port,
2117 skbinfo); 2398 skbinfo);
2118 for (e = 0; e < dd->ipath_rcvegrcnt; e++) 2399 for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
2119 if (skbinfo[e].skb) { 2400 if (skbinfo[e].skb) {
2120 pci_unmap_single(dd->pcidev, skbinfo[e].phys, 2401 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
2121 dd->ipath_ibmaxlen, 2402 dd->ipath_ibmaxlen,
2122 PCI_DMA_FROMDEVICE); 2403 PCI_DMA_FROMDEVICE);
2123 dev_kfree_skb(skbinfo[e].skb); 2404 dev_kfree_skb(skbinfo[e].skb);
2124 } 2405 }
2125 vfree(skbinfo); 2406 vfree(skbinfo);
2126 } 2407 }
2127 kfree(pd->port_tid_pg_list); 2408 kfree(pd->port_tid_pg_list);
@@ -2144,6 +2425,7 @@ static int __init infinipath_init(void)
2144 */ 2425 */
2145 idr_init(&unit_table); 2426 idr_init(&unit_table);
2146 if (!idr_pre_get(&unit_table, GFP_KERNEL)) { 2427 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
2428 printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
2147 ret = -ENOMEM; 2429 ret = -ENOMEM;
2148 goto bail; 2430 goto bail;
2149 } 2431 }
@@ -2235,13 +2517,18 @@ int ipath_reset_device(int unit)
2235 } 2517 }
2236 } 2518 }
2237 2519
2520 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2521 teardown_sdma(dd);
2522
2238 dd->ipath_flags &= ~IPATH_INITTED; 2523 dd->ipath_flags &= ~IPATH_INITTED;
2524 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2239 ret = dd->ipath_f_reset(dd); 2525 ret = dd->ipath_f_reset(dd);
2240 if (ret != 1) 2526 if (ret == 1) {
2241 ipath_dbg("reset was not successful\n"); 2527 ipath_dbg("Reinitializing unit %u after reset attempt\n",
2242 ipath_dbg("Trying to reinitialize unit %u after reset attempt\n", 2528 unit);
2243 unit); 2529 ret = ipath_init_chip(dd, 1);
2244 ret = ipath_init_chip(dd, 1); 2530 } else
2531 ret = -EAGAIN;
2245 if (ret) 2532 if (ret)
2246 ipath_dev_err(dd, "Reinitialize unit %u after " 2533 ipath_dev_err(dd, "Reinitialize unit %u after "
2247 "reset failed with %d\n", unit, ret); 2534 "reset failed with %d\n", unit, ret);
@@ -2253,13 +2540,121 @@ bail:
2253 return ret; 2540 return ret;
2254} 2541}
2255 2542
2543/*
2544 * send a signal to all the processes that have the driver open
 2545 * through the normal interfaces (i.e., everything other than the
 2546 * diags interface). Returns the number of signalled processes.
2547 */
2548static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
2549{
2550 int i, sub, any = 0;
2551 pid_t pid;
2552
2553 if (!dd->ipath_pd)
2554 return 0;
2555 for (i = 1; i < dd->ipath_cfgports; i++) {
2556 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
2557 !dd->ipath_pd[i]->port_pid)
2558 continue;
2559 pid = dd->ipath_pd[i]->port_pid;
2560 dev_info(&dd->pcidev->dev, "context %d in use "
2561 "(PID %u), sending signal %d\n",
2562 i, pid, sig);
2563 kill_proc(pid, sig, 1);
2564 any++;
2565 for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
2566 pid = dd->ipath_pd[i]->port_subpid[sub];
2567 if (!pid)
2568 continue;
2569 dev_info(&dd->pcidev->dev, "sub-context "
2570 "%d:%d in use (PID %u), sending "
2571 "signal %d\n", i, sub, pid, sig);
2572 kill_proc(pid, sig, 1);
2573 any++;
2574 }
2575 }
2576 return any;
2577}
2578
2579static void ipath_hol_signal_down(struct ipath_devdata *dd)
2580{
2581 if (ipath_signal_procs(dd, SIGSTOP))
2582 ipath_dbg("Stopped some processes\n");
2583 ipath_cancel_sends(dd, 1);
2584}
2585
2586
2587static void ipath_hol_signal_up(struct ipath_devdata *dd)
2588{
2589 if (ipath_signal_procs(dd, SIGCONT))
2590 ipath_dbg("Continued some processes\n");
2591}
2592
2593/*
 2594 * Link is down: stop all user processes and flush pending sends
 2595 * to prevent HoL blocking, then start the HoL timer, which
 2596 * periodically continues and then re-stops the processes so they
 2597 * can detect the link down if they want, and act on it.
2598 * Timer may already be running, so use __mod_timer, not add_timer.
2599 */
2600void ipath_hol_down(struct ipath_devdata *dd)
2601{
2602 dd->ipath_hol_state = IPATH_HOL_DOWN;
2603 ipath_hol_signal_down(dd);
2604 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2605 dd->ipath_hol_timer.expires = jiffies +
2606 msecs_to_jiffies(ipath_hol_timeout_ms);
2607 __mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
2608}
2609
2610/*
 2611 * Link is up: continue any stopped user processes. The timer,
 2612 * if running, is left alone; it becomes a nop once it sees
 2613 * the link is up.
2614 */
2615void ipath_hol_up(struct ipath_devdata *dd)
2616{
2617 ipath_hol_signal_up(dd);
2618 dd->ipath_hol_state = IPATH_HOL_UP;
2619}
2620
2621/*
 2622 * Toggle the running/not-running state of user processes
 2623 * to prevent HoL blocking on chip resources, while still allowing
 2624 * them to do link-down special-case handling.
2625 * Should only be called via the timer
2626 */
2627void ipath_hol_event(unsigned long opaque)
2628{
2629 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
2630
2631 if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
2632 && dd->ipath_hol_state != IPATH_HOL_UP) {
2633 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2634 ipath_dbg("Stopping processes\n");
2635 ipath_hol_signal_down(dd);
2636 } else { /* may do "extra" if also in ipath_hol_up() */
2637 dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
2638 ipath_dbg("Continuing processes\n");
2639 ipath_hol_signal_up(dd);
2640 }
2641 if (dd->ipath_hol_state == IPATH_HOL_UP)
2642 ipath_dbg("link's up, don't resched timer\n");
2643 else {
2644 dd->ipath_hol_timer.expires = jiffies +
2645 msecs_to_jiffies(ipath_hol_timeout_ms);
2646 __mod_timer(&dd->ipath_hol_timer,
2647 dd->ipath_hol_timer.expires);
2648 }
2649}
2650
2256int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv) 2651int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2257{ 2652{
2258 u64 val; 2653 u64 val;
2259 if ( new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK ) { 2654
2655 if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
2260 return -1; 2656 return -1;
2261 } 2657 if (dd->ipath_rx_pol_inv != new_pol_inv) {
2262 if ( dd->ipath_rx_pol_inv != new_pol_inv ) {
2263 dd->ipath_rx_pol_inv = new_pol_inv; 2658 dd->ipath_rx_pol_inv = new_pol_inv;
2264 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); 2659 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2265 val &= ~(INFINIPATH_XGXS_RX_POL_MASK << 2660 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index e28a42f53769..dc37277f1c80 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -62,6 +62,33 @@
62 * accessing eeprom contents from within the kernel, only via sysfs. 62 * accessing eeprom contents from within the kernel, only via sysfs.
63 */ 63 */
64 64
65/* Added functionality for IBA7220-based cards */
66#define IPATH_EEPROM_DEV_V1 0xA0
67#define IPATH_EEPROM_DEV_V2 0xA2
68#define IPATH_TEMP_DEV 0x98
69#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
70#define IPATH_NO_DEV (0xFF)
71
72/*
73 * The number of I2C chains is proliferating. The table below brings
74 * some order to the madness. The basic principle is that the
75 * table is scanned from the top, and a "probe" is made to the
76 * device probe_dev. If that succeeds, the chain is considered
77 * to be of that type, and dd->i2c_chain_type is set to the index+1
78 * of the entry.
79 * The +1 is so static initialization can mean "unknown, do probe."
80 */
81static struct i2c_chain_desc {
82 u8 probe_dev; /* If seen at probe, chain is this type */
83 u8 eeprom_dev; /* Dev addr (if any) for EEPROM */
84 u8 temp_dev; /* Dev Addr (if any) for Temp-sense */
85} i2c_chains[] = {
86 { IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
87 { IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
88 { IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
89 { IPATH_NO_DEV }
90};
91
65enum i2c_type { 92enum i2c_type {
66 i2c_line_scl = 0, 93 i2c_line_scl = 0,
67 i2c_line_sda 94 i2c_line_sda
@@ -75,13 +102,6 @@ enum i2c_state {
75#define READ_CMD 1 102#define READ_CMD 1
76#define WRITE_CMD 0 103#define WRITE_CMD 0
77 104
78static int eeprom_init;
79
80/*
81 * The gpioval manipulation really should be protected by spinlocks
82 * or be converted to use atomic operations.
83 */
84
85/** 105/**
86 * i2c_gpio_set - set a GPIO line 106 * i2c_gpio_set - set a GPIO line
87 * @dd: the infinipath device 107 * @dd: the infinipath device
@@ -241,6 +261,27 @@ static int i2c_ackrcv(struct ipath_devdata *dd)
241} 261}
242 262
243/** 263/**
264 * rd_byte - read a byte, leaving ACK, STOP, etc up to caller
265 * @dd: the infinipath device
266 *
267 * Returns byte shifted out of device
268 */
269static int rd_byte(struct ipath_devdata *dd)
270{
271 int bit_cntr, data;
272
273 data = 0;
274
275 for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
276 data <<= 1;
277 scl_out(dd, i2c_line_high);
278 data |= sda_in(dd, 0);
279 scl_out(dd, i2c_line_low);
280 }
281 return data;
282}
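
rd_byte deliberately leaves ACK/NACK/STOP to the caller: a multi-byte read ACKs every byte but the last, while a probe just reads one byte and stops. The shape of a single-byte transaction using the helpers in this file (a fragment, error handling trimmed):

	if (!i2c_startcmd(dd, devaddr | READ_CMD)) {
		int data = rd_byte(dd);	/* no ACK issued here */
		stop_cmd(dd);		/* stop without ACK ends the read */
	}
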
283
284/**
244 * wr_byte - write a byte, one bit at a time 285 * wr_byte - write a byte, one bit at a time
245 * @dd: the infinipath device 286 * @dd: the infinipath device
246 * @data: the byte to write 287 * @data: the byte to write
@@ -331,7 +372,6 @@ static int eeprom_reset(struct ipath_devdata *dd)
331 ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg " 372 ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
332 "is %llx\n", (unsigned long long) *gpioval); 373 "is %llx\n", (unsigned long long) *gpioval);
333 374
334 eeprom_init = 1;
335 /* 375 /*
336 * This is to get the i2c into a known state, by first going low, 376 * This is to get the i2c into a known state, by first going low,
337 * then tristate sda (and then tristate scl as first thing 377 * then tristate sda (and then tristate scl as first thing
@@ -340,12 +380,17 @@ static int eeprom_reset(struct ipath_devdata *dd)
340 scl_out(dd, i2c_line_low); 380 scl_out(dd, i2c_line_low);
341 sda_out(dd, i2c_line_high); 381 sda_out(dd, i2c_line_high);
342 382
383 /* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
343 while (clock_cycles_left--) { 384 while (clock_cycles_left--) {
344 scl_out(dd, i2c_line_high); 385 scl_out(dd, i2c_line_high);
345 386
387 /* SDA seen high, issue START by dropping it while SCL high */
346 if (sda_in(dd, 0)) { 388 if (sda_in(dd, 0)) {
347 sda_out(dd, i2c_line_low); 389 sda_out(dd, i2c_line_low);
348 scl_out(dd, i2c_line_low); 390 scl_out(dd, i2c_line_low);
391 /* ATMEL spec says must be followed by STOP. */
392 scl_out(dd, i2c_line_high);
393 sda_out(dd, i2c_line_high);
349 ret = 0; 394 ret = 0;
350 goto bail; 395 goto bail;
351 } 396 }
@@ -359,29 +404,121 @@ bail:
359 return ret; 404 return ret;
360} 405}
361 406
362/** 407/*
363 * ipath_eeprom_read - receives bytes from the eeprom via I2C 408 * Probe for I2C device at specified address. Returns 0 for "success"
364 * @dd: the infinipath device	 409 * to match the rest of this file.
365 * @eeprom_offset: address to read from 410 * Leave bus in "reasonable" state for further commands.
366 * @buffer: where to store result
367 * @len: number of bytes to receive
368 */ 411 */
412static int i2c_probe(struct ipath_devdata *dd, int devaddr)
413{
414 int ret = 0;
415
416 ret = eeprom_reset(dd);
417 if (ret) {
418 ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
419 devaddr);
420 return ret;
421 }
422 /*
423 * Reset no longer leaves bus in start condition, so normal
424 * i2c_startcmd() will do.
425 */
426 ret = i2c_startcmd(dd, devaddr | READ_CMD);
427 if (ret)
428 ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
429 devaddr);
430 else {
431 /*
432 * Device did respond. Complete a single-byte read, because some
433 * devices apparently cannot handle STOP immediately after they
434 * ACK the start-cmd.
435 */
436 int data;
437 data = rd_byte(dd);
438 stop_cmd(dd);
439 ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
440 }
441 return ret;
442}
443
444/*
445 * Returns the "i2c type". This is a pointer to a struct that describes
446 * the I2C chain on this board. To minimize impact on struct ipath_devdata,
447 * the (small integer) index into the table is actually memoized, rather
448 * than the pointer.
449 * Memoization is because the type is determined on the first call per chip.
450 * An alternative would be to move type determination to early
451 * init code.
452 */
453static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
454{
455 int idx;
456
457 /* Get memoized index, from previous successful probes */
458 idx = dd->ipath_i2c_chain_type - 1;
459 if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
460 goto done;
461
462 idx = 0;
463 while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
464 /* if probe succeeds, this is type */
465 if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
466 break;
467 ++idx;
468 }
469
470 /*
471 * Old EEPROM (first entry) may require a reset after probe,
472 * rather than being able to "start" after "stop"
473 */
474 if (idx == 0)
475 eeprom_reset(dd);
476
477 if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
478 idx = -1;
479 else
480 dd->ipath_i2c_chain_type = idx + 1;
481done:
482 return (idx >= 0) ? i2c_chains + idx : NULL;
483}
369 484
370static int ipath_eeprom_internal_read(struct ipath_devdata *dd, 485static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
371 u8 eeprom_offset, void *buffer, int len) 486 u8 eeprom_offset, void *buffer, int len)
372{ 487{
373 /* compiler complains unless initialized */
374 u8 single_byte = 0;
375 int bit_cntr;
376 int ret; 488 int ret;
489 struct i2c_chain_desc *icd;
490 u8 *bp = buffer;
377 491
378 if (!eeprom_init) 492 ret = 1;
379 eeprom_reset(dd); 493 icd = ipath_i2c_type(dd);
380 494 if (!icd)
381 eeprom_offset = (eeprom_offset << 1) | READ_CMD; 495 goto bail;
382 496
383 if (i2c_startcmd(dd, eeprom_offset)) { 497 if (icd->eeprom_dev == IPATH_NO_DEV) {
384 ipath_dbg("Failed startcmd\n"); 498 /* legacy not-really-I2C */
499 ipath_cdbg(VERBOSE, "Start command only address\n");
500 eeprom_offset = (eeprom_offset << 1) | READ_CMD;
501 ret = i2c_startcmd(dd, eeprom_offset);
502 } else {
503 /* Actual I2C */
504 ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
505 if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
506 ipath_dbg("Failed EEPROM startcmd\n");
507 stop_cmd(dd);
508 ret = 1;
509 goto bail;
510 }
511 ret = wr_byte(dd, eeprom_offset);
512 stop_cmd(dd);
513 if (ret) {
514 ipath_dev_err(dd, "Failed to write EEPROM address\n");
515 ret = 1;
516 goto bail;
517 }
518 ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
519 }
520 if (ret) {
521 ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
385 stop_cmd(dd); 522 stop_cmd(dd);
386 ret = 1; 523 ret = 1;
387 goto bail; 524 goto bail;
@@ -392,22 +529,11 @@ static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
392 * incrementing the address. 529 * incrementing the address.
393 */ 530 */
394 while (len-- > 0) { 531 while (len-- > 0) {
395 /* get data */ 532 /* get and store data */
396 single_byte = 0; 533 *bp++ = rd_byte(dd);
397 for (bit_cntr = 8; bit_cntr; bit_cntr--) {
398 u8 bit;
399 scl_out(dd, i2c_line_high);
400 bit = sda_in(dd, 0);
401 single_byte |= bit << (bit_cntr - 1);
402 scl_out(dd, i2c_line_low);
403 }
404
405 /* send ack if not the last byte */ 534 /* send ack if not the last byte */
406 if (len) 535 if (len)
407 send_ack(dd); 536 send_ack(dd);
408
409 *((u8 *) buffer) = single_byte;
410 buffer++;
411 } 537 }
412 538
413 stop_cmd(dd); 539 stop_cmd(dd);
@@ -418,31 +544,40 @@ bail:
418 return ret; 544 return ret;
419} 545}
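
For real-I2C parts this is the classic EEPROM "random read": a dummy write cycle loads the device's internal address pointer, then a fresh start with the read bit clocks data out. Condensed sequence (note the code issues a full STOP between the phases rather than a repeated start):

	/*
	 *   START  eeprom_dev | WRITE_CMD
	 *   wr_byte(eeprom_offset)        loads the address pointer
	 *   STOP
	 *   START  eeprom_dev | READ_CMD
	 *   loop:  *bp++ = rd_byte(); send_ack() on all but the last
	 *   STOP
	 */
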
420 546
421
422/**
423 * ipath_eeprom_write - writes data to the eeprom via I2C
424 * @dd: the infinipath device
425 * @eeprom_offset: where to place data
426 * @buffer: data to write
427 * @len: number of bytes to write
428 */
429static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset, 547static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
430 const void *buffer, int len) 548 const void *buffer, int len)
431{ 549{
432 u8 single_byte;
433 int sub_len; 550 int sub_len;
434 const u8 *bp = buffer; 551 const u8 *bp = buffer;
435 int max_wait_time, i; 552 int max_wait_time, i;
436 int ret; 553 int ret;
554 struct i2c_chain_desc *icd;
437 555
438 if (!eeprom_init) 556 ret = 1;
439 eeprom_reset(dd); 557 icd = ipath_i2c_type(dd);
558 if (!icd)
559 goto bail;
440 560
441 while (len > 0) { 561 while (len > 0) {
442 if (i2c_startcmd(dd, (eeprom_offset << 1) | WRITE_CMD)) { 562 if (icd->eeprom_dev == IPATH_NO_DEV) {
443 ipath_dbg("Failed to start cmd offset %u\n", 563 if (i2c_startcmd(dd,
444 eeprom_offset); 564 (eeprom_offset << 1) | WRITE_CMD)) {
445 goto failed_write; 565 ipath_dbg("Failed to start cmd offset %u\n",
566 eeprom_offset);
567 goto failed_write;
568 }
569 } else {
570 /* Real I2C */
571 if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
572 ipath_dbg("Failed EEPROM startcmd\n");
573 goto failed_write;
574 }
575 ret = wr_byte(dd, eeprom_offset);
576 if (ret) {
577 ipath_dev_err(dd, "Failed to write EEPROM "
578 "address\n");
579 goto failed_write;
580 }
446 } 581 }
447 582
448 sub_len = min(len, 4); 583 sub_len = min(len, 4);
@@ -468,9 +603,11 @@ static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offse
468 * the writes have completed. We do this inline to avoid 603 * the writes have completed. We do this inline to avoid
469 * the debug prints that are in the real read routine 604 * the debug prints that are in the real read routine
470 * if the startcmd fails. 605 * if the startcmd fails.
606 * We also use the proper device address, so it doesn't matter
607 * whether we have real eeprom_dev. legacy likes any address.
471 */ 608 */
472 max_wait_time = 100; 609 max_wait_time = 100;
473 while (i2c_startcmd(dd, READ_CMD)) { 610 while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
474 stop_cmd(dd); 611 stop_cmd(dd);
475 if (!--max_wait_time) { 612 if (!--max_wait_time) {
476 ipath_dbg("Did not get successful read to " 613 ipath_dbg("Did not get successful read to "
@@ -478,15 +615,8 @@ static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offse
478 goto failed_write; 615 goto failed_write;
479 } 616 }
480 } 617 }
481 /* now read the zero byte */ 618 /* now read (and ignore) the resulting byte */
482 for (i = single_byte = 0; i < 8; i++) { 619 rd_byte(dd);
483 u8 bit;
484 scl_out(dd, i2c_line_high);
485 bit = sda_in(dd, 0);
486 scl_out(dd, i2c_line_low);
487 single_byte <<= 1;
488 single_byte |= bit;
489 }
490 stop_cmd(dd); 620 stop_cmd(dd);
491 } 621 }
492 622
@@ -501,9 +631,12 @@ bail:
501 return ret; 631 return ret;
502} 632}
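
The completion poll above relies on EEPROM behavior during the internal write cycle: the device NACKs its own address until the write finishes, so the first ACKed start (followed by one throwaway byte and a stop) marks completion. Condensed from the loop:

	tries = 100;
	while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
		stop_cmd(dd);
		if (!--tries)
			goto failed_write;	/* device never re-ACKed */
	}
	rd_byte(dd);				/* throwaway byte */
	stop_cmd(dd);
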
503 633
504/* 634/**
505 * The public entry-points ipath_eeprom_read() and ipath_eeprom_write() 635 * ipath_eeprom_read - receives bytes from the eeprom via I2C
506 * are now just wrappers around the internal functions. 636 * @dd: the infinipath device
637 * @eeprom_offset: address to read from
638 * @buffer: where to store result
639 * @len: number of bytes to receive
507 */ 640 */
508int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset, 641int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
509 void *buff, int len) 642 void *buff, int len)
@@ -519,6 +652,13 @@ int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
519 return ret; 652 return ret;
520} 653}
521 654
655/**
656 * ipath_eeprom_write - writes data to the eeprom via I2C
657 * @dd: the infinipath device
658 * @eeprom_offset: where to place data
659 * @buffer: data to write
660 * @len: number of bytes to write
661 */
522int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset, 662int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
523 const void *buff, int len) 663 const void *buff, int len)
524{ 664{
@@ -820,7 +960,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
820 * if we log an hour at 31 minutes, then we would need to set 960 * if we log an hour at 31 minutes, then we would need to set
821 * active_time to -29 to accurately count the _next_ hour. 961 * active_time to -29 to accurately count the _next_ hour.
822 */ 962 */
823 if (new_time > 3600) { 963 if (new_time >= 3600) {
824 new_hrs = new_time / 3600; 964 new_hrs = new_time / 3600;
825 atomic_sub((new_hrs * 3600), &dd->ipath_active_time); 965 atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
826 new_hrs += dd->ipath_eep_hrs; 966 new_hrs += dd->ipath_eep_hrs;
@@ -885,3 +1025,159 @@ void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
885 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags); 1025 spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
886 return; 1026 return;
887} 1027}
1028
1029static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
1030{
1031 int ret;
1032 struct i2c_chain_desc *icd;
1033
1034 ret = -ENOENT;
1035
1036 icd = ipath_i2c_type(dd);
1037 if (!icd)
1038 goto bail;
1039
1040 if (icd->temp_dev == IPATH_NO_DEV) {
1041 /* tempsense only exists on new, real-I2C boards */
1042 ret = -ENXIO;
1043 goto bail;
1044 }
1045
1046 if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
1047 ipath_dbg("Failed tempsense startcmd\n");
1048 stop_cmd(dd);
1049 ret = -ENXIO;
1050 goto bail;
1051 }
1052 ret = wr_byte(dd, regnum);
1053 stop_cmd(dd);
1054 if (ret) {
1055 ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
1056 regnum);
1057 ret = -ENXIO;
1058 goto bail;
1059 }
1060 if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
1061 ipath_dbg("Failed tempsense RD startcmd\n");
1062 stop_cmd(dd);
1063 ret = -ENXIO;
1064 goto bail;
1065 }
1066 /*
1067	 * We can only sensibly clock out one byte per command.
1068 */
1069 ret = rd_byte(dd);
1070 stop_cmd(dd);
1071
1072bail:
1073 return ret;
1074}
1075
1076#define VALID_TS_RD_REG_MASK 0xBF
1077
1078/**
1079 * ipath_tempsense_read - read register of temp sensor via I2C
1080 * @dd: the infinipath device
1081 * @regnum: register to read from
1082 *
1083 * returns reg contents (0..255) or < 0 for error
1084 */
1085int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
1086{
1087 int ret;
1088
1089 if (regnum > 7)
1090 return -EINVAL;
1091
1092 /* return a bogus value for (the one) register we do not have */
1093 if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
1094 return 0;
1095
1096 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
1097 if (!ret) {
1098 ret = ipath_tempsense_internal_read(dd, regnum);
1099 mutex_unlock(&dd->ipath_eep_lock);
1100 }
1101
1102 /*
1103 * There are three possibilities here:
1104 * ret is actual value (0..255)
1105 * ret is -ENXIO or -EINVAL from code in this file
1106 * ret is -EINTR from mutex_lock_interruptible.
1107 */
1108 return ret;
1109}
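
A hypothetical caller; register numbers and their meanings depend on the actual sensor part, so register 0 here is only an assumed example:

	int t = ipath_tempsense_read(dd, 0);	/* register 0: assumed */
	if (t < 0)
		ipath_dev_err(dd, "tempsense read failed: %d\n", t);
	else
		ipath_cdbg(VERBOSE, "raw tempsense reg 0 = 0x%02x\n", t);
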
1110
1111static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
1112 u8 regnum, u8 data)
1113{
1114 int ret = -ENOENT;
1115 struct i2c_chain_desc *icd;
1116
1117 icd = ipath_i2c_type(dd);
1118 if (!icd)
1119 goto bail;
1120
1121 if (icd->temp_dev == IPATH_NO_DEV) {
1122 /* tempsense only exists on new, real-I2C boards */
1123 ret = -ENXIO;
1124 goto bail;
1125 }
1126 if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
1127 ipath_dbg("Failed tempsense startcmd\n");
1128 stop_cmd(dd);
1129 ret = -ENXIO;
1130 goto bail;
1131 }
1132 ret = wr_byte(dd, regnum);
1133 if (ret) {
1134 stop_cmd(dd);
1135 ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
1136 regnum);
1137 ret = -ENXIO;
1138 goto bail;
1139 }
1140 ret = wr_byte(dd, data);
1141 stop_cmd(dd);
1142 ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
1143 if (ret) {
1144 ipath_dev_err(dd, "Failed tempsense data wrt to %02X\n",
1145 regnum);
1146 ret = -ENXIO;
1147 }
1148
1149bail:
1150 return ret;
1151}
1152
1153#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
1154
1155/**
1156 * ipath_tempsense_write - write register of temp sensor via I2C
1157 * @dd: the infinipath device
1158 * @regnum: register to write
1159 * @data: data to write
1160 *
1161 * returns 0 for success or < 0 for error
1162 */
1163int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
1164{
1165 int ret;
1166
1167 if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
1168 return -EINVAL;
1169
1170 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
1171 if (!ret) {
1172 ret = ipath_tempsense_internal_write(dd, regnum, data);
1173 mutex_unlock(&dd->ipath_eep_lock);
1174 }
1175
1176 /*
1177 * There are three possibilities here:
1178 * ret is 0 for success
1179 * ret is -ENXIO or -EINVAL from code in this file
1180 * ret is -EINTR from mutex_lock_interruptible.
1181 */
1182 return ret;
1183}
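For reference, a minimal sketch (illustration only, not driver code) of what the VALID_TS_WR_REG_MASK test above admits; only register numbers 0x9, 0xB and 0xD have their bits set in the mask:

static void ts_wr_mask_demo(void)
{
	u8 r;

	for (r = 0; r <= 15; r++)
		if ((1 << r) & VALID_TS_WR_REG_MASK)
			pr_info("tempsense reg 0x%X is writable\n", r);
}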
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 7e025c8e01b6..1e627aab18bf 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -36,21 +36,28 @@
36#include <linux/cdev.h> 36#include <linux/cdev.h>
37#include <linux/swap.h> 37#include <linux/swap.h>
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/highmem.h>
40#include <linux/io.h>
41#include <linux/jiffies.h>
39#include <asm/pgtable.h> 42#include <asm/pgtable.h>
40 43
41#include "ipath_kernel.h" 44#include "ipath_kernel.h"
42#include "ipath_common.h" 45#include "ipath_common.h"
46#include "ipath_user_sdma.h"
43 47
44static int ipath_open(struct inode *, struct file *); 48static int ipath_open(struct inode *, struct file *);
45static int ipath_close(struct inode *, struct file *); 49static int ipath_close(struct inode *, struct file *);
46static ssize_t ipath_write(struct file *, const char __user *, size_t, 50static ssize_t ipath_write(struct file *, const char __user *, size_t,
47 loff_t *); 51 loff_t *);
52static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
 53 unsigned long, loff_t);
48static unsigned int ipath_poll(struct file *, struct poll_table_struct *); 54static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
49static int ipath_mmap(struct file *, struct vm_area_struct *); 55static int ipath_mmap(struct file *, struct vm_area_struct *);
50 56
51static const struct file_operations ipath_file_ops = { 57static const struct file_operations ipath_file_ops = {
52 .owner = THIS_MODULE, 58 .owner = THIS_MODULE,
53 .write = ipath_write, 59 .write = ipath_write,
60 .aio_write = ipath_writev,
54 .open = ipath_open, 61 .open = ipath_open,
55 .release = ipath_close, 62 .release = ipath_close,
56 .poll = ipath_poll, 63 .poll = ipath_poll,
@@ -184,6 +191,29 @@ static int ipath_get_base_info(struct file *fp,
184 kinfo->spi_piobufbase = (u64) pd->port_piobufs + 191 kinfo->spi_piobufbase = (u64) pd->port_piobufs +
185 dd->ipath_palign * kinfo->spi_piocnt * slave; 192 dd->ipath_palign * kinfo->spi_piocnt * slave;
186 } 193 }
194
195 /*
196 * Set the PIO avail update threshold to no larger
197 * than the number of buffers per process. Note that
198 * we decrease it here, but won't ever increase it.
199 */
200 if (dd->ipath_pioupd_thresh &&
201 kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
202 unsigned long flags;
203
204 dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
205 ipath_dbg("Decreased pio update threshold to %u\n",
206 dd->ipath_pioupd_thresh);
207 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
208 dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
209 << INFINIPATH_S_UPDTHRESH_SHIFT);
210 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
211 << INFINIPATH_S_UPDTHRESH_SHIFT;
212 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
213 dd->ipath_sendctrl);
214 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
215 }
216
187 if (shared) { 217 if (shared) {
188 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + 218 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
189 dd->ipath_ureg_align * pd->port_port; 219 dd->ipath_ureg_align * pd->port_port;
@@ -219,7 +249,12 @@ static int ipath_get_base_info(struct file *fp,
219 kinfo->spi_pioalign = dd->ipath_palign; 249 kinfo->spi_pioalign = dd->ipath_palign;
220 250
221 kinfo->spi_qpair = IPATH_KD_QP; 251 kinfo->spi_qpair = IPATH_KD_QP;
222 kinfo->spi_piosize = dd->ipath_ibmaxlen; 252 /*
 253 * user mode PIO buffers are always 2KB, even when 4KB packets
 254 * can be sent and received via the kernel; this is ibmaxlen
 255 * for a 2K MTU.
256 */
257 kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
223 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */ 258 kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
224 kinfo->spi_port = pd->port_port; 259 kinfo->spi_port = pd->port_port;
225 kinfo->spi_subport = subport_fp(fp); 260 kinfo->spi_subport = subport_fp(fp);
@@ -1598,6 +1633,9 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
1598 port_fp(fp) = pd; 1633 port_fp(fp) = pd;
1599 pd->port_pid = current->pid; 1634 pd->port_pid = current->pid;
1600 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); 1635 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
1636 ipath_chg_pioavailkernel(dd,
1637 dd->ipath_pbufsport * (pd->port_port - 1),
1638 dd->ipath_pbufsport, 0);
1601 ipath_stats.sps_ports++; 1639 ipath_stats.sps_ports++;
1602 ret = 0; 1640 ret = 0;
1603 } else 1641 } else
@@ -1760,7 +1798,7 @@ static int find_shared_port(struct file *fp,
1760 for (ndev = 0; ndev < devmax; ndev++) { 1798 for (ndev = 0; ndev < devmax; ndev++) {
1761 struct ipath_devdata *dd = ipath_lookup(ndev); 1799 struct ipath_devdata *dd = ipath_lookup(ndev);
1762 1800
1763 if (!dd) 1801 if (!usable(dd))
1764 continue; 1802 continue;
1765 for (i = 1; i < dd->ipath_cfgports; i++) { 1803 for (i = 1; i < dd->ipath_cfgports; i++) {
1766 struct ipath_portdata *pd = dd->ipath_pd[i]; 1804 struct ipath_portdata *pd = dd->ipath_pd[i];
@@ -1839,10 +1877,9 @@ static int ipath_assign_port(struct file *fp,
1839 if (ipath_compatible_subports(swmajor, swminor) && 1877 if (ipath_compatible_subports(swmajor, swminor) &&
1840 uinfo->spu_subport_cnt && 1878 uinfo->spu_subport_cnt &&
1841 (ret = find_shared_port(fp, uinfo))) { 1879 (ret = find_shared_port(fp, uinfo))) {
1842 mutex_unlock(&ipath_mutex);
1843 if (ret > 0) 1880 if (ret > 0)
1844 ret = 0; 1881 ret = 0;
1845 goto done; 1882 goto done_chk_sdma;
1846 } 1883 }
1847 1884
1848 i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE; 1885 i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
@@ -1854,6 +1891,21 @@ static int ipath_assign_port(struct file *fp,
1854 else 1891 else
1855 ret = find_best_unit(fp, uinfo); 1892 ret = find_best_unit(fp, uinfo);
1856 1893
1894done_chk_sdma:
1895 if (!ret) {
1896 struct ipath_filedata *fd = fp->private_data;
1897 const struct ipath_portdata *pd = fd->pd;
1898 const struct ipath_devdata *dd = pd->port_dd;
1899
1900 fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
1901 dd->ipath_unit,
1902 pd->port_port,
1903 fd->subport);
1904
1905 if (!fd->pq)
1906 ret = -ENOMEM;
1907 }
1908
1857 mutex_unlock(&ipath_mutex); 1909 mutex_unlock(&ipath_mutex);
1858 1910
1859done: 1911done:
@@ -1922,22 +1974,25 @@ static int ipath_do_user_init(struct file *fp,
1922 pd->port_hdrqfull_poll = pd->port_hdrqfull; 1974 pd->port_hdrqfull_poll = pd->port_hdrqfull;
1923 1975
1924 /* 1976 /*
1925 * now enable the port; the tail registers will be written to memory 1977 * Now enable the port for receive.
1926 * by the chip as soon as it sees the write to 1978 * For chips that are set to DMA the tail register to memory
1927 * dd->ipath_kregs->kr_rcvctrl. The update only happens on 1979 * when they change (and when the update bit transitions from
1928 * transition from 0 to 1, so clear it first, then set it as part of 1980 * 0 to 1), we turn it off and then back on.
1929 * enabling the port. This will (very briefly) affect any other 1981 * This will (very briefly) affect any other open ports, but the
1930 * open ports, but it shouldn't be long enough to be an issue. 1982 * duration is very short, and therefore isn't an issue. We
1931 * We explicitly set the in-memory copy to 0 beforehand, so we don't 1983 * explicitly set the in-memory tail copy to 0 beforehand, so we
1932 * have to wait to be sure the DMA update has happened. 1984 * don't have to wait to be sure the DMA update has happened
1985 * (chip resets head/tail to 0 on transition to enable).
1933 */ 1986 */
1934 if (pd->port_rcvhdrtail_kvaddr)
1935 ipath_clear_rcvhdrtail(pd);
1936 set_bit(dd->ipath_r_portenable_shift + pd->port_port, 1987 set_bit(dd->ipath_r_portenable_shift + pd->port_port,
1937 &dd->ipath_rcvctrl); 1988 &dd->ipath_rcvctrl);
1938 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1989 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1990 if (pd->port_rcvhdrtail_kvaddr)
1991 ipath_clear_rcvhdrtail(pd);
1992 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1939 dd->ipath_rcvctrl & 1993 dd->ipath_rcvctrl &
1940 ~(1ULL << dd->ipath_r_tailupd_shift)); 1994 ~(1ULL << dd->ipath_r_tailupd_shift));
1995 }
1941 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1996 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1942 dd->ipath_rcvctrl); 1997 dd->ipath_rcvctrl);
1943 /* Notify any waiting slaves */ 1998 /* Notify any waiting slaves */
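A condensed sketch of the enable sequence in the new code (tailupd_bit here stands in for 1ULL << dd->ipath_r_tailupd_shift): the chip only DMAs the tail on a 0-to-1 transition of that bit, hence the two writes.

static void example_enable_rcv(struct ipath_devdata *dd, u64 tailupd_bit)
{
	/* first write with the bit forced low ... */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl & ~tailupd_bit);
	/* ... then the full value, producing the 0->1 transition */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
}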
@@ -1965,14 +2020,15 @@ static void unlock_expected_tids(struct ipath_portdata *pd)
1965 ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n", 2020 ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
1966 pd->port_port); 2021 pd->port_port);
1967 for (i = port_tidbase; i < maxtid; i++) { 2022 for (i = port_tidbase; i < maxtid; i++) {
1968 if (!dd->ipath_pageshadow[i]) 2023 struct page *ps = dd->ipath_pageshadow[i];
2024
2025 if (!ps)
1969 continue; 2026 continue;
1970 2027
2028 dd->ipath_pageshadow[i] = NULL;
1971 pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i], 2029 pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
1972 PAGE_SIZE, PCI_DMA_FROMDEVICE); 2030 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1973 ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i], 2031 ipath_release_user_pages_on_close(&ps, 1);
1974 1);
1975 dd->ipath_pageshadow[i] = NULL;
1976 cnt++; 2032 cnt++;
1977 ipath_stats.sps_pageunlocks++; 2033 ipath_stats.sps_pageunlocks++;
1978 } 2034 }
@@ -2007,6 +2063,13 @@ static int ipath_close(struct inode *in, struct file *fp)
2007 mutex_unlock(&ipath_mutex); 2063 mutex_unlock(&ipath_mutex);
2008 goto bail; 2064 goto bail;
2009 } 2065 }
2066
2067 dd = pd->port_dd;
2068
2069 /* drain user sdma queue */
2070 ipath_user_sdma_queue_drain(dd, fd->pq);
2071 ipath_user_sdma_queue_destroy(fd->pq);
2072
2010 if (--pd->port_cnt) { 2073 if (--pd->port_cnt) {
2011 /* 2074 /*
2012 * XXX If the master closes the port before the slave(s), 2075 * XXX If the master closes the port before the slave(s),
@@ -2019,7 +2082,6 @@ static int ipath_close(struct inode *in, struct file *fp)
2019 goto bail; 2082 goto bail;
2020 } 2083 }
2021 port = pd->port_port; 2084 port = pd->port_port;
2022 dd = pd->port_dd;
2023 2085
2024 if (pd->port_hdrqfull) { 2086 if (pd->port_hdrqfull) {
2025 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " 2087 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
@@ -2039,7 +2101,7 @@ static int ipath_close(struct inode *in, struct file *fp)
2039 pd->port_rcvnowait = pd->port_pionowait = 0; 2101 pd->port_rcvnowait = pd->port_pionowait = 0;
2040 } 2102 }
2041 if (pd->port_flag) { 2103 if (pd->port_flag) {
2042 ipath_dbg("port %u port_flag still set to 0x%lx\n", 2104 ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
2043 pd->port_port, pd->port_flag); 2105 pd->port_port, pd->port_flag);
2044 pd->port_flag = 0; 2106 pd->port_flag = 0;
2045 } 2107 }
@@ -2076,6 +2138,7 @@ static int ipath_close(struct inode *in, struct file *fp)
2076 2138
2077 i = dd->ipath_pbufsport * (port - 1); 2139 i = dd->ipath_pbufsport * (port - 1);
2078 ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport); 2140 ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
2141 ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1);
2079 2142
2080 dd->ipath_f_clear_tids(dd, pd->port_port); 2143 dd->ipath_f_clear_tids(dd, pd->port_port);
2081 2144
@@ -2140,17 +2203,31 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
2140 return ret; 2203 return ret;
2141} 2204}
2142 2205
2143static int ipath_force_pio_avail_update(struct ipath_devdata *dd) 2206static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
2207 u32 __user *inflightp)
2144{ 2208{
2145 unsigned long flags; 2209 const u32 val = ipath_user_sdma_inflight_counter(pq);
2146 2210
2147 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 2211 if (put_user(val, inflightp))
2148 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 2212 return -EFAULT;
2149 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD); 2213
2150 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 2214 return 0;
2151 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); 2215}
2152 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 2216
2153 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 2217static int ipath_sdma_get_complete(struct ipath_devdata *dd,
2218 struct ipath_user_sdma_queue *pq,
2219 u32 __user *completep)
2220{
2221 u32 val;
2222 int err;
2223
2224 err = ipath_user_sdma_make_progress(dd, pq);
2225 if (err < 0)
2226 return err;
2227
2228 val = ipath_user_sdma_complete_counter(pq);
2229 if (put_user(val, completep))
2230 return -EFAULT;
2154 2231
2155 return 0; 2232 return 0;
2156} 2233}
@@ -2229,6 +2306,16 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2229 dest = &cmd.cmd.armlaunch_ctrl; 2306 dest = &cmd.cmd.armlaunch_ctrl;
2230 src = &ucmd->cmd.armlaunch_ctrl; 2307 src = &ucmd->cmd.armlaunch_ctrl;
2231 break; 2308 break;
2309 case IPATH_CMD_SDMA_INFLIGHT:
2310 copy = sizeof(cmd.cmd.sdma_inflight);
2311 dest = &cmd.cmd.sdma_inflight;
2312 src = &ucmd->cmd.sdma_inflight;
2313 break;
2314 case IPATH_CMD_SDMA_COMPLETE:
2315 copy = sizeof(cmd.cmd.sdma_complete);
2316 dest = &cmd.cmd.sdma_complete;
2317 src = &ucmd->cmd.sdma_complete;
2318 break;
2232 default: 2319 default:
2233 ret = -EINVAL; 2320 ret = -EINVAL;
2234 goto bail; 2321 goto bail;
@@ -2299,7 +2386,7 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2299 cmd.cmd.slave_mask_addr); 2386 cmd.cmd.slave_mask_addr);
2300 break; 2387 break;
2301 case IPATH_CMD_PIOAVAILUPD: 2388 case IPATH_CMD_PIOAVAILUPD:
2302 ret = ipath_force_pio_avail_update(pd->port_dd); 2389 ipath_force_pio_avail_update(pd->port_dd);
2303 break; 2390 break;
2304 case IPATH_CMD_POLL_TYPE: 2391 case IPATH_CMD_POLL_TYPE:
2305 pd->poll_type = cmd.cmd.poll_type; 2392 pd->poll_type = cmd.cmd.poll_type;
@@ -2310,6 +2397,17 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2310 else 2397 else
2311 ipath_disable_armlaunch(pd->port_dd); 2398 ipath_disable_armlaunch(pd->port_dd);
2312 break; 2399 break;
2400 case IPATH_CMD_SDMA_INFLIGHT:
2401 ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
2402 (u32 __user *) (unsigned long)
2403 cmd.cmd.sdma_inflight);
2404 break;
2405 case IPATH_CMD_SDMA_COMPLETE:
2406 ret = ipath_sdma_get_complete(pd->port_dd,
2407 user_sdma_queue_fp(fp),
2408 (u32 __user *) (unsigned long)
2409 cmd.cmd.sdma_complete);
2410 break;
2313 } 2411 }
2314 2412
2315 if (ret >= 0) 2413 if (ret >= 0)
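From user space, the two new commands are issued through the same write() command channel as the existing ones; a hypothetical sketch (assumes ipath_common.h is included for struct ipath_cmd, and that the field names match that header) of polling the completion counter:

#include <stdio.h>
#include <unistd.h>
#include <linux/types.h>

static int poll_sdma_complete(int fd)
{
	struct ipath_cmd cmd = { .type = IPATH_CMD_SDMA_COMPLETE };
	__u32 completed = 0;

	/* the kernel writes the counter through this user pointer */
	cmd.cmd.sdma_complete = (__u64) (unsigned long) &completed;
	if (write(fd, &cmd, sizeof(cmd)) < 0)
		return -1;
	printf("%u sdma requests completed\n", completed);
	return 0;
}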
@@ -2319,6 +2417,20 @@ bail:
2319 return ret; 2417 return ret;
2320} 2418}
2321 2419
2420static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
2421 unsigned long dim, loff_t off)
2422{
2423 struct file *filp = iocb->ki_filp;
2424 struct ipath_filedata *fp = filp->private_data;
2425 struct ipath_portdata *pd = port_fp(filp);
2426 struct ipath_user_sdma_queue *pq = fp->pq;
2427
2428 if (!dim)
2429 return -EINVAL;
2430
2431 return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
2432}
2433
2322static struct class *ipath_class; 2434static struct class *ipath_class;
2323 2435
2324static int init_cdev(int minor, char *name, const struct file_operations *fops, 2436static int init_cdev(int minor, char *name, const struct file_operations *fops,
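With the new aio_write hook in place, user-level SDMA submission is just a vectored write on the opened device fd; a hypothetical user-space sketch (helper name invented here):

#include <sys/uio.h>

static ssize_t submit_sdma(int fd, void *hdr, size_t hlen,
			   void *payload, size_t plen)
{
	struct iovec iov[2] = {
		{ .iov_base = hdr,     .iov_len = hlen },
		{ .iov_base = payload, .iov_len = plen },
	};

	/* routed by the kernel to ipath_writev() -> ipath_user_sdma_writev() */
	return writev(fd, iov, 2);
}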
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 9e2ced3cdc5e..02831ad070b8 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -40,6 +40,7 @@
40#include <linux/pci.h> 40#include <linux/pci.h>
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/htirq.h> 42#include <linux/htirq.h>
43#include <rdma/ib_verbs.h>
43 44
44#include "ipath_kernel.h" 45#include "ipath_kernel.h"
45#include "ipath_registers.h" 46#include "ipath_registers.h"
@@ -305,7 +306,9 @@ static const struct ipath_cregs ipath_ht_cregs = {
305 306
306/* kr_intstatus, kr_intclear, kr_intmask bits */ 307/* kr_intstatus, kr_intclear, kr_intmask bits */
307#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1) 308#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
309#define INFINIPATH_I_RCVURG_SHIFT 0
308#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1) 310#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
311#define INFINIPATH_I_RCVAVAIL_SHIFT 12
309 312
310/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ 313/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
311#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0 314#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
@@ -476,7 +479,13 @@ static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
476#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \ 479#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
477 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) 480 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
478 481
479static int ipath_ht_txe_recover(struct ipath_devdata *); 482static void ipath_ht_txe_recover(struct ipath_devdata *dd)
483{
484 ++ipath_stats.sps_txeparity;
485 dev_info(&dd->pcidev->dev,
486 "Recovering from TXE PIO parity error\n");
487}
488
480 489
481/** 490/**
482 * ipath_ht_handle_hwerrors - display hardware errors. 491 * ipath_ht_handle_hwerrors - display hardware errors.
@@ -557,11 +566,11 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
557 * occur if a processor speculative read is done to the PIO 566 * occur if a processor speculative read is done to the PIO
558 * buffer while we are sending a packet, for example. 567 * buffer while we are sending a packet, for example.
559 */ 568 */
560 if ((hwerrs & TXE_PIO_PARITY) && ipath_ht_txe_recover(dd)) 569 if (hwerrs & TXE_PIO_PARITY) {
570 ipath_ht_txe_recover(dd);
561 hwerrs &= ~TXE_PIO_PARITY; 571 hwerrs &= ~TXE_PIO_PARITY;
562 if (hwerrs & RXE_EAGER_PARITY) 572 }
563 ipath_dev_err(dd, "RXE parity, Eager TID error is not " 573
564 "recoverable\n");
565 if (!hwerrs) { 574 if (!hwerrs) {
566 ipath_dbg("Clearing freezemode on ignored or " 575 ipath_dbg("Clearing freezemode on ignored or "
567 "recovered hardware error\n"); 576 "recovered hardware error\n");
@@ -735,11 +744,10 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
735 */ 744 */
736 dd->ipath_flags |= IPATH_32BITCOUNTERS; 745 dd->ipath_flags |= IPATH_32BITCOUNTERS;
737 dd->ipath_flags |= IPATH_GPIO_INTR; 746 dd->ipath_flags |= IPATH_GPIO_INTR;
738 if (dd->ipath_htspeed != 800) 747 if (dd->ipath_lbus_speed != 800)
739 ipath_dev_err(dd, 748 ipath_dev_err(dd,
740 "Incorrectly configured for HT @ %uMHz\n", 749 "Incorrectly configured for HT @ %uMHz\n",
741 dd->ipath_htspeed); 750 dd->ipath_lbus_speed);
742 ret = 0;
743 751
744 /* 752 /*
745 * set here, not in ipath_init_*_funcs because we have to do 753 * set here, not in ipath_init_*_funcs because we have to do
@@ -839,7 +847,7 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
839 /* 847 /*
840 * now write them back to clear the error. 848 * now write them back to clear the error.
841 */ 849 */
842 pci_write_config_byte(pdev, link_off, 850 pci_write_config_word(pdev, link_off,
843 linkctrl & (0xf << 8)); 851 linkctrl & (0xf << 8));
844 } 852 }
845 } 853 }
@@ -904,7 +912,7 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
904 break; 912 break;
905 } 913 }
906 914
907 dd->ipath_htwidth = width; 915 dd->ipath_lbus_width = width;
908 916
909 if (linkwidth != 0x11) { 917 if (linkwidth != 0x11) {
910 ipath_dev_err(dd, "Not configured for 16 bit HT " 918 ipath_dev_err(dd, "Not configured for 16 bit HT "
@@ -952,8 +960,13 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
952 speed = 200; 960 speed = 200;
953 break; 961 break;
954 } 962 }
955 dd->ipath_htspeed = speed; 963 dd->ipath_lbus_speed = speed;
956 } 964 }
965
966 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
967 "HyperTransport,%uMHz,x%u\n",
968 dd->ipath_lbus_speed,
969 dd->ipath_lbus_width);
957} 970}
958 971
959static int ipath_ht_intconfig(struct ipath_devdata *dd) 972static int ipath_ht_intconfig(struct ipath_devdata *dd)
@@ -1653,22 +1666,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1653} 1666}
1654 1667
1655 1668
1656static int ipath_ht_txe_recover(struct ipath_devdata *dd)
1657{
1658 int cnt = ++ipath_stats.sps_txeparity;
1659 if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) {
1660 if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
1661 ipath_dev_err(dd,
1662 "Too many attempts to recover from "
1663 "TXE parity, giving up\n");
1664 return 0;
1665 }
1666 dev_info(&dd->pcidev->dev,
1667 "Recovering from TXE PIO parity error\n");
1668 return 1;
1669}
1670
1671
1672/** 1669/**
1673 * ipath_init_ht_get_base_info - set chip-specific flags for user code 1670 * ipath_init_ht_get_base_info - set chip-specific flags for user code
1674 * @dd: the infinipath device 1671 * @dd: the infinipath device
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index c7a2f50824c0..421cc2af891f 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -38,7 +38,7 @@
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41 41#include <rdma/ib_verbs.h>
42 42
43#include "ipath_kernel.h" 43#include "ipath_kernel.h"
44#include "ipath_registers.h" 44#include "ipath_registers.h"
@@ -311,9 +311,14 @@ static const struct ipath_cregs ipath_pe_cregs = {
311 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt) 311 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
312}; 312};
313 313
314/* kr_control bits */
315#define INFINIPATH_C_RESET 1U
316
314/* kr_intstatus, kr_intclear, kr_intmask bits */ 317/* kr_intstatus, kr_intclear, kr_intmask bits */
315#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1) 318#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
319#define INFINIPATH_I_RCVURG_SHIFT 0
316#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1) 320#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
321#define INFINIPATH_I_RCVAVAIL_SHIFT 12
317 322
318/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ 323/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
319#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL 324#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
@@ -338,6 +343,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
338#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 343#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
339#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000 344#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
340 345
346/* kr_xgxsconfig bits */
347#define INFINIPATH_XGXS_RESET 0x5ULL
348
341#define _IPATH_GPIO_SDA_NUM 1 349#define _IPATH_GPIO_SDA_NUM 1
342#define _IPATH_GPIO_SCL_NUM 0 350#define _IPATH_GPIO_SCL_NUM 0
343 351
@@ -346,6 +354,16 @@ static const struct ipath_cregs ipath_pe_cregs = {
346#define IPATH_GPIO_SCL (1ULL << \ 354#define IPATH_GPIO_SCL (1ULL << \
347 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) 355 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
348 356
357#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
358#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
359 ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
360#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
361#define INFINIPATH_RT_IS_VALID(tid) \
362 (((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
363 ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
364#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
365#define INFINIPATH_RT_ADDR_SHIFT 10
366
349#define INFINIPATH_R_INTRAVAIL_SHIFT 16 367#define INFINIPATH_R_INTRAVAIL_SHIFT 16
350#define INFINIPATH_R_TAILUPD_SHIFT 31 368#define INFINIPATH_R_TAILUPD_SHIFT 31
351 369
@@ -372,6 +390,8 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
372#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \ 390#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
373 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \ 391 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
374 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) 392 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
393#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
394 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
375 395
376static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *, 396static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
377 u32, unsigned long); 397 u32, unsigned long);
@@ -450,10 +470,8 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
450 * make sure we get this much out, unless told to be quiet, 470 * make sure we get this much out, unless told to be quiet,
451 * or it's occurred within the last 5 seconds 471 * or it's occurred within the last 5 seconds
452 */ 472 */
453 if ((hwerrs & ~(dd->ipath_lasthwerror | 473 if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
454 ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | 474 RXE_EAGER_PARITY)) ||
455 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
456 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
457 (ipath_debug & __IPATH_VERBDBG)) 475 (ipath_debug & __IPATH_VERBDBG))
458 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " 476 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
459 "(cleared)\n", (unsigned long long) hwerrs); 477 "(cleared)\n", (unsigned long long) hwerrs);
@@ -465,7 +483,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
465 (hwerrs & ~dd->ipath_hwe_bitsextant)); 483 (hwerrs & ~dd->ipath_hwe_bitsextant));
466 484
467 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); 485 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
468 if (ctrl & INFINIPATH_C_FREEZEMODE) { 486 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
469 /* 487 /*
470 * parity errors in send memory are recoverable, 488 * parity errors in send memory are recoverable,
471 * just cancel the send (if indicated in sendbuffererror), 489
@@ -540,12 +558,40 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
540 dd->ipath_hwerrmask); 558 dd->ipath_hwerrmask);
541 } 559 }
542 560
543 if (*msg) 561 if (hwerrs) {
562 /*
 563 * if any bits we aren't ignoring are still set,
 564 * complain only once, in case the error is stuck
 565 * or recurring and we get here multiple
 566 * times.
567 */
544 ipath_dev_err(dd, "%s hardware error\n", msg); 568 ipath_dev_err(dd, "%s hardware error\n", msg);
545 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) { 569 if (dd->ipath_flags & IPATH_INITTED) {
570 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
571 ipath_setup_pe_setextled(dd,
572 INFINIPATH_IBCS_L_STATE_DOWN,
573 INFINIPATH_IBCS_LT_STATE_DISABLED);
574 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
575 "mode), no longer usable, SN %.16s\n",
576 dd->ipath_serial);
577 isfatal = 1;
578 }
579 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
580 /* mark as having had error */
581 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
546 /* 582 /*
547 * for /sys status file ; if no trailing } is copied, we'll 583 * mark as not usable, at a minimum until driver
548 * know it was truncated. 584 * is reloaded, probably until reboot, since no
585 * other reset is possible.
586 */
587 dd->ipath_flags &= ~IPATH_INITTED;
588 } else
589 *msg = 0; /* recovered from all of them */
590
591 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
592 /*
 593 * for /sys status file; if no trailing brace is copied,
594 * we'll know it was truncated.
549 */ 595 */
550 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen, 596 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
551 "{%s}", msg); 597 "{%s}", msg);
@@ -610,7 +656,6 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
610 dd->ipath_f_put_tid = ipath_pe_put_tid_2; 656 dd->ipath_f_put_tid = ipath_pe_put_tid_2;
611 } 657 }
612 658
613
614 /* 659 /*
615 * set here, not in ipath_init_*_funcs because we have to do 660 * set here, not in ipath_init_*_funcs because we have to do
616 * it after we can read chip registers. 661 * it after we can read chip registers.
@@ -838,7 +883,7 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
838 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | 883 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
839 INFINIPATH_EXTC_LED2PRIPORT_ON); 884 INFINIPATH_EXTC_LED2PRIPORT_ON);
840 885
841 if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP) 886 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
842 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON; 887 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
843 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) 888 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
844 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; 889 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
@@ -863,6 +908,62 @@ static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
863 pci_disable_msi(dd->pcidev); 908 pci_disable_msi(dd->pcidev);
864} 909}
865 910
911static void ipath_6120_pcie_params(struct ipath_devdata *dd)
912{
913 u16 linkstat, speed;
914 int pos;
915
916 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
917 if (!pos) {
918 ipath_dev_err(dd, "Can't find PCI Express capability!\n");
919 goto bail;
920 }
921
922 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
923 &linkstat);
924 /*
925 * speed is bits 0-4, linkwidth is bits 4-8
926 * no defines for them in headers
927 */
928 speed = linkstat & 0xf;
929 linkstat >>= 4;
930 linkstat &= 0x1f;
931 dd->ipath_lbus_width = linkstat;
932
933 switch (speed) {
934 case 1:
935 dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
936 break;
937 case 2:
 938 dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
939 break;
940 default: /* not defined, assume gen1 */
941 dd->ipath_lbus_speed = 2500;
942 break;
943 }
944
945 if (linkstat < 8)
946 ipath_dev_err(dd,
947 "PCIe width %u (x8 HCA), performance reduced\n",
948 linkstat);
949 else
950 ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
951 dd->ipath_lbus_speed, linkstat);
952
953 if (speed != 1)
954 ipath_dev_err(dd,
955 "PCIe linkspeed %u is incorrect; "
956 "should be 1 (2500)!\n", speed);
957bail:
958 /* fill in string, even on errors */
959 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
960 "PCIe,%uMHz,x%u\n",
961 dd->ipath_lbus_speed,
962 dd->ipath_lbus_width);
963
964 return;
965}
966
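A worked example of the link-status decode above (sketch): with LNKSTA reading 0x0081, the current speed field is 1 (Gen1, 2.5GT/s) and the negotiated width is 8 lanes.

u16 linkstat = 0x0081;			/* example register value */
u16 speed = linkstat & 0xf;		/* == 1 -> 2500 (Gen1) */
u16 width = (linkstat >> 4) & 0x1f;	/* == 8 -> x8 link */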
866/** 967/**
867 * ipath_setup_pe_config - setup PCIe config related stuff 968 * ipath_setup_pe_config - setup PCIe config related stuff
868 * @dd: the infinipath device 969 * @dd: the infinipath device
@@ -920,19 +1021,8 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
920 } else 1021 } else
921 ipath_dev_err(dd, "Can't find MSI capability, " 1022 ipath_dev_err(dd, "Can't find MSI capability, "
922 "can't save MSI settings for reset\n"); 1023 "can't save MSI settings for reset\n");
923 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) { 1024
924 u16 linkstat; 1025 ipath_6120_pcie_params(dd);
925 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
926 &linkstat);
927 linkstat >>= 4;
928 linkstat &= 0x1f;
929 if (linkstat != 8)
930 ipath_dev_err(dd, "PCIe width %u, "
931 "performance reduced\n", linkstat);
932 }
933 else
934 ipath_dev_err(dd, "Can't find PCI Express "
935 "capability!\n");
936 1026
937 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; 1027 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
938 dd->ipath_link_speed_supported = IPATH_IB_SDR; 1028 dd->ipath_link_speed_supported = IPATH_IB_SDR;
@@ -1065,10 +1155,7 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
1065 INFINIPATH_HWE_RXEMEMPARITYERR_MASK << 1155 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1066 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; 1156 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1067 1157
1068 dd->ipath_eep_st_masks[2].errs_to_log = 1158 dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
1069 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
1070
1071
1072 dd->delay_mult = 2; /* SDR, 4X, can't change */ 1159 dd->delay_mult = 2; /* SDR, 4X, can't change */
1073} 1160}
1074 1161
@@ -1142,6 +1229,9 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
1142 u64 val; 1229 u64 val;
1143 int i; 1230 int i;
1144 int ret; 1231 int ret;
1232 u16 cmdval;
1233
1234 pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
1145 1235
1146 /* Use ERROR so it shows up in logs, etc. */ 1236 /* Use ERROR so it shows up in logs, etc. */
1147 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); 1237 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
@@ -1169,10 +1259,14 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
1169 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", 1259 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
1170 r); 1260 r);
1171 /* now re-enable memory access */ 1261 /* now re-enable memory access */
1262 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
1172 if ((r = pci_enable_device(dd->pcidev))) 1263 if ((r = pci_enable_device(dd->pcidev)))
1173 ipath_dev_err(dd, "pci_enable_device failed after " 1264 ipath_dev_err(dd, "pci_enable_device failed after "
1174 "reset: %d\n", r); 1265 "reset: %d\n", r);
1175 /* whether it worked or not, mark as present, again */ 1266 /*
1267 * whether it fully enabled or not, mark as present,
1268 * again (but not INITTED)
1269 */
1176 dd->ipath_flags |= IPATH_PRESENT; 1270 dd->ipath_flags |= IPATH_PRESENT;
1177 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); 1271 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
1178 if (val == dd->ipath_revision) { 1272 if (val == dd->ipath_revision) {
@@ -1190,6 +1284,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
1190 ret = 0; /* failed */ 1284 ret = 0; /* failed */
1191 1285
1192bail: 1286bail:
1287 if (ret)
1288 ipath_6120_pcie_params(dd);
1193 return ret; 1289 return ret;
1194} 1290}
1195 1291
@@ -1209,16 +1305,21 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1209{ 1305{
1210 u32 __iomem *tidp32 = (u32 __iomem *)tidptr; 1306 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1211 unsigned long flags = 0; /* keep gcc quiet */ 1307 unsigned long flags = 0; /* keep gcc quiet */
1308 int tidx;
1309 spinlock_t *tidlockp;
1310
1311 if (!dd->ipath_kregbase)
1312 return;
1212 1313
1213 if (pa != dd->ipath_tidinvalid) { 1314 if (pa != dd->ipath_tidinvalid) {
1214 if (pa & ((1U << 11) - 1)) { 1315 if (pa & ((1U << 11) - 1)) {
1215 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx " 1316 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
1216 "not 4KB aligned!\n", pa); 1317 "not 2KB aligned!\n", pa);
1217 return; 1318 return;
1218 } 1319 }
1219 pa >>= 11; 1320 pa >>= 11;
1220 /* paranoia check */ 1321 /* paranoia check */
1221 if (pa & (7<<29)) 1322 if (pa & ~INFINIPATH_RT_ADDR_MASK)
1222 ipath_dev_err(dd, 1323 ipath_dev_err(dd,
1223 "BUG: Physical page address 0x%lx " 1324 "BUG: Physical page address 0x%lx "
1224 "has bits set in 31-29\n", pa); 1325 "has bits set in 31-29\n", pa);
@@ -1238,14 +1339,22 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1238 * call can be done from interrupt level for the port 0 eager TIDs, 1339 * call can be done from interrupt level for the port 0 eager TIDs,
1239 * so we have to use irqsave locks. 1340 * so we have to use irqsave locks.
1240 */ 1341 */
1241 spin_lock_irqsave(&dd->ipath_tid_lock, flags); 1342 /*
1343 * Assumes tidptr always > ipath_egrtidbase
1344 * if type == RCVHQ_RCV_TYPE_EAGER.
1345 */
1346 tidx = tidptr - dd->ipath_egrtidbase;
1347
1348 tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
1349 ? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
1350 spin_lock_irqsave(tidlockp, flags);
1242 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf); 1351 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
1243 if (dd->ipath_kregbase) 1352 writel(pa, tidp32);
1244 writel(pa, tidp32);
1245 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef); 1353 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
1246 mmiowb(); 1354 mmiowb();
1247 spin_unlock_irqrestore(&dd->ipath_tid_lock, flags); 1355 spin_unlock_irqrestore(tidlockp, flags);
1248} 1356}
1357
1249/** 1358/**
1250 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher 1359 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
1251 * @dd: the infinipath device 1360 * @dd: the infinipath device
@@ -1261,6 +1370,10 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
1261 u32 type, unsigned long pa) 1370 u32 type, unsigned long pa)
1262{ 1371{
1263 u32 __iomem *tidp32 = (u32 __iomem *)tidptr; 1372 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1373 u32 tidx;
1374
1375 if (!dd->ipath_kregbase)
1376 return;
1264 1377
1265 if (pa != dd->ipath_tidinvalid) { 1378 if (pa != dd->ipath_tidinvalid) {
1266 if (pa & ((1U << 11) - 1)) { 1379 if (pa & ((1U << 11) - 1)) {
@@ -1270,7 +1383,7 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
1270 } 1383 }
1271 pa >>= 11; 1384 pa >>= 11;
1272 /* paranoia check */ 1385 /* paranoia check */
1273 if (pa & (7<<29)) 1386 if (pa & ~INFINIPATH_RT_ADDR_MASK)
1274 ipath_dev_err(dd, 1387 ipath_dev_err(dd,
1275 "BUG: Physical page address 0x%lx " 1388 "BUG: Physical page address 0x%lx "
1276 "has bits set in 31-29\n", pa); 1389 "has bits set in 31-29\n", pa);
@@ -1280,8 +1393,8 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
1280 else /* for now, always full 4KB page */ 1393 else /* for now, always full 4KB page */
1281 pa |= 2 << 29; 1394 pa |= 2 << 29;
1282 } 1395 }
1283 if (dd->ipath_kregbase) 1396 tidx = tidptr - dd->ipath_egrtidbase;
1284 writel(pa, tidp32); 1397 writel(pa, tidp32);
1285 mmiowb(); 1398 mmiowb();
1286} 1399}
1287 1400
@@ -1379,17 +1492,13 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
1379 dd->ipath_egrtidbase = (u64 __iomem *) 1492 dd->ipath_egrtidbase = (u64 __iomem *)
1380 ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase); 1493 ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
1381 1494
1382 /* 1495 dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
1383 * To truly support a 4KB MTU (for usermode), we need to
1384 * bump this to a larger value. For now, we use them for
1385 * the kernel only.
1386 */
1387 dd->ipath_rcvegrbufsize = 2048;
1388 /* 1496 /*
1389 * the min() check here is currently a nop, but it may not always 1497 * the min() check here is currently a nop, but it may not always
1390 * be, depending on just how we do ipath_rcvegrbufsize 1498 * be, depending on just how we do ipath_rcvegrbufsize
1391 */ 1499 */
1392 dd->ipath_ibmaxlen = min(dd->ipath_piosize2k, 1500 dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
1501 dd->ipath_piosize2k,
1393 dd->ipath_rcvegrbufsize + 1502 dd->ipath_rcvegrbufsize +
1394 (dd->ipath_rcvhdrentsize << 2)); 1503 (dd->ipath_rcvhdrentsize << 2));
1395 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; 1504 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
new file mode 100644
index 000000000000..1b2de2cfb69b
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -0,0 +1,2571 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/*
34 * This file contains all of the code that is specific to the
35 * InfiniPath 7220 chip (except that specific to the SerDes)
36 */
37
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/io.h>
42#include <rdma/ib_verbs.h>
43
44#include "ipath_kernel.h"
45#include "ipath_registers.h"
46#include "ipath_7220.h"
47
48static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64);
49
50static unsigned ipath_compat_ddr_negotiate = 1;
51
52module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint,
53 S_IWUSR | S_IRUGO);
54MODULE_PARM_DESC(compat_ddr_negotiate,
55 "Attempt pre-IBTA 1.2 DDR speed negotiation");
56
57static unsigned ipath_sdma_fetch_arb = 1;
58module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
59MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
60
61/*
62 * This file contains almost all the chip-specific register information and
63 * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
 64 * exception of SerDes support, which is in ipath_sd7220.c.
65 *
66 * This lists the InfiniPath registers, in the actual chip layout.
67 * This structure should never be directly accessed.
68 */
69struct _infinipath_do_not_use_kernel_regs {
70 unsigned long long Revision;
71 unsigned long long Control;
72 unsigned long long PageAlign;
73 unsigned long long PortCnt;
74 unsigned long long DebugPortSelect;
 75 unsigned long long DebugSigsIntSel; /* was Reserved0; */
76 unsigned long long SendRegBase;
77 unsigned long long UserRegBase;
78 unsigned long long CounterRegBase;
79 unsigned long long Scratch;
80 unsigned long long EEPROMAddrCmd; /* was Reserved1; */
81 unsigned long long EEPROMData; /* was Reserved2; */
82 unsigned long long IntBlocked;
83 unsigned long long IntMask;
84 unsigned long long IntStatus;
85 unsigned long long IntClear;
86 unsigned long long ErrorMask;
87 unsigned long long ErrorStatus;
88 unsigned long long ErrorClear;
89 unsigned long long HwErrMask;
90 unsigned long long HwErrStatus;
91 unsigned long long HwErrClear;
92 unsigned long long HwDiagCtrl;
93 unsigned long long MDIO;
94 unsigned long long IBCStatus;
95 unsigned long long IBCCtrl;
96 unsigned long long ExtStatus;
97 unsigned long long ExtCtrl;
98 unsigned long long GPIOOut;
99 unsigned long long GPIOMask;
100 unsigned long long GPIOStatus;
101 unsigned long long GPIOClear;
102 unsigned long long RcvCtrl;
103 unsigned long long RcvBTHQP;
104 unsigned long long RcvHdrSize;
105 unsigned long long RcvHdrCnt;
106 unsigned long long RcvHdrEntSize;
107 unsigned long long RcvTIDBase;
108 unsigned long long RcvTIDCnt;
109 unsigned long long RcvEgrBase;
110 unsigned long long RcvEgrCnt;
111 unsigned long long RcvBufBase;
112 unsigned long long RcvBufSize;
113 unsigned long long RxIntMemBase;
114 unsigned long long RxIntMemSize;
115 unsigned long long RcvPartitionKey;
116 unsigned long long RcvQPMulticastPort;
117 unsigned long long RcvPktLEDCnt;
118 unsigned long long IBCDDRCtrl;
119 unsigned long long HRTBT_GUID;
120 unsigned long long IB_SDTEST_IF_TX;
121 unsigned long long IB_SDTEST_IF_RX;
122 unsigned long long IBCDDRCtrl2;
123 unsigned long long IBCDDRStatus;
124 unsigned long long JIntReload;
125 unsigned long long IBNCModeCtrl;
126 unsigned long long SendCtrl;
127 unsigned long long SendBufBase;
128 unsigned long long SendBufSize;
129 unsigned long long SendBufCnt;
130 unsigned long long SendAvailAddr;
131 unsigned long long TxIntMemBase;
132 unsigned long long TxIntMemSize;
133 unsigned long long SendDmaBase;
134 unsigned long long SendDmaLenGen;
135 unsigned long long SendDmaTail;
136 unsigned long long SendDmaHead;
137 unsigned long long SendDmaHeadAddr;
138 unsigned long long SendDmaBufMask0;
139 unsigned long long SendDmaBufMask1;
140 unsigned long long SendDmaBufMask2;
141 unsigned long long SendDmaStatus;
142 unsigned long long SendBufferError;
143 unsigned long long SendBufferErrorCONT1;
144 unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */
145 unsigned long long Reserved6L[2];
146 unsigned long long AvailUpdCount;
147 unsigned long long RcvHdrAddr0;
148 unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */
149 unsigned long long Reserved7hdtl; /* Align next to 300 */
150 unsigned long long RcvHdrTailAddr0; /* 300, like others */
151 unsigned long long RcvHdrTailAddrs[16];
152 unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */
153 unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */
154 unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */
155 unsigned long long Reserved10sds; /* was SerdesStatus on */
156 unsigned long long XGXSConfig;
157 unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */
158 unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */
159 unsigned long long EEPAddrCmd;
160 unsigned long long EEPData;
161 unsigned long long PcieEpbAccCtl;
162 unsigned long long PcieEpbTransCtl;
163 unsigned long long EfuseCtl; /* E-Fuse control */
164 unsigned long long EfuseData[4];
165 unsigned long long ProcMon;
166 /* this chip moves following two from previous 200, 208 */
167 unsigned long long PCIeRBufTestReg0;
168 unsigned long long PCIeRBufTestReg1;
169 /* added for this chip */
170 unsigned long long PCIeRBufTestReg2;
171 unsigned long long PCIeRBufTestReg3;
172 /* added for this chip, debug only */
173 unsigned long long SPC_JTAG_ACCESS_REG;
174 unsigned long long LAControlReg;
175 unsigned long long GPIODebugSelReg;
176 unsigned long long DebugPortValueReg;
177 /* added for this chip, DMA */
178 unsigned long long SendDmaBufUsed[3];
179 unsigned long long SendDmaReqTagUsed;
180 /*
181 * added for this chip, EFUSE: note that these program 64-bit
182 * words 2 and 3 */
183 unsigned long long efuse_pgm_data[2];
184 unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */
185 /* we have 30 regs for DDS and RXEQ in IB SERDES */
186 unsigned long long SerDesDDSRXEQ[30];
187 unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */
188 /* added for LA debug support */
189 unsigned long long LAMemory[32];
190};
191
192struct _infinipath_do_not_use_counters {
193 __u64 LBIntCnt;
194 __u64 LBFlowStallCnt;
195 __u64 TxSDmaDescCnt; /* was Reserved1 */
196 __u64 TxUnsupVLErrCnt;
197 __u64 TxDataPktCnt;
198 __u64 TxFlowPktCnt;
199 __u64 TxDwordCnt;
200 __u64 TxLenErrCnt;
201 __u64 TxMaxMinLenErrCnt;
202 __u64 TxUnderrunCnt;
203 __u64 TxFlowStallCnt;
204 __u64 TxDroppedPktCnt;
205 __u64 RxDroppedPktCnt;
206 __u64 RxDataPktCnt;
207 __u64 RxFlowPktCnt;
208 __u64 RxDwordCnt;
209 __u64 RxLenErrCnt;
210 __u64 RxMaxMinLenErrCnt;
211 __u64 RxICRCErrCnt;
212 __u64 RxVCRCErrCnt;
213 __u64 RxFlowCtrlErrCnt;
214 __u64 RxBadFormatCnt;
215 __u64 RxLinkProblemCnt;
216 __u64 RxEBPCnt;
217 __u64 RxLPCRCErrCnt;
218 __u64 RxBufOvflCnt;
219 __u64 RxTIDFullErrCnt;
220 __u64 RxTIDValidErrCnt;
221 __u64 RxPKeyMismatchCnt;
222 __u64 RxP0HdrEgrOvflCnt;
223 __u64 RxP1HdrEgrOvflCnt;
224 __u64 RxP2HdrEgrOvflCnt;
225 __u64 RxP3HdrEgrOvflCnt;
226 __u64 RxP4HdrEgrOvflCnt;
227 __u64 RxP5HdrEgrOvflCnt;
228 __u64 RxP6HdrEgrOvflCnt;
229 __u64 RxP7HdrEgrOvflCnt;
230 __u64 RxP8HdrEgrOvflCnt;
231 __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
232 __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
233 __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
234 __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
235 __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
236 __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
237 __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
238 __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
239 __u64 IBStatusChangeCnt;
240 __u64 IBLinkErrRecoveryCnt;
241 __u64 IBLinkDownedCnt;
242 __u64 IBSymbolErrCnt;
243 /* The following are new for IBA7220 */
244 __u64 RxVL15DroppedPktCnt;
245 __u64 RxOtherLocalPhyErrCnt;
246 __u64 PcieRetryBufDiagQwordCnt;
247 __u64 ExcessBufferOvflCnt;
248 __u64 LocalLinkIntegrityErrCnt;
249 __u64 RxVlErrCnt;
250 __u64 RxDlidFltrCnt;
251 __u64 Reserved8[7];
252 __u64 PSStat;
253 __u64 PSStart;
254 __u64 PSInterval;
255 __u64 PSRcvDataCount;
256 __u64 PSRcvPktsCount;
257 __u64 PSXmitDataCount;
258 __u64 PSXmitPktsCount;
259 __u64 PSXmitWaitCount;
260};
261
262#define IPATH_KREG_OFFSET(field) (offsetof( \
263 struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
264#define IPATH_CREG_OFFSET(field) (offsetof( \
265 struct _infinipath_do_not_use_counters, field) / sizeof(u64))
266
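These macros turn a field's byte offset within the do-not-use layout structs into a 64-bit register index; for example, Control is the second u64 in the kernel-register struct (sketch, not driver code):

/* offsetof(..., Control) == 8 bytes; divided by sizeof(u64) gives index 1 */
static const u16 kr_control_index = IPATH_KREG_OFFSET(Control);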
267static const struct ipath_kregs ipath_7220_kregs = {
268 .kr_control = IPATH_KREG_OFFSET(Control),
269 .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
270 .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
271 .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
272 .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
273 .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
274 .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
275 .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
276 .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
277 .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
278 .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
279 .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
280 .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
281 .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
282 .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
283 .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
284 .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
285 .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
286 .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
287 .kr_intclear = IPATH_KREG_OFFSET(IntClear),
288 .kr_intmask = IPATH_KREG_OFFSET(IntMask),
289 .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
290 .kr_mdio = IPATH_KREG_OFFSET(MDIO),
291 .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
292 .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
293 .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
294 .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
295 .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
296 .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
297 .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
298 .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
299 .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
300 .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
301 .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
302 .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
303 .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
304 .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
305 .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
306 .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
307 .kr_revision = IPATH_KREG_OFFSET(Revision),
308 .kr_scratch = IPATH_KREG_OFFSET(Scratch),
309 .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
310 .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
311 .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr),
312 .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase),
313 .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt),
314 .kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize),
315 .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
316 .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
317 .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
318 .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
319
320 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
321
322 /* send dma related regs */
323 .kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase),
324 .kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen),
325 .kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail),
326 .kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead),
327 .kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr),
328 .kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0),
329 .kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1),
330 .kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2),
331 .kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus),
332
333 /* SerDes related regs */
334 .kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl),
335 .kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl),
336 .kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg),
337 .kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl),
338 .kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl),
339 .kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ),
340
341 /*
342 * These should not be used directly via ipath_read_kreg64(),
343 * use them with ipath_read_kreg64_port()
344 */
345 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
346 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
347
348 /*
349 * The rcvpktled register controls one of the debug port signals, so
350 * a packet activity LED can be connected to it.
351 */
352 .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
353 .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
354 .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
355
356 .kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID),
357 .kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl),
358 .kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus),
359 .kr_jintreload = IPATH_KREG_OFFSET(JIntReload)
360};
361
362static const struct ipath_cregs ipath_7220_cregs = {
363 .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
364 .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
365 .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
366 .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
367 .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
368 .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
369 .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
370 .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
371 .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
372 .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
373 .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
374 .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
375 .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
376 .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
377 .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
378 .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
379 .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
380 .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
381 .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
382 .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
383 .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
384 .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
385 .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
386 .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
387 .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
388 .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
389 .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
390 .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
391 .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
392 .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
393 .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
394 .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
395 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt),
396 .cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt),
397 .cr_rxotherlocalphyerrcnt =
398 IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt),
399 .cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt),
400 .cr_locallinkintegrityerrcnt =
401 IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt),
402 .cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt),
403 .cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt),
404 .cr_psstat = IPATH_CREG_OFFSET(PSStat),
405 .cr_psstart = IPATH_CREG_OFFSET(PSStart),
406 .cr_psinterval = IPATH_CREG_OFFSET(PSInterval),
407 .cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount),
408 .cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount),
409 .cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount),
410 .cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount),
411 .cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount),
412};
413
414/* kr_control bits */
415#define INFINIPATH_C_RESET (1U<<7)
416
417/* kr_intstatus, kr_intclear, kr_intmask bits */
418#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1)
419#define INFINIPATH_I_RCVURG_SHIFT 32
420#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1)
421#define INFINIPATH_I_RCVAVAIL_SHIFT 0
422#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27)
423
424/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
425#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
426#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
427#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
428#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
429#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
430#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
431#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
432#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
433#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
434#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
435#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
436#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
437/* specific to this chip */
438#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
439#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
440#define INFINIPATH_HWE_SDMAMEMREADERR 0x0000000010000000ULL
441#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
442#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
443#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
444#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
445#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
446#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
447#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
448#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
449#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
450
451#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F
452#define IBA7220_IBCS_LINKSTATE_SHIFT 5
453#define IBA7220_IBCS_LINKSPEED_SHIFT 8
454#define IBA7220_IBCS_LINKWIDTH_SHIFT 9
455
456#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL
457#define IBA7220_IBCC_LINKCMD_SHIFT 19
458#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21
459
460/* kr_ibcddrctrl bits */
461#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
462#define IBA7220_IBC_DLIDLMC_SHIFT 32
463#define IBA7220_IBC_HRTBT_MASK 3
464#define IBA7220_IBC_HRTBT_SHIFT 16
465#define IBA7220_IBC_HRTBT_ENB 0x10000UL
466#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
467#define IBA7220_IBC_LREV_MASK 1
468#define IBA7220_IBC_LREV_SHIFT 8
469#define IBA7220_IBC_RXPOL_MASK 1
470#define IBA7220_IBC_RXPOL_SHIFT 7
471#define IBA7220_IBC_WIDTH_SHIFT 5
472#define IBA7220_IBC_WIDTH_MASK 0x3
473#define IBA7220_IBC_WIDTH_1X_ONLY (0<<IBA7220_IBC_WIDTH_SHIFT)
474#define IBA7220_IBC_WIDTH_4X_ONLY (1<<IBA7220_IBC_WIDTH_SHIFT)
475#define IBA7220_IBC_WIDTH_AUTONEG (2<<IBA7220_IBC_WIDTH_SHIFT)
476#define IBA7220_IBC_SPEED_AUTONEG (1<<1)
477#define IBA7220_IBC_SPEED_SDR (1<<2)
478#define IBA7220_IBC_SPEED_DDR (1<<3)
479#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7<<1)
480#define IBA7220_IBC_IBTA_1_2_MASK (1)
481
482/* kr_ibcddrstatus */
483/* link latency shift is 0, don't bother defining */
484#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
485
486/* kr_extstatus bits */
487#define INFINIPATH_EXTS_FREQSEL 0x2
488#define INFINIPATH_EXTS_SERDESSEL 0x4
489#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
490#define INFINIPATH_EXTS_MEMBIST_DISABLED 0x0000000000008000
491
492/* kr_xgxsconfig bits */
493#define INFINIPATH_XGXS_RESET 0x5ULL
494#define INFINIPATH_XGXS_FC_SAFE (1ULL<<63)
495
496/* kr_rcvpktledcnt */
497#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
498#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
499
500#define _IPATH_GPIO_SDA_NUM 1
501#define _IPATH_GPIO_SCL_NUM 0
502
503#define IPATH_GPIO_SDA (1ULL << \
504 (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
505#define IPATH_GPIO_SCL (1ULL << \
506 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
507
508#define IBA7220_R_INTRAVAIL_SHIFT 17
509#define IBA7220_R_TAILUPD_SHIFT 35
510#define IBA7220_R_PORTCFG_SHIFT 36
511
512#define INFINIPATH_JINT_PACKETSHIFT 16
513#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS 0
514#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0
515
516#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
517
518/*
519 * the size bits give us 2^N, in KB units. 0 marks as invalid,
520 * and 7 is reserved. We currently use only 2KB and 4KB
521 */
522#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
523#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */
524#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */
525#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
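/*
 * Illustrative example (not part of the driver): a 4KB eager buffer
 * at physical address 0x12345000 would be stored in a TID word as
 * (0x12345000 >> IBA7220_TID_PA_SHIFT) | IBA7220_TID_SZ_4K; see
 * ipath_7220_put_tid() below, which performs exactly this composition.
 */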
526
527#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
528
529static char int_type[16] = "auto";
530module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
531 MODULE_PARM_DESC(interrupt_type, "interrupt_type=auto|force_msi|force_intx");
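/*
 * Usage example (assuming the standard ib_ipath module name):
 *	modprobe ib_ipath interrupt_type=force_msi
 */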
532
533/* packet rate matching delay; chip has support */
534static u8 rate_to_delay[2][2] = {
535 /* 1x, 4x */
536 { 8, 2 }, /* SDR */
537 { 4, 1 } /* DDR */
538};
539
540/* 7220 specific hardware errors... */
541static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = {
542 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
543 INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
544 /*
545 * In practice, it's unlikely that we'll see PCIe PLL, bus
546 * parity, or memory parity error failures, because most likely we
547 * won't be able to talk to the core of the chip. Nonetheless, we
548 * might see them, if they are in parts of the PCIe core that aren't
549 * essential.
550 */
551 INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
552 INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
553 INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
554 INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
555 INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
556 INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
557 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
558 INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl data queue"),
559 INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl header queue"),
560 INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"),
561 INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"),
562 INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT,
563 "PCIe serdes Q0 no clock"),
564 INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT,
565 "PCIe serdes Q1 no clock"),
566 INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT,
567 "PCIe serdes Q2 no clock"),
568 INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT,
569 "PCIe serdes Q3 no clock"),
570 INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR,
571 "DDS RXEQ memory parity"),
572 INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"),
573 INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR,
574 "PCIe uC oct0 memory parity"),
575 INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR,
576 "PCIe uC oct1 memory parity"),
577};
578
579static void autoneg_work(struct work_struct *);
580
581/*
582 * The offset is different for different configured port numbers, since
583 * port0 is fixed in size, but others can vary. Make it a function to
584 * make the issue more obvious.
585 */
586static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port)
587{
588 return port ? dd->ipath_p0_rcvegrcnt +
589 (port-1) * dd->ipath_rcvegrcnt : 0;
590}
591
592static void ipath_7220_txe_recover(struct ipath_devdata *dd)
593{
594 ++ipath_stats.sps_txeparity;
595
596 dev_info(&dd->pcidev->dev,
597 "Recovering from TXE PIO parity error\n");
598 ipath_disarm_senderrbufs(dd, 1);
599}
600
601
602/**
603 * ipath_7220_handle_hwerrors - display hardware errors.
604 * @dd: the infinipath device
605 * @msg: the output buffer
606 * @msgl: the size of the output buffer
607 *
608 * Most hardware errors are catastrophic, but for right now,
609 * we'll print them and continue. We reuse the same message
610 * buffer as ipath_handle_errors() to avoid excessive stack
611 * usage.
612 */
613static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
614 size_t msgl)
615{
616 ipath_err_t hwerrs;
617 u32 bits, ctrl;
618 int isfatal = 0;
619 char bitsmsg[64];
620 int log_idx;
621
622 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
623 if (!hwerrs) {
624 /*
625 * Better than printing confusing messages: this seems to be
626 * related to clearing the CRC error, or the PLL error,
627 * during init.
628 */
629 ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
630 goto bail;
631 } else if (hwerrs == ~0ULL) {
632 ipath_dev_err(dd, "Read of hardware error status failed "
633 "(all bits set); ignoring\n");
634 goto bail;
635 }
636 ipath_stats.sps_hwerrs++;
637
638 /*
639 * Always clear the error status register, except MEMBISTFAIL,
640 * regardless of whether we continue or stop using the chip.
641 * We want that set so we know it failed, even across driver reload.
642 * We'll still ignore it in the hwerrmask. We do this partly for
643 * diagnostics, but also for support.
644 */
645 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
646 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
647
648 hwerrs &= dd->ipath_hwerrmask;
649
650 /* We log some errors to EEPROM, check if we have any of those. */
651 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
652 if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
653 ipath_inc_eeprom_err(dd, log_idx, 1);
654 /*
655 * Make sure we get this much out, unless told to be quiet,
656 * or it's occurred within the last 5 seconds.
657 */
658 if ((hwerrs & ~(dd->ipath_lasthwerror |
659 ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
660 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
661 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
662 (ipath_debug & __IPATH_VERBDBG))
663 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
664 "(cleared)\n", (unsigned long long) hwerrs);
665 dd->ipath_lasthwerror |= hwerrs;
666
667 if (hwerrs & ~dd->ipath_hwe_bitsextant)
668 ipath_dev_err(dd, "hwerror interrupt with unknown errors "
669 "%llx set\n", (unsigned long long)
670 (hwerrs & ~dd->ipath_hwe_bitsextant));
671
672 if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR)
673 ipath_sd7220_clr_ibpar(dd);
674
675 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
676 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
677 /*
678 * Parity errors in send memory are recoverable,
679 * just cancel the send (if indicated in sendbuffererror),
680 * count the occurrence, unfreeze (if no other handled
681 * hardware error bits are set), and continue.
682 */
683 if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
684 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
685 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
686 ipath_7220_txe_recover(dd);
687 hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
688 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
689 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
690 if (!hwerrs) {
691 /* else leave in freeze mode */
692 ipath_write_kreg(dd,
693 dd->ipath_kregs->kr_control,
694 dd->ipath_control);
695 goto bail;
696 }
697 }
698 if (hwerrs) {
699 /*
700 * If any bits are set that we aren't ignoring, make the
701 * complaint only once, in case the error is stuck or
702 * recurring and we get here multiple times.
703 * Force the link down, so the switch knows, and the
704 * LEDs are turned off.
705 */
706 if (dd->ipath_flags & IPATH_INITTED) {
707 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
708 ipath_setup_7220_setextled(dd,
709 INFINIPATH_IBCS_L_STATE_DOWN,
710 INFINIPATH_IBCS_LT_STATE_DISABLED);
711 ipath_dev_err(dd, "Fatal Hardware Error "
712 "(freeze mode), no longer"
713 " usable, SN %.16s\n",
714 dd->ipath_serial);
715 isfatal = 1;
716 }
717 /*
718 * Mark as having had an error for driver, and also
719 * for /sys and status word mapped to user programs.
720 * This marks unit as not usable, until reset.
721 */
722 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
723 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
724 dd->ipath_flags &= ~IPATH_INITTED;
725 } else {
726 ipath_dbg("Clearing freezemode on ignored hardware "
727 "error\n");
728 ipath_clear_freeze(dd);
729 }
730 }
731
732 *msg = '\0';
733
734 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
735 strlcat(msg, "[Memory BIST test failed, "
736 "InfiniPath hardware unusable]", msgl);
737 /* ignore from now on, so disable until driver reloaded */
738 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
739 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
740 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
741 dd->ipath_hwerrmask);
742 }
743
744 ipath_format_hwerrors(hwerrs,
745 ipath_7220_hwerror_msgs,
746 ARRAY_SIZE(ipath_7220_hwerror_msgs),
747 msg, msgl);
748
749 if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
750 << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
751 bits = (u32) ((hwerrs >>
752 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
753 INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
754 snprintf(bitsmsg, sizeof bitsmsg,
755 "[PCIe Mem Parity Errs %x] ", bits);
756 strlcat(msg, bitsmsg, msgl);
757 }
758
759#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
760 INFINIPATH_HWE_COREPLL_RFSLIP)
761
762 if (hwerrs & _IPATH_PLL_FAIL) {
763 snprintf(bitsmsg, sizeof bitsmsg,
764 "[PLL failed (%llx), InfiniPath hardware unusable]",
765 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
766 strlcat(msg, bitsmsg, msgl);
767 /* ignore from now on, so disable until driver reloaded */
768 dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
769 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
770 dd->ipath_hwerrmask);
771 }
772
773 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
774 /*
775 * If it occurs, it is left masked since the external
776 * interface is unused.
777 */
778 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
779 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
780 dd->ipath_hwerrmask);
781 }
782
783 ipath_dev_err(dd, "%s hardware error\n", msg);
784 /*
785 * For the /sys status file: if no trailing '}' is copied, we'll
786 * know it was truncated.
787 */
788 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
789 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
790 "{%s}", msg);
791bail:;
792}
793
794/**
795 * ipath_7220_boardname - fill in the board name
796 * @dd: the infinipath device
797 * @name: the output buffer
798 * @namelen: the size of the output buffer
799 *
800 * info is based on the board revision register
801 */
802static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
803 size_t namelen)
804{
805 char *n = NULL;
806 u8 boardrev = dd->ipath_boardrev;
807 int ret;
808
809 if (boardrev == 15) {
810 /*
811 * Emulator sometimes comes up all-ones, rather than zero.
812 */
813 boardrev = 0;
814 dd->ipath_boardrev = boardrev;
815 }
816 switch (boardrev) {
817 case 0:
818 n = "InfiniPath_7220_Emulation";
819 break;
820 case 1:
821 n = "InfiniPath_QLE7240";
822 break;
823 case 2:
824 n = "InfiniPath_QLE7280";
825 break;
826 case 3:
827 n = "InfiniPath_QLE7242";
828 break;
829 case 4:
830 n = "InfiniPath_QEM7240";
831 break;
832 case 5:
833 n = "InfiniPath_QMI7240";
834 break;
835 case 6:
836 n = "InfiniPath_QMI7264";
837 break;
838 case 7:
839 n = "InfiniPath_QMH7240";
840 break;
841 case 8:
842 n = "InfiniPath_QME7240";
843 break;
844 case 9:
845 n = "InfiniPath_QLE7250";
846 break;
847 case 10:
848 n = "InfiniPath_QLE7290";
849 break;
850 case 11:
851 n = "InfiniPath_QEM7250";
852 break;
853 case 12:
854 n = "InfiniPath_QLE-Bringup";
855 break;
856 default:
857 ipath_dev_err(dd,
858 "Don't yet know about board with ID %u\n",
859 boardrev);
860 snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
861 boardrev);
862 break;
863 }
864 if (n)
865 snprintf(name, namelen, "%s", n);
866
867 if (dd->ipath_majrev != 5 || !dd->ipath_minrev ||
868 dd->ipath_minrev > 2) {
869 ipath_dev_err(dd, "Unsupported InfiniPath hardware "
870 "revision %u.%u!\n",
871 dd->ipath_majrev, dd->ipath_minrev);
872 ret = 1;
873 } else if (dd->ipath_minrev == 1) {
874 /* Rev1 chips are prototypes; complain, but allow use */
875 ipath_dev_err(dd, "Unsupported hardware "
876 "revision %u.%u, Contact support@qlogic.com\n",
877 dd->ipath_majrev, dd->ipath_minrev);
878 ret = 0;
879 } else
880 ret = 0;
881
882 /*
883 * Set here not in ipath_init_*_funcs because we have to do
884 * it after we can read chip registers.
885 */
886 dd->ipath_ureg_align = 0x10000; /* 64KB alignment */
887
888 return ret;
889}
890
891/**
892 * ipath_7220_init_hwerrors - enable hardware errors
893 * @dd: the infinipath device
894 *
895 * Now that we have finished initializing everything that might reasonably
896 * cause a hardware error, and cleared those error bits as they occur,
897 * we can enable hardware errors in the mask (potentially enabling
898 * freeze mode), and enable hardware errors as errors (along with
899 * everything else) in errormask
900 */
901static void ipath_7220_init_hwerrors(struct ipath_devdata *dd)
902{
903 ipath_err_t val;
904 u64 extsval;
905
906 extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
907
908 if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST |
909 INFINIPATH_EXTS_MEMBIST_DISABLED)))
910 ipath_dev_err(dd, "MemBIST did not complete!\n");
911 if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED)
912 dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n");
913
914 val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
915
916 if (!dd->ipath_boardrev) /* no PLL for Emulator */
917 val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
918
919 if (dd->ipath_minrev == 1)
920 val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */
921
922 val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
923 dd->ipath_hwerrmask = val;
924
925 /*
926 * The special trigger "error" is for debugging purposes. It
927 * works around a processor/chipset problem. The error
928 * interrupt allows us to count occurrences, but we don't
929 * want to pay the overhead for normal use. Emulation only.
930 */
931 if (!dd->ipath_boardrev)
932 dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER;
933}
934
935/*
936 * All detailed interaction with the SerDes has been moved to ipath_sd7220.c
937 *
938 * The portion of IBA7220-specific bringup_serdes() that actually deals with
939 * registers and memory within the SerDes itself is ipath_sd7220_init().
940 */
941
942/**
943 * ipath_7220_bringup_serdes - bring up the serdes
944 * @dd: the infinipath device
945 */
946static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
947{
948 int ret = 0;
949 u64 val, prev_val, guid;
950 int was_reset; /* Note whether uC was reset */
951
952 ipath_dbg("Trying to bringup serdes\n");
953
954 if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
955 INFINIPATH_HWE_SERDESPLLFAILED) {
956 ipath_dbg("At start, serdes PLL failed bit set "
957 "in hwerrstatus, clearing and continuing\n");
958 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
959 INFINIPATH_HWE_SERDESPLLFAILED);
960 }
961
962 if (!dd->ipath_ibcddrctrl) {
963 /* not on re-init after reset */
964 dd->ipath_ibcddrctrl =
965 ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl);
966
967 if (dd->ipath_link_speed_enabled ==
968 (IPATH_IB_SDR | IPATH_IB_DDR))
969 dd->ipath_ibcddrctrl |=
970 IBA7220_IBC_SPEED_AUTONEG_MASK |
971 IBA7220_IBC_IBTA_1_2_MASK;
972 else
973 dd->ipath_ibcddrctrl |=
974 dd->ipath_link_speed_enabled == IPATH_IB_DDR
975 ? IBA7220_IBC_SPEED_DDR :
976 IBA7220_IBC_SPEED_SDR;
977 if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
978 IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X))
979 dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
980 else
981 dd->ipath_ibcddrctrl |=
982 dd->ipath_link_width_enabled == IB_WIDTH_4X
983 ? IBA7220_IBC_WIDTH_4X_ONLY :
984 IBA7220_IBC_WIDTH_1X_ONLY;
985
986 /* always enable these on driver reload, not sticky */
987 dd->ipath_ibcddrctrl |=
988 IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
989 dd->ipath_ibcddrctrl |=
990 IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
991 /*
992 * automatic lane reversal detection for receive
993 * doesn't work correctly in rev 1, so disable it
994 * on that rev, otherwise enable (disabling not
995 * sticky across reload for >rev1)
996 */
997 if (dd->ipath_minrev == 1)
998 dd->ipath_ibcddrctrl &=
999 ~IBA7220_IBC_LANE_REV_SUPPORTED;
1000 else
1001 dd->ipath_ibcddrctrl |=
1002 IBA7220_IBC_LANE_REV_SUPPORTED;
1003 }
1004
1005 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
1006 dd->ipath_ibcddrctrl);
1007
1008 ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0ULL);
1009
1010 /* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
1011 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
1012 /* remember if uC was in Reset or not, for dactrim */
1013 was_reset = (val & 1);
1014 ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n",
1015 was_reset ? "Asserted" : "Negated", (unsigned long long)
1016 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
1017
1018 if (dd->ipath_boardrev) {
1019 /*
1020 * Hardware is not emulator, and may have been reset. Init it.
1021 * Below will release reset, but needs to know if chip was
1022 * originally in reset, to only trim DACs on first time
1023 * after chip reset or powercycle (not driver reload)
1024 */
1025 ret = ipath_sd7220_init(dd, was_reset);
1026 }
1027
1028 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1029 prev_val = val;
1030 val |= INFINIPATH_XGXS_FC_SAFE;
1031 if (val != prev_val) {
1032 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1033 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1034 }
1035 if (val & INFINIPATH_XGXS_RESET)
1036 val &= ~INFINIPATH_XGXS_RESET;
1037 if (val != prev_val)
1038 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1039
1040 ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
1041 (unsigned long long)
1042 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
1043 prev_val);
1044
1045 guid = be64_to_cpu(dd->ipath_guid);
1046
1047 if (!guid) {
1048 /* have to have something, so use likely unique tsc */
1049 guid = get_cycles();
1050 ipath_dbg("No GUID for heartbeat, faking %llx\n",
1051 (unsigned long long)guid);
1052 } else
1053 ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", guid);
1054 ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
1055 return ret;
1056}
1057
1058static void ipath_7220_config_jint(struct ipath_devdata *dd,
1059 u16 idle_ticks, u16 max_packets)
1060{
1061
1062 /*
1063 * We can request a receive interrupt for 1 or more packets
1064 * from current offset.
1065 */
1066 if (idle_ticks == 0 || max_packets == 0)
1067 /* interrupt after one packet if no mitigation */
1068 dd->ipath_rhdrhead_intr_off =
1069 1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT;
1070 else
1071 /* Turn off RcvHdrHead interrupts if using mitigation */
1072 dd->ipath_rhdrhead_intr_off = 0ULL;
1073
1074 /* refresh kernel RcvHdrHead registers... */
1075 ipath_write_ureg(dd, ur_rcvhdrhead,
1076 dd->ipath_rhdrhead_intr_off |
1077 dd->ipath_pd[0]->port_head, 0);
1078
1079 dd->ipath_jint_max_packets = max_packets;
1080 dd->ipath_jint_idle_ticks = idle_ticks;
1081 ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload,
1082 ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) |
1083 idle_ticks);
1084}
1085
1086/**
1087 * ipath_7220_quiet_serdes - set serdes to txidle
1088 * @dd: the infinipath device
1089 * Called when driver is being unloaded
1090 */
1091static void ipath_7220_quiet_serdes(struct ipath_devdata *dd)
1092{
1093 u64 val;
1094 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
1095 wake_up(&dd->ipath_autoneg_wait);
1096 cancel_delayed_work(&dd->ipath_autoneg_work);
1097 flush_scheduled_work();
1098 ipath_shutdown_relock_poll(dd);
1099 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1100 val |= INFINIPATH_XGXS_RESET;
1101 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1102}
1103
1104static int ipath_7220_intconfig(struct ipath_devdata *dd)
1105{
1106 ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks,
1107 dd->ipath_jint_max_packets);
1108 return 0;
1109}
1110
1111/**
1112 * ipath_setup_7220_setextled - set the state of the two external LEDs
1113 * @dd: the infinipath device
1114 * @lst: the L state
1115 * @ltst: the LT state
1116 *
1117 * These LEDs indicate the physical and logical state of IB link.
1118 * For this chip (at least with recommended board pinouts), LED1
1119 * is Yellow (logical state) and LED2 is Green (physical state).
1120 *
1121 * Note: We try to match the Mellanox HCA LED behavior as best
1122 * we can. Green indicates physical link state is OK (something is
1123 * plugged in, and we can train).
1124 * Amber indicates the link is logically up (ACTIVE).
1125 * Mellanox further blinks the amber LED to indicate data packet
1126 * activity, but we have no hardware support for that, so it would
1127 * require waking up every 10-20 msecs and checking the counters
1128 * on the chip, and then turning the LED off if appropriate. That's
1129 * visible overhead, so not something we will do.
1130 *
1131 */
1132static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst,
1133 u64 ltst)
1134{
1135 u64 extctl, ledblink = 0;
1136 unsigned long flags = 0;
1137
1138 /* the diags use the LED to indicate diag info, so we leave
1139 * the external LED alone when the diags are running */
1140 if (ipath_diag_inuse)
1141 return;
1142
1143 /* Allow override of LED display for, e.g. Locating system in rack */
1144 if (dd->ipath_led_override) {
1145 ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
1146 ? INFINIPATH_IBCS_LT_STATE_LINKUP
1147 : INFINIPATH_IBCS_LT_STATE_DISABLED;
1148 lst = (dd->ipath_led_override & IPATH_LED_LOG)
1149 ? INFINIPATH_IBCS_L_STATE_ACTIVE
1150 : INFINIPATH_IBCS_L_STATE_DOWN;
1151 }
1152
1153 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
1154 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
1155 INFINIPATH_EXTC_LED2PRIPORT_ON);
1156 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) {
1157 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
1158 /*
1159 * counts are in chip clock (4ns) periods.
1160 * This is 1/16 sec (66.6ms) on,
1161 * 3/16 sec (187.5 ms) off, with packets rcvd
1162 */
1163 ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT)
1164 | ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT);
1165 }
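	/*
	 * Worked arithmetic for the blink duty cycle above:
	 * 66600*1000ns / 4ns = 16,650,000 on-periods, and
	 * 187500*1000ns / 4ns = 46,875,000 off-periods.
	 */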
1166 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
1167 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
1168 dd->ipath_extctrl = extctl;
1169 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
1170 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
1171
1172 if (ledblink) /* blink the LED on packet receive */
1173 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt,
1174 ledblink);
1175}
1176
1177/*
1178 * Similar to pci_intx(pdev, 1), except that we make sure
1179 * msi is off...
1180 */
1181static void ipath_enable_intx(struct pci_dev *pdev)
1182{
1183 u16 cw, new;
1184 int pos;
1185
1186 /* first, turn on INTx */
1187 pci_read_config_word(pdev, PCI_COMMAND, &cw);
1188 new = cw & ~PCI_COMMAND_INTX_DISABLE;
1189 if (new != cw)
1190 pci_write_config_word(pdev, PCI_COMMAND, new);
1191
1192 /* then turn off MSI */
1193 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1194 if (pos) {
1195 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
1196 new = cw & ~PCI_MSI_FLAGS_ENABLE;
1197 if (new != cw)
1198 pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
1199 }
1200}
1201
1202static int ipath_msi_enabled(struct pci_dev *pdev)
1203{
1204 int pos, ret = 0;
1205
1206 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1207 if (pos) {
1208 u16 cw;
1209
1210 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
1211 ret = !!(cw & PCI_MSI_FLAGS_ENABLE);
1212 }
1213 return ret;
1214}
1215
1216/*
1217 * disable msi interrupt if enabled, and clear the flag.
1218 * flag is used primarily for the fallback to IntX, but
1219 * is also used in reinit after reset as a flag.
1220 */
1221static void ipath_7220_nomsi(struct ipath_devdata *dd)
1222{
1223 dd->ipath_msi_lo = 0;
1224#ifdef CONFIG_PCI_MSI
1225 if (ipath_msi_enabled(dd->pcidev)) {
1226 /*
1227 * free, but don't zero; later kernels require that
1228 * the irq be freed before disable_msi, so the intx
1229 * setup has to request it again.
1230 */
1231 if (dd->ipath_irq)
1232 free_irq(dd->ipath_irq, dd);
1233 pci_disable_msi(dd->pcidev);
1234 }
1235#endif
1236}
1237
1238/*
1239 * ipath_setup_7220_cleanup - clean up any per-chip chip-specific stuff
1240 * @dd: the infinipath device
1241 *
1242 * Nothing but msi interrupt cleanup for now.
1243 *
1244 * This is called during driver unload.
1245 */
1246static void ipath_setup_7220_cleanup(struct ipath_devdata *dd)
1247{
1248 ipath_7220_nomsi(dd);
1249}
1250
1251
1252static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
1253{
1254 u16 linkstat, minwidth, speed;
1255 int pos;
1256
1257 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
1258 if (!pos) {
1259 ipath_dev_err(dd, "Can't find PCI Express capability!\n");
1260 goto bail;
1261 }
1262
1263 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
1264 &linkstat);
1265 /*
1266 * speed is in bits 0-3, link width in bits 4-8;
1267 * there are no defines for them in the headers
1268 */
1269 speed = linkstat & 0xf;
1270 linkstat >>= 4;
1271 linkstat &= 0x1f;
1272 dd->ipath_lbus_width = linkstat;
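	/*
	 * Decode example: a LinkStatus value of 0x81 yields speed 1
	 * (Gen1, 2.5GT/s) and width 8 (x8).
	 */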
1273 switch (boardrev) {
1274 case 0:
1275 case 2:
1276 case 10:
1277 case 12:
1278 minwidth = 16; /* x16 capable boards */
1279 break;
1280 default:
1281 minwidth = 8; /* x8 capable boards */
1282 break;
1283 }
1284
1285 switch (speed) {
1286 case 1:
1287 dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
1288 break;
1289 case 2:
1290 dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
1291 break;
1292 default: /* not defined, assume gen1 */
1293 dd->ipath_lbus_speed = 2500;
1294 break;
1295 }
1296
1297 if (linkstat < minwidth)
1298 ipath_dev_err(dd,
1299 "PCIe width %u (x%u HCA), performance "
1300 "reduced\n", linkstat, minwidth);
1301 else
1302 ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n",
1303 dd->ipath_lbus_speed, linkstat, minwidth);
1304
1305 if (speed != 1)
1306 ipath_dev_err(dd,
1307 "PCIe linkspeed %u is incorrect; "
1308 "should be 1 (2500)!\n", speed);
1309
1310bail:
1311 /* fill in string, even on errors */
1312 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
1313 "PCIe,%uMHz,x%u\n",
1314 dd->ipath_lbus_speed,
1315 dd->ipath_lbus_width);
1316 return;
1317}
1318
1319
1320/**
1321 * ipath_setup_7220_config - setup PCIe config related stuff
1322 * @dd: the infinipath device
1323 * @pdev: the PCI device
1324 *
1325 * The pci_enable_msi() call will fail on systems with MSI quirks
1326 * such as those with AMD8131, even if the device of interest is not
1327 * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least;
1328 * fixed late in 2.6.16).
1329 * All that can be done is to edit the kernel source to remove the quirk
1330 * check until that is fixed.
1331 * We do not need to call pci_enable_msi() for our HyperTransport
1332 * chip, even though it uses MSI, and we want to avoid the quirk
1333 * warning, so we call it only for PCIe. If we do end up needing
1334 * pci_enable_msi() at some point in the future for HT, we'll move the
1335 * call back into the main init_one code.
1336 * We save the msi lo and hi values, so we can restore them after
1337 * chip reset (the kernel PCI infrastructure doesn't yet handle that
1338 * correctly).
1339 */
1340static int ipath_setup_7220_config(struct ipath_devdata *dd,
1341 struct pci_dev *pdev)
1342{
1343 int pos, ret = -1;
1344 u32 boardrev;
1345
1346 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
1347#ifdef CONFIG_PCI_MSI
1348 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1349 if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto"))
1350 ret = pci_enable_msi(pdev);
1351 if (ret) {
1352 if (!strcmp(int_type, "force_msi")) {
1353 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
1354 "force_msi is on, so not continuing.\n",
1355 ret);
1356 return ret;
1357 }
1358
1359 ipath_enable_intx(pdev);
1360 if (!strcmp(int_type, "auto"))
1361 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
1362 "falling back to INTx\n", ret);
1363 } else if (pos) {
1364 u16 control;
1365 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
1366 &dd->ipath_msi_lo);
1367 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
1368 &dd->ipath_msi_hi);
1369 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS,
1370 &control);
1371 /* now save the data (vector) info */
1372 pci_read_config_word(pdev,
1373 pos + ((control & PCI_MSI_FLAGS_64BIT)
1374 ? PCI_MSI_DATA_64 :
1375 PCI_MSI_DATA_32),
1376 &dd->ipath_msi_data);
1377 } else
1378 ipath_dev_err(dd, "Can't find MSI capability, "
1379 "can't save MSI settings for reset\n");
1380#else
1381 ipath_dbg("PCI_MSI not configured, using IntX interrupts\n");
1382 ipath_enable_intx(pdev);
1383#endif
1384
1385 dd->ipath_irq = pdev->irq;
1386
1387 /*
1388 * We save the cachelinesize also, although it doesn't
1389 * really matter.
1390 */
1391 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
1392 &dd->ipath_pci_cacheline);
1393
1394 /*
1395 * This function is called early, before ipath_boardrev is set;
1396 * it's too early in init to use ipath_read_kreg64(), so use readq().
1397 */
1398 boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision])
1399 >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK;
1400
1401 ipath_7220_pcie_params(dd, boardrev);
1402
1403 dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
1404 IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
1405 dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
1406 return 0;
1407}
1408
1409static void ipath_init_7220_variables(struct ipath_devdata *dd)
1410{
1411 /*
1412 * setup the register offsets, since they are different for each
1413 * chip
1414 */
1415 dd->ipath_kregs = &ipath_7220_kregs;
1416 dd->ipath_cregs = &ipath_7220_cregs;
1417
1418 /*
1419 * bits for selecting I2C direction and values,
1420 * used for I2C serial flash
1421 */
1422 dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
1423 dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
1424 dd->ipath_gpio_sda = IPATH_GPIO_SDA;
1425 dd->ipath_gpio_scl = IPATH_GPIO_SCL;
1426
1427 /*
1428 * Fill in data for field-values that change in IBA7220.
1429 * We dynamically specify only the mask for LINKTRAININGSTATE
1430 * and only the shift for LINKSTATE, as they are the only ones
1431 * that change. Also precalculate the 3 link states of interest
1432 * and the combined mask.
1433 */
1434 dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT;
1435 dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK;
1436 dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
1437 dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
1438 dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1439 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1440 (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
1441 dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1442 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1443 (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
1444 dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1445 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1446 (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
1447
1448 /*
1449 * Fill in data for ibcc field-values that change in IBA7220.
1450 * We dynamically specify only the mask for LINKINITCMD
1451 * and only the shift for LINKCMD and MAXPKTLEN, as they are
1452 * the only ones that change.
1453 */
1454 dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK;
1455 dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT;
1456 dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT;
1457
1458 /* Fill in shifts for RcvCtrl. */
1459 dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
1460 dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT;
1461 dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT;
1462 dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT;
1463
1464 /* variables for sanity checking interrupt and errors */
1465 dd->ipath_hwe_bitsextant =
1466 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1467 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
1468 (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1469 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
1470 (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
1471 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
1472 INFINIPATH_HWE_PCIE1PLLFAILED |
1473 INFINIPATH_HWE_PCIE0PLLFAILED |
1474 INFINIPATH_HWE_PCIEPOISONEDTLP |
1475 INFINIPATH_HWE_PCIECPLTIMEOUT |
1476 INFINIPATH_HWE_PCIEBUSPARITYXTLH |
1477 INFINIPATH_HWE_PCIEBUSPARITYXADM |
1478 INFINIPATH_HWE_PCIEBUSPARITYRADM |
1479 INFINIPATH_HWE_MEMBISTFAILED |
1480 INFINIPATH_HWE_COREPLL_FBSLIP |
1481 INFINIPATH_HWE_COREPLL_RFSLIP |
1482 INFINIPATH_HWE_SERDESPLLFAILED |
1483 INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
1484 INFINIPATH_HWE_IBCBUSFRSPCPARITYERR |
1485 INFINIPATH_HWE_PCIECPLDATAQUEUEERR |
1486 INFINIPATH_HWE_PCIECPLHDRQUEUEERR |
1487 INFINIPATH_HWE_SDMAMEMREADERR |
1488 INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED |
1489 INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT |
1490 INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT |
1491 INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT |
1492 INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT |
1493 INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR |
1494 INFINIPATH_HWE_IB_UC_MEMORYPARITYERR |
1495 INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR |
1496 INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR;
1497 dd->ipath_i_bitsextant =
1498 INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED |
1499 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
1500 (INFINIPATH_I_RCVAVAIL_MASK <<
1501 INFINIPATH_I_RCVAVAIL_SHIFT) |
1502 INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
1503 INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO |
1504 INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE;
1505 dd->ipath_e_bitsextant =
1506 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
1507 INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
1508 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
1509 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
1510 INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
1511 INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
1512 INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
1513 INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
1514 INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
1515 INFINIPATH_E_SENDSPECIALTRIGGER |
1516 INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN |
1517 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN |
1518 INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT |
1519 INFINIPATH_E_SDROPPEDDATAPKT |
1520 INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
1521 INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE |
1522 INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND |
1523 INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE |
1524 INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG |
1525 INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW |
1526 INFINIPATH_E_SDMAUNEXPDATA |
1527 INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR |
1528 INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE |
1529 INFINIPATH_E_SDMADESCADDRMISALIGN |
1530 INFINIPATH_E_INVALIDEEPCMD;
1531
1532 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
1533 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1534 dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
1535 dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
1536 dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED
1537 | IPATH_HAS_LINK_LATENCY;
1538
1539 /*
1540 * EEPROM error log 0 is TXE parity errors, 1 is RXE parity,
1541 * 2 is miscellaneous, and 3 is reserved for the future.
1542 */
1543 dd->ipath_eep_st_masks[0].hwerrs_to_log =
1544 INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1545 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
1546
1547 dd->ipath_eep_st_masks[1].hwerrs_to_log =
1548 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1549 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1550
1551 dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
1552
1553 ipath_linkrecovery = 0;
1554
1555 init_waitqueue_head(&dd->ipath_autoneg_wait);
1556 INIT_DELAYED_WORK(&dd->ipath_autoneg_work, autoneg_work);
1557
1558 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
1559 dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR;
1560
1561 dd->ipath_link_width_enabled = dd->ipath_link_width_supported;
1562 dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
1563 /*
1564 * Set the initial values to reasonable defaults; they will be
1565 * set for real when the link comes up.
1566 */
1567 dd->ipath_link_width_active = IB_WIDTH_4X;
1568 dd->ipath_link_speed_active = IPATH_IB_SDR;
1569 dd->delay_mult = rate_to_delay[0][1];
1570}
1571
1572
1573/*
1574 * Setup the MSI stuff again after a reset. I'd like to just call
1575 * pci_enable_msi() and request_irq() again, but when I do that,
1576 * the MSI enable bit doesn't get set in the command word, and
1577 * we switch to a different interrupt vector, which is confusing,
1578 * so I instead just do it all inline. Perhaps we can somehow tie
1579 * this into the PCIe hotplug support at some point.
1580 * Note, because I'm doing it all here, I don't call pci_disable_msi()
1581 * or free_irq() at the start of ipath_setup_7220_reset().
1582 */
1583static int ipath_reinit_msi(struct ipath_devdata *dd)
1584{
1585 int ret = 0;
1586#ifdef CONFIG_PCI_MSI
1587 int pos;
1588 u16 control;
1589 if (!dd->ipath_msi_lo) /* Using intX, or init problem */
1590 goto bail;
1591
1592 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
1593 if (!pos) {
1594 ipath_dev_err(dd, "Can't find MSI capability, "
1595 "can't restore MSI settings\n");
1596 goto bail;
1597 }
1598 ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
1599 dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
1600 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
1601 dd->ipath_msi_lo);
1602 ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
1603 dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
1604 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
1605 dd->ipath_msi_hi);
1606 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
1607 if (!(control & PCI_MSI_FLAGS_ENABLE)) {
1608 ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
1609 "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
1610 control, control | PCI_MSI_FLAGS_ENABLE);
1611 control |= PCI_MSI_FLAGS_ENABLE;
1612 pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
1613 control);
1614 }
1615 /* now rewrite the data (vector) info */
1616 pci_write_config_word(dd->pcidev, pos +
1617 ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
1618 dd->ipath_msi_data);
1619 ret = 1;
1620bail:
1621#endif
1622 if (!ret) {
1623 ipath_dbg("Using IntX, MSI disabled or not configured\n");
1624 ipath_enable_intx(dd->pcidev);
1625 ret = 1;
1626 }
1627 /*
1628 * We restore the cachelinesize also, although it doesn't really
1629 * matter.
1630 */
1631 pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
1632 dd->ipath_pci_cacheline);
1633 /* and now set the pci master bit again */
1634 pci_set_master(dd->pcidev);
1635
1636 return ret;
1637}
1638
1639/*
1640 * This routine sleeps, so it can only be called from user context, not
1641 * from interrupt context. If we need interrupt context, we can split
1642 * it into two routines.
1643 */
1644static int ipath_setup_7220_reset(struct ipath_devdata *dd)
1645{
1646 u64 val;
1647 int i;
1648 int ret;
1649 u16 cmdval;
1650
1651 pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
1652
1653 /* Use dev_err so it shows up in logs, etc. */
1654 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
1655
1656 /* keep chip from being accessed in a few places */
1657 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);
1658 val = dd->ipath_control | INFINIPATH_C_RESET;
1659 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
1660 mb();
1661
1662 for (i = 1; i <= 5; i++) {
1663 int r;
1664
1665 /*
1666 * Allow MBIST, etc. to complete; longer on each retry.
1667 * We sometimes get machine checks from bus timeout if no
1668 * response, so for now, make it *really* long.
1669 */
1670 msleep(1000 + (1 + i) * 2000);
1671 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
1672 dd->ipath_pcibar0);
1673 if (r)
1674 ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
1675 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
1676 dd->ipath_pcibar1);
1677 if (r)
1678 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
1679 /* now re-enable memory access */
1680 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
1681 r = pci_enable_device(dd->pcidev);
1682 if (r)
1683 ipath_dev_err(dd, "pci_enable_device failed after "
1684 "reset: %d\n", r);
1685 /*
1686 * whether it fully enabled or not, mark as present,
1687 * again (but not INITTED)
1688 */
1689 dd->ipath_flags |= IPATH_PRESENT;
1690 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
1691 if (val == dd->ipath_revision) {
1692 ipath_cdbg(VERBOSE, "Got matching revision "
1693 "register %llx on try %d\n",
1694 (unsigned long long) val, i);
1695 ret = ipath_reinit_msi(dd);
1696 goto bail;
1697 }
1698 /* Probably getting -1 back */
1699 ipath_dbg("Didn't get expected revision register, "
1700 "got %llx, try %d\n", (unsigned long long) val,
1701 i + 1);
1702 }
1703 ret = 0; /* failed */
1704
1705bail:
1706 if (ret)
1707 ipath_7220_pcie_params(dd, dd->ipath_boardrev);
1708
1709 return ret;
1710}
1711
1712/**
1713 * ipath_7220_put_tid - write a TID to the chip
1714 * @dd: the infinipath device
1715 * @tidptr: pointer to the expected TID (in chip) to update
1716 * @type: 0 for eager, 1 for expected
1717 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1718 *
1719 * This exists as a separate routine to allow for selection of the
1720 * appropriate "flavor". The static calls in cleanup just use the
1721 * revision-agnostic form, as they are not performance critical.
1722 */
1723static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1724 u32 type, unsigned long pa)
1725{
1726 if (pa != dd->ipath_tidinvalid) {
1727 u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
1728
1729 /* paranoia checks */
1730 if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
1731 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
1732 "not 2KB aligned!\n", pa);
1733 return;
1734 }
1735 if (pa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
1736 ipath_dev_err(dd,
1737 "BUG: Physical page address 0x%lx "
1738 "larger than supported\n", pa);
1739 return;
1740 }
1741
1742 if (type == RCVHQ_RCV_TYPE_EAGER)
1743 chippa |= dd->ipath_tidtemplate;
1744 else /* for now, always full 4KB page */
1745 chippa |= IBA7220_TID_SZ_4K;
1746 writeq(chippa, tidptr);
1747 } else
1748 writeq(pa, tidptr);
1749 mmiowb();
1750}
1751
1752/**
1753 * ipath_7220_clear_tids - clear all TID entries for a port, expected and eager
1754 * @dd: the infinipath device
1755 * @port: the port
1756 *
1757 * clear all TID entries for a port, expected and eager.
1758 * Used from ipath_close(). On this chip, TIDs are only 32 bits,
1759 * not 64, but they are still on 64 bit boundaries, so tidbase
1760 * is declared as u64 * for the pointer math, even though we write 32 bits
1761 */
1762static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port)
1763{
1764 u64 __iomem *tidbase;
1765 unsigned long tidinv;
1766 int i;
1767
1768 if (!dd->ipath_kregbase)
1769 return;
1770
1771 ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
1772
1773 tidinv = dd->ipath_tidinvalid;
1774 tidbase = (u64 __iomem *)
1775 ((char __iomem *)(dd->ipath_kregbase) +
1776 dd->ipath_rcvtidbase +
1777 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
1778
1779 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1780 ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1781 tidinv);
1782
1783 tidbase = (u64 __iomem *)
1784 ((char __iomem *)(dd->ipath_kregbase) +
1785 dd->ipath_rcvegrbase + port_egrtid_idx(dd, port)
1786 * sizeof(*tidbase));
1787
1788 for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--)
1789 ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER,
1790 tidinv);
1791}
1792
1793/**
1794 * ipath_7220_tidtemplate - setup constants for TID updates
1795 * @dd: the infinipath device
1796 *
1797 * We setup stuff that we use a lot, to avoid calculating each time
1798 */
1799static void ipath_7220_tidtemplate(struct ipath_devdata *dd)
1800{
1801 /* For now, we always allocate 4KB buffers (at init) so we can
1802 * receive max size packets. We may want a module parameter to
1803 * specify 2KB or 4KB and/or make it per port instead of per device
1804 * for those who want to reduce memory footprint. Note that the
1805 * ipath_rcvhdrentsize size must be large enough to hold the largest
1806 * IB header (currently 96 bytes) that we expect to handle (plus of
1807 * course the 2 dwords of RHF).
1808 */
1809 if (dd->ipath_rcvegrbufsize == 2048)
1810 dd->ipath_tidtemplate = IBA7220_TID_SZ_2K;
1811 else if (dd->ipath_rcvegrbufsize == 4096)
1812 dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
1813 else {
1814 dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
1815 "%u, using %u\n", dd->ipath_rcvegrbufsize,
1816 4096);
1817 dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
1818 }
1819 dd->ipath_tidinvalid = 0;
1820}
1821
1822static int ipath_7220_early_init(struct ipath_devdata *dd)
1823{
1824 u32 i, s;
1825
1826 if (strcmp(int_type, "auto") &&
1827 strcmp(int_type, "force_msi") &&
1828 strcmp(int_type, "force_intx")) {
1829 ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting "
1830 "auto, force_msi or force_intx\n", int_type);
1831 return -EINVAL;
1832 }
1833
1834 /*
1835 * Control[4] has been added to change the arbitration within
1836 * the SDMA engine between data fetches and descriptor fetches.
1837 * ipath_sdma_fetch_arb==0 gives data fetches priority.
1838 */
1839 if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1))
1840 dd->ipath_control |= 1<<4;
1841
1842 dd->ipath_flags |= IPATH_4BYTE_TID;
1843
1844 /*
1845 * For openfabrics, we need to be able to handle an IB header of
1846 * 24 dwords. The HT chip has arbitrarily sized receive buffers, so we
1847 * made them the same size as the PIO buffers. This chip does not
1848 * handle arbitrary size buffers, so we need the header large enough
1849 * to handle largest IB header, but still have room for a 2KB MTU
1850 * standard IB packet.
1851 */
1852 dd->ipath_rcvhdrentsize = 24;
1853 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1854 dd->ipath_rhf_offset =
1855 dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
1856
1857 dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
1858 /*
1859 * the min() check here is currently a nop, but it may not always
1860 * be, depending on just how we do ipath_rcvegrbufsize
1861 */
1862 dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
1863 dd->ipath_piosize2k,
1864 dd->ipath_rcvegrbufsize +
1865 (dd->ipath_rcvhdrentsize << 2));
1866 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
1867
1868 ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS,
1869 INFINIPATH_JINT_DEFAULT_MAX_PACKETS);
1870
1871 if (dd->ipath_boardrev) /* no eeprom on emulator */
1872 ipath_get_eeprom_info(dd);
1873
1874 /* start of code to check and print procmon */
1875 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1876 s &= ~(1U<<31); /* clear done bit */
1877 s |= 1U<<14; /* clear counter (write 1 to clear) */
1878 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1879 /* make sure clear_counter stays low long enough before starting */
1880 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1881 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1882
1883 s &= ~(1U<<14); /* allow counter to count (before starting) */
1884 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1885 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1886 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1887 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1888
1889 s |= 1U<<15; /* start the counter */
1890 s &= ~(1U<<31); /* clear done bit */
1891 s &= ~0x7ffU; /* clear frequency bits */
1892 s |= 0xe29; /* set frequency bits, in case cleared */
1893 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1894
1895 s = 0;
1896 for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) {
1897 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1898 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1899 }
1900 if (!(s&(1U<<31)))
1901 ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s);
1902 else
1903 ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff);
1904
1905 return 0;
1906}
1907
1908/**
1909 * ipath_7220_get_base_info - set chip-specific flags for user code
1910 * @pd: the infinipath port
1911 * @kbase: ipath_base_info pointer
1912 *
1913 * We set the PCIE flag because the lower bandwidth on PCIe vs
1914 * HyperTransport can affect some user packet algorithms.
1915 */
1916static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase)
1917{
1918 struct ipath_base_info *kinfo = kbase;
1919
1920 kinfo->spi_runtime_flags |=
1921 IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL |
1922 IPATH_RUNTIME_SDMA;
1923
1924 return 0;
1925}
1926
1927static void ipath_7220_free_irq(struct ipath_devdata *dd)
1928{
1929 free_irq(dd->ipath_irq, dd);
1930 dd->ipath_irq = 0;
1931}
1932
1933static struct ipath_message_header *
1934ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
1935{
1936 u32 offset = ipath_hdrget_offset(rhf_addr);
1937
1938 return (struct ipath_message_header *)
1939 (rhf_addr - dd->ipath_rhf_offset + offset);
1940}
1941
1942static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
1943{
1944 u32 nchipports;
1945
1946 nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
1947 if (!cfgports) {
1948 int ncpus = num_online_cpus();
1949
1950 if (ncpus <= 4)
1951 dd->ipath_portcnt = 5;
1952 else if (ncpus <= 8)
1953 dd->ipath_portcnt = 9;
1954 if (dd->ipath_portcnt)
1955 ipath_dbg("Auto-configured for %u ports, %d cpus "
1956 "online\n", dd->ipath_portcnt, ncpus);
1957 } else if (cfgports <= nchipports)
1958 dd->ipath_portcnt = cfgports;
1959 if (!dd->ipath_portcnt) /* none of the above, set to max */
1960 dd->ipath_portcnt = nchipports;
1961 /*
1962 * chip can be configured for 5, 9, or 17 ports, and choice
1963 * affects number of eager TIDs per port (1K, 2K, 4K).
1964 */
1965 if (dd->ipath_portcnt > 9)
1966 dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT;
1967 else if (dd->ipath_portcnt > 5)
1968 dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT;
1969 /* else configure for default 5 receive ports */
1970 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1971 dd->ipath_rcvctrl);
1972 dd->ipath_p0_rcvegrcnt = 2048; /* always */
1973 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
1974 dd->ipath_pioreserved = 1; /* reserve a buffer */
1975}
1976
1977
1978static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which)
1979{
1980 int lsb, ret = 0;
1981 u64 maskr; /* right-justified mask */
1982
1983 switch (which) {
1984 case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
1985 lsb = IBA7220_IBC_HRTBT_SHIFT;
1986 maskr = IBA7220_IBC_HRTBT_MASK;
1987 break;
1988
1989 case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */
1990 ret = dd->ipath_link_width_enabled;
1991 goto done;
1992
1993 case IPATH_IB_CFG_LWID: /* Get currently active Link-width */
1994 ret = dd->ipath_link_width_active;
1995 goto done;
1996
1997 case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
1998 ret = dd->ipath_link_speed_enabled;
1999 goto done;
2000
2001 case IPATH_IB_CFG_SPD: /* Get current Link spd */
2002 ret = dd->ipath_link_speed_active;
2003 goto done;
2004
2005 case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
2006 lsb = IBA7220_IBC_RXPOL_SHIFT;
2007 maskr = IBA7220_IBC_RXPOL_MASK;
2008 break;
2009
2010 case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
2011 lsb = IBA7220_IBC_LREV_SHIFT;
2012 maskr = IBA7220_IBC_LREV_MASK;
2013 break;
2014
2015 case IPATH_IB_CFG_LINKLATENCY:
2016 ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus)
2017 & IBA7220_DDRSTAT_LINKLAT_MASK;
2018 goto done;
2019
2020 default:
2021 ret = -ENOTSUPP;
2022 goto done;
2023 }
2024 ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr);
2025done:
2026 return ret;
2027}
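/*
 * Editor's note: every "lsb + right-justified mask" case above reduces
 * to the same field extraction. A minimal standalone sketch
 * (hypothetical helper, not part of the driver):
 */
static inline uint64_t get_field(uint64_t reg, int lsb, uint64_t maskr)
{
	return (reg >> lsb) & maskr;	/* maskr is right-justified */
}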
2028
2029static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
2030{
2031 int lsb, ret = 0, setforce = 0;
2032 u64 maskr; /* right-justified mask */
2033
2034 switch (which) {
2035 case IPATH_IB_CFG_LIDLMC:
2036 /*
2037 * Set LID and LMC. Combined to avoid possible hazard
2038 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
2039 */
2040 lsb = IBA7220_IBC_DLIDLMC_SHIFT;
2041 maskr = IBA7220_IBC_DLIDLMC_MASK;
2042 break;
2043
2044 case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
2045 if (val & IPATH_IB_HRTBT_ON &&
2046 (dd->ipath_flags & IPATH_NO_HRTBT))
2047 goto bail;
2048 lsb = IBA7220_IBC_HRTBT_SHIFT;
2049 maskr = IBA7220_IBC_HRTBT_MASK;
2050 break;
2051
2052 case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */
2053 /*
2054 * As with speed, only write the actual register if
2055 * the link is currently down, otherwise takes effect
2056 * on next link change.
2057 */
2058 dd->ipath_link_width_enabled = val;
2059 if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
2060 IPATH_LINKDOWN)
2061 goto bail;
2062 /*
2063 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
2064 * will get called because we want to update
2065 * link_width_active, and the change may not take
2066 * effect for some time (if we are in POLL), so this
2067 * flag will force the updown routine to be called
2068 * on the next ibstatuschange down interrupt, even
2069 * if it's not a down->up transition.
2070 */
2071 val--; /* convert from IB to chip */
2072 maskr = IBA7220_IBC_WIDTH_MASK;
2073 lsb = IBA7220_IBC_WIDTH_SHIFT;
2074 setforce = 1;
2075 dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
2076 break;
2077
2078 case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */
2079 /*
2080 * If we turn off IB1.2, need to preset SerDes defaults,
2081 * but not right now. Set a flag for the next time
2082 * we command the link down. As with width, only write the
2083 * actual register if the link is currently down, otherwise
2084 * takes effect on next link change. Since setting is being
2085 * explicitly requested (via MAD or sysfs), clear autoneg
2086 * failure status if speed autoneg is enabled.
2087 */
2088 dd->ipath_link_speed_enabled = val;
2089 if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK &&
2090 !(val & (val - 1)))
2091 dd->ipath_presets_needed = 1;
2092 if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
2093 IPATH_LINKDOWN)
2094 goto bail;
2095 /*
2096 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
2097 * will get called because we want to update
2098 * link_speed_active, and the change may not take
2099 * effect for some time (if we are in POLL), so this
2100 * flag will force the updown routine to be called
2101 * on the next ibstatuschange down interrupt, even
2102 * if it's not a down->up transition. When setting
2103 * speed autoneg, clear AUTONEG_FAILED.
2104 */
2105 if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) {
2106 val = IBA7220_IBC_SPEED_AUTONEG_MASK |
2107 IBA7220_IBC_IBTA_1_2_MASK;
2108 dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
2109 } else
2110 val = val == IPATH_IB_DDR ? IBA7220_IBC_SPEED_DDR
2111 : IBA7220_IBC_SPEED_SDR;
2112 maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
2113 IBA7220_IBC_IBTA_1_2_MASK;
2114 lsb = 0; /* speed bits are low bits */
2115 setforce = 1;
2116 break;
2117
2118 case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
2119 lsb = IBA7220_IBC_RXPOL_SHIFT;
2120 maskr = IBA7220_IBC_RXPOL_MASK;
2121 break;
2122
2123 case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
2124 lsb = IBA7220_IBC_LREV_SHIFT;
2125 maskr = IBA7220_IBC_LREV_MASK;
2126 break;
2127
2128 default:
2129 ret = -ENOTSUPP;
2130 goto bail;
2131 }
2132 dd->ipath_ibcddrctrl &= ~(maskr << lsb);
2133 dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
2134 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
2135 dd->ipath_ibcddrctrl);
2136 if (setforce)
2137 dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
2138bail:
2139 return ret;
2140}
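/*
 * Editor's sketch of the read-modify-write that ends
 * ipath_7220_set_ib_cfg(); same right-justified mask convention,
 * hypothetical helper name:
 */
static inline uint64_t set_field(uint64_t reg, int lsb, uint64_t maskr,
				 uint64_t val)
{
	reg &= ~(maskr << lsb);		/* clear the old field */
	reg |= (val & maskr) << lsb;	/* install the new value */
	return reg;
}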
2141
2142static void ipath_7220_read_counters(struct ipath_devdata *dd,
2143 struct infinipath_counters *cntrs)
2144{
2145 u64 *counters = (u64 *) cntrs;
2146 int i;
2147
2148 for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++)
2149 counters[i] = ipath_snap_cntr(dd, i);
2150}
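/*
 * Editor's note: the cast above assumes struct infinipath_counters is
 * laid out as a dense array of u64 counters, so the array index doubles
 * as the chip counter number. A hypothetical caller snapshots them all:
 *
 *	struct infinipath_counters c;
 *	ipath_7220_read_counters(dd, &c);
 */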
2151
2152/* if we are using MSI, try to fall back to IntX */
2153static int ipath_7220_intr_fallback(struct ipath_devdata *dd)
2154{
2155 if (dd->ipath_msi_lo) {
2156 dev_info(&dd->pcidev->dev, "MSI interrupt not detected,"
2157 " trying IntX interrupts\n");
2158 ipath_7220_nomsi(dd);
2159 ipath_enable_intx(dd->pcidev);
2160 /*
2161 * Some newer kernels require free_irq before disable_msi,
2162 * and the irq can change during disable and IntX enable,
2163 * so we therefore need to use the pcidev->irq value,
2164 * not our saved MSI value.
2165 */
2166 dd->ipath_irq = dd->pcidev->irq;
2167 if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
2168 IPATH_DRV_NAME, dd))
2169 ipath_dev_err(dd,
2170 "Could not re-request_irq for IntX\n");
2171 return 1;
2172 }
2173 return 0;
2174}
2175
2176/*
2177 * reset the XGXS (between serdes and IBC). Slightly less intrusive
2178 * than resetting the IBC or external link state, and useful in some
2179 * cases to cause some retraining. To do this right, we reset IBC
2180 * as well.
2181 */
2182static void ipath_7220_xgxs_reset(struct ipath_devdata *dd)
2183{
2184 u64 val, prev_val;
2185
2186 prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2187 val = prev_val | INFINIPATH_XGXS_RESET;
2188 prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
2189 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2190 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
2191 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
2192 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
2193 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
2194 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2195 dd->ipath_control);
2196}
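/*
 * Editor's sketch of the reset-pulse idiom above (pseudo register
 * accessors; the point is assert, flush the posted write, deassert,
 * with the link disabled around the pulse):
 *
 *	val = read_reg(XGXS_CFG);
 *	write_reg(XGXS_CFG, val | RESET);	// assert reset
 *	(void) read_reg(SCRATCH);		// flush posted write
 *	write_reg(XGXS_CFG, val & ~RESET);	// deassert reset
 */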
2197
2198
2199/* Still needs cleanup, too much hardwired stuff */
2200static void autoneg_send(struct ipath_devdata *dd,
2201 u32 *hdr, u32 dcnt, u32 *data)
2202{
2203 int i;
2204 u64 cnt;
2205 u32 __iomem *piobuf;
2206 u32 pnum;
2207
2208 i = 0;
2209 cnt = 7 + dcnt + 1; /* 7-dword header, dcnt data dwords, 1 icrc dword */
2210 while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) {
2211 if (i++ > 15) {
2212 ipath_dbg("Couldn't get pio buffer for send\n");
2213 return;
2214 }
2215 udelay(2);
2216 }
2217 if (dd->ipath_flags & IPATH_HAS_PBC_CNT)
2218 cnt |= 0x80000000ULL << 32; /* mark as VL15 */
2219 writeq(cnt, piobuf);
2220 ipath_flush_wc();
2221 __iowrite32_copy(piobuf + 2, hdr, 7);
2222 __iowrite32_copy(piobuf + 9, data, dcnt);
2223 ipath_flush_wc();
2224}
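/*
 * Editor's note on the PIO buffer layout implied by the offsets above
 * (hedged reading, not from documentation): dwords 0-1 hold the 64-bit
 * PBC length word (plus the VL15 mark when IPATH_HAS_PBC_CNT), dwords
 * 2..8 the 7-dword header, and dword 9 onward the payload; the trailing
 * ICRC dword is counted in the PBC but not written by software.
 */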
2225
2226/*
2227 * _start packet gets sent twice at start, _done gets sent twice at end
2228 */
2229static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
2230{
2231 static u32 swapped;
2232 u32 dw, i, hcnt, dcnt, *data;
2233 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
2234 static u32 madpayload_start[0x40] = {
2235 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
2236 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
2237 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
2238 };
2239 static u32 madpayload_done[0x40] = {
2240 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
2241 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
2242 0x40000001, 0x1388, 0x15e, /* rest 0's */
2243 };
2244 dcnt = sizeof(madpayload_start)/sizeof(madpayload_start[0]);
2245 hcnt = sizeof(hdr)/sizeof(hdr[0]);
2246 if (!swapped) {
2247 /* for maintainability, do it at runtime */
2248 for (i = 0; i < hcnt; i++) {
2249 dw = (__force u32) cpu_to_be32(hdr[i]);
2250 hdr[i] = dw;
2251 }
2252 for (i = 0; i < dcnt; i++) {
2253 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
2254 madpayload_start[i] = dw;
2255 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
2256 madpayload_done[i] = dw;
2257 }
2258 swapped = 1;
2259 }
2260
2261 data = which ? madpayload_done : madpayload_start;
2262 ipath_cdbg(PKT, "Sending %s special MADs\n", which ? "done" : "start");
2263
2264 autoneg_send(dd, hdr, dcnt, data);
2265 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2266 udelay(2);
2267 autoneg_send(dd, hdr, dcnt, data);
2268 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2269 udelay(2);
2270}
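/*
 * Editor's sketch of the swap-once idiom above, in isolation. It uses
 * htonl() as a userspace stand-in for cpu_to_be32() (assumption, not
 * driver code); as above, one static flag guards all tables, so the
 * in-place swap runs exactly once:
 */
#include <arpa/inet.h>
#include <stdint.h>

static void swap_to_be32_once(uint32_t *buf, unsigned int n)
{
	static int swapped;	/* tables must be swapped exactly once */
	unsigned int i;

	if (swapped)
		return;
	for (i = 0; i < n; i++)
		buf[i] = htonl(buf[i]);
	swapped = 1;
}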
2271
2272
2273
2274/*
2275 * Do the absolute minimum to cause an IB speed change, and make it
2276 * ready, but don't actually trigger the change. The caller will
2277 * do that when ready (if link is in Polling training state, it will
2278 * happen immediately, otherwise when link next goes down)
2279 *
2280 * This routine should only be used as part of the DDR autonegotiation
2281 * code for devices that are not compliant with IB 1.2 (or code that
2282 * fixes things up for same).
2283 *
2284 * When the link has gone down and autoneg is enabled, or when
2285 * autoneg has failed and we give up until next time, we set both
2286 * speeds, and then we want IBTA enabled as well as "use max enabled speed".
2287 */
2288static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
2289{
2290 dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
2291 IBA7220_IBC_IBTA_1_2_MASK |
2292 (IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT));
2293
2294 if (speed == (IPATH_IB_SDR | IPATH_IB_DDR))
2295 dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
2296 IBA7220_IBC_IBTA_1_2_MASK;
2297 else
2298 dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ?
2299 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
2300
2301 /*
2302 * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto
2303 * to chip-centric 0 = 1x, 1 = 4x, 2 = auto
2304 */
2305 dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) <<
2306 IBA7220_IBC_WIDTH_SHIFT;
2307 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
2308 dd->ipath_ibcddrctrl);
2309 ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
2310}
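/*
 * Editor's sketch of the width conversion above: IB-style 1 = 1x,
 * 2 = 4x, 3 = auto becomes chip-style 0/1/2 by subtracting one
 * (hypothetical helper, same logic as the code):
 */
static inline uint64_t chip_width_bits(uint32_t ib_width,
				       unsigned int shift)
{
	/* 1x -> 0, 4x -> 1, auto -> 2 */
	return (uint64_t)(ib_width - 1) << shift;
}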
2311
2312
2313/*
2314 * this routine is only used when we are not talking to another
2315 * IB 1.2-compliant device that we think can do DDR.
2316 * (This includes all existing switch chips as of Oct 2007.)
2317 * 1.2-compliant devices go directly to DDR prior to reaching INIT
2318 */
2319static void try_auto_neg(struct ipath_devdata *dd)
2320{
2321 /*
2322 * required for older non-IB1.2 DDR switches. Newer
2323 * non-IB-compliant switches don't need it, but so far,
2324 * aren't bothered by it either. "Magic constant"
2325 */
2326 ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
2327 0x3b9dc07);
2328 dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
2329 ipath_autoneg_send(dd, 0);
2330 set_speed_fast(dd, IPATH_IB_DDR);
2331 ipath_toggle_rclkrls(dd);
2332 /* 2 msec is minimum length of a poll cycle */
2333 schedule_delayed_work(&dd->ipath_autoneg_work,
2334 msecs_to_jiffies(2));
2335}
2336
2337
2338static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
2339{
2340 int ret = 0;
2341 u32 ltstate = ipath_ib_linkstate(dd, ibcs);
2342
2343 dd->ipath_link_width_active =
2344 ((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ?
2345 IB_WIDTH_4X : IB_WIDTH_1X;
2346 dd->ipath_link_speed_active =
2347 ((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ?
2348 IPATH_IB_DDR : IPATH_IB_SDR;
2349
2350 if (!ibup) {
2351 /*
2352 * when link goes down we don't want aeq running, so it
2353 * won't interfere with IBC training, etc., and we need
2354 * to go back to the static SerDes preset values
2355 */
2356 if (dd->ipath_x1_fix_tries &&
2357 ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET &&
2358 ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP)
2359 dd->ipath_x1_fix_tries = 0;
2360 if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
2361 IPATH_IB_AUTONEG_INPROG)))
2362 set_speed_fast(dd, dd->ipath_link_speed_enabled);
2363 if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
2364 ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n");
2365 ipath_sd7220_presets(dd);
2366 }
2367 /* this might be better in ipath_sd7220_presets() */
2368 ipath_set_relock_poll(dd, ibup);
2369 } else {
2370 if (ipath_compat_ddr_negotiate &&
2371 !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
2372 IPATH_IB_AUTONEG_INPROG)) &&
2373 dd->ipath_link_speed_active == IPATH_IB_SDR &&
2374 (dd->ipath_link_speed_enabled &
2375 (IPATH_IB_DDR | IPATH_IB_SDR)) ==
2376 (IPATH_IB_DDR | IPATH_IB_SDR) &&
2377 dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) {
2378 /* we are SDR, and DDR auto-negotiation enabled */
2379 ++dd->ipath_autoneg_tries;
2380 ipath_dbg("DDR negotiation try, %u/%u\n",
2381 dd->ipath_autoneg_tries,
2382 IPATH_AUTONEG_TRIES);
2383 try_auto_neg(dd);
2384 ret = 1; /* no other IB status change processing */
2385 } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
2386 && dd->ipath_link_speed_active == IPATH_IB_SDR) {
2387 ipath_autoneg_send(dd, 1);
2388 set_speed_fast(dd, IPATH_IB_DDR);
2389 udelay(2);
2390 ipath_toggle_rclkrls(dd);
2391 ret = 1; /* no other IB status change processing */
2392 } else {
2393 if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
2394 (dd->ipath_link_speed_active & IPATH_IB_DDR)) {
2395 ipath_dbg("Got to INIT with DDR autoneg\n");
2396 dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG
2397 | IPATH_IB_AUTONEG_FAILED);
2398 dd->ipath_autoneg_tries = 0;
2399 /* re-enable SDR, for next link down */
2400 set_speed_fast(dd,
2401 dd->ipath_link_speed_enabled);
2402 wake_up(&dd->ipath_autoneg_wait);
2403 } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) {
2404 /*
2405 * clear autoneg failure flag, and do setup
2406 * so we'll try next time link goes down and
2407 * back to INIT (possibly connected to different
2408 * device).
2409 */
2410 ipath_dbg("INIT %sDR after autoneg failure\n",
2411 (dd->ipath_link_speed_active &
2412 IPATH_IB_DDR) ? "D" : "S");
2413 dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
2414 dd->ipath_ibcddrctrl |=
2415 IBA7220_IBC_IBTA_1_2_MASK;
2416 ipath_write_kreg(dd,
2417 IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
2418 }
2419 }
2420 /*
2421 * if we are in 1X, and are in autoneg width, it
2422 * could be due to an xgxs problem, so if we haven't
2423 * already tried, try twice to get to 4X; if we
2424 * tried, and couldn't, report it, since it will
2425 * probably not be what is desired.
2426 */
2427 if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
2428 IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)
2429 && dd->ipath_link_width_active == IB_WIDTH_1X
2430 && dd->ipath_x1_fix_tries < 3) {
2431 if (++dd->ipath_x1_fix_tries == 3)
2432 dev_info(&dd->pcidev->dev,
2433 "IB link is in 1X mode\n");
2434 else {
2435 ipath_cdbg(VERBOSE, "IB 1X in "
2436 "auto-width, try %u to be "
2437 "sure it's really 1X; "
2438 "ltstate %u\n",
2439 dd->ipath_x1_fix_tries,
2440 ltstate);
2441 dd->ipath_f_xgxs_reset(dd);
2442 ret = 1; /* skip other processing */
2443 }
2444 }
2445
2446 if (!ret) {
2447 dd->delay_mult = rate_to_delay
2448 [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1]
2449 [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1];
2450
2451 ipath_set_relock_poll(dd, ibup);
2452 }
2453 }
2454
2455 if (!ret)
2456 ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs),
2457 ltstate);
2458 return ret;
2459}
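/*
 * Editor's note: the active width/speed decode at the top of
 * ipath_7220_ib_updown() is a single-bit test per field, i.e.
 * (same logic, comment form only):
 *
 *	width = (ibcs >> LINKWIDTH_SHIFT) & 1 ? IB_WIDTH_4X : IB_WIDTH_1X;
 *	speed = (ibcs >> LINKSPEED_SHIFT) & 1 ? IPATH_IB_DDR : IPATH_IB_SDR;
 */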
2460
2461
2462/*
2463 * Handle the empirically determined mechanism for auto-negotiation
2464 * of DDR speed with switches.
2465 */
2466static void autoneg_work(struct work_struct *work)
2467{
2468 struct ipath_devdata *dd;
2469 u64 startms;
2470 u32 lastlts, i;
2471
2472 dd = container_of(work, struct ipath_devdata,
2473 ipath_autoneg_work.work);
2474
2475 startms = jiffies_to_msecs(jiffies);
2476
2477 /*
2478 * busy wait for this first part; it should be at most a
2479 * few hundred usec, since we scheduled ourselves for 2 msec.
2480 */
2481 for (i = 0; i < 25; i++) {
2482 lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
2483 if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
2484 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE);
2485 break;
2486 }
2487 udelay(100);
2488 }
2489
2490 if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
2491 goto done; /* we got there early or were told to stop */
2492
2493 /* we expect this to time out */
2494 if (wait_event_timeout(dd->ipath_autoneg_wait,
2495 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2496 msecs_to_jiffies(90)))
2497 goto done;
2498
2499 ipath_toggle_rclkrls(dd);
2500
2501 /* we expect this to time out */
2502 if (wait_event_timeout(dd->ipath_autoneg_wait,
2503 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2504 msecs_to_jiffies(1700)))
2505 goto done;
2506
2507 set_speed_fast(dd, IPATH_IB_SDR);
2508 ipath_toggle_rclkrls(dd);
2509
2510 /*
2511 * wait up to 250 msec for link to train and get to INIT at DDR;
2512 * this should terminate early
2513 */
2514 wait_event_timeout(dd->ipath_autoneg_wait,
2515 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2516 msecs_to_jiffies(250));
2517done:
2518 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
2519 ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
2520 ipath_ib_state(dd, dd->ipath_lastibcstat),
2521 jiffies_to_msecs(jiffies)-startms);
2522 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
2523 if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
2524 dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
2525 ipath_dbg("Giving up on DDR until next IB "
2526 "link Down\n");
2527 dd->ipath_autoneg_tries = 0;
2528 }
2529 set_speed_fast(dd, dd->ipath_link_speed_enabled);
2530 }
2531}
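/*
 * Editor's hedged reading of the waits above: the worker busy-polls for
 * up to 2.5 ms, waits ~90 ms at DDR, toggles the recv clocks, waits up
 * to ~1.7 s more, then drops back to SDR and gives the link ~250 ms to
 * reach INIT; only that last wait is expected to complete early.
 */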
2532
2533
2534/**
2535 * ipath_init_iba7220_funcs - set up the chip-specific function pointers
2536 * @dd: the infinipath device
2537 *
2538 * This is global, and is called directly at init to set up the
2539 * chip-specific function pointers for later use.
2540 */
2541void ipath_init_iba7220_funcs(struct ipath_devdata *dd)
2542{
2543 dd->ipath_f_intrsetup = ipath_7220_intconfig;
2544 dd->ipath_f_bus = ipath_setup_7220_config;
2545 dd->ipath_f_reset = ipath_setup_7220_reset;
2546 dd->ipath_f_get_boardname = ipath_7220_boardname;
2547 dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors;
2548 dd->ipath_f_early_init = ipath_7220_early_init;
2549 dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors;
2550 dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes;
2551 dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes;
2552 dd->ipath_f_clear_tids = ipath_7220_clear_tids;
2553 dd->ipath_f_put_tid = ipath_7220_put_tid;
2554 dd->ipath_f_cleanup = ipath_setup_7220_cleanup;
2555 dd->ipath_f_setextled = ipath_setup_7220_setextled;
2556 dd->ipath_f_get_base_info = ipath_7220_get_base_info;
2557 dd->ipath_f_free_irq = ipath_7220_free_irq;
2558 dd->ipath_f_tidtemplate = ipath_7220_tidtemplate;
2559 dd->ipath_f_intr_fallback = ipath_7220_intr_fallback;
2560 dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset;
2561 dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg;
2562 dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg;
2563 dd->ipath_f_config_jint = ipath_7220_config_jint;
2564 dd->ipath_f_config_ports = ipath_7220_config_ports;
2565 dd->ipath_f_read_counters = ipath_7220_read_counters;
2566 dd->ipath_f_get_msgheader = ipath_7220_get_msgheader;
2567 dd->ipath_f_ib_updown = ipath_7220_ib_updown;
2568
2569 /* initialize chip-specific variables */
2570 ipath_init_7220_variables(dd);
2571}
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 4471674975cd..27dd89476660 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -155,24 +155,13 @@ static int bringup_link(struct ipath_devdata *dd)
155 dd->ipath_control); 155 dd->ipath_control);
156 156
157 /* 157 /*
158 * Note that prior to try 14 or 15 of IB, the credit scaling 158 * set initial max size pkt IBC will send, including ICRC; it's the
159 * wasn't working, because it was swapped for writes with the 159 * PIO buffer size in dwords, less 1; also see ipath_set_mtu()
160 * 1 bit default linkstate field
161 */ 160 */
161 val = (dd->ipath_ibmaxlen >> 2) + 1;
162 ibc = val << dd->ibcc_mpl_shift;
162 163
163 /* ignore pbc and align word */ 164 /* flowcontrolwatermark is in units of KBytes */
164 val = dd->ipath_piosize2k - 2 * sizeof(u32);
165 /*
166 * for ICRC, which we only send in diag test pkt mode, and we
167 * don't need to worry about that for mtu
168 */
169 val += 1;
170 /*
171 * Set the IBC maxpktlength to the size of our pio buffers the
172 * maxpktlength is in words. This is *not* the IB data MTU.
173 */
174 ibc = (val / sizeof(u32)) << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
175 /* in KB */
176 ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT; 165 ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
177 /* 166 /*
178 * How often flowctrl sent. More or less in usecs; balance against 167 * How often flowctrl sent. More or less in usecs; balance against
@@ -191,10 +180,13 @@ static int bringup_link(struct ipath_devdata *dd)
191 /* 180 /*
192 * Want to start out with both LINKCMD and LINKINITCMD in NOP 181 * Want to start out with both LINKCMD and LINKINITCMD in NOP
193 * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that 182 * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that
194 * to stay a NOP 183 * to stay a NOP. Flag that we are disabled, for the (unlikely)
184 * case that some recovery path is trying to bring the link up
185 * before we are ready.
195 */ 186 */
196 ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE << 187 ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
197 INFINIPATH_IBCC_LINKINITCMD_SHIFT; 188 INFINIPATH_IBCC_LINKINITCMD_SHIFT;
189 dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
198 ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n", 190 ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
199 (unsigned long long) ibc); 191 (unsigned long long) ibc);
200 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc); 192 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
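/*
 * Editor's sketch of the new maxpktlen computation above:
 * ipath_ibmaxlen is in bytes, so >> 2 converts to dwords and + 1
 * covers the trailing ICRC dword (assumes ibcc_mpl_shift positions
 * the MaxPktLen field; comment form only, not verbatim driver code):
 *
 *	val = (ibmaxlen >> 2) + 1;		// dwords, including ICRC
 *	ibc = (u64) val << ibcc_mpl_shift;	// MaxPktLen field
 */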
@@ -227,17 +219,26 @@ static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
227 pd->port_cnt = 1; 219 pd->port_cnt = 1;
228 /* The port 0 pkey table is used by the layer interface. */ 220 /* The port 0 pkey table is used by the layer interface. */
229 pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY; 221 pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
222 pd->port_seq_cnt = 1;
230 } 223 }
231 return pd; 224 return pd;
232} 225}
233 226
234static int init_chip_first(struct ipath_devdata *dd, 227static int init_chip_first(struct ipath_devdata *dd)
235 struct ipath_portdata **pdp)
236{ 228{
237 struct ipath_portdata *pd = NULL; 229 struct ipath_portdata *pd;
238 int ret = 0; 230 int ret = 0;
239 u64 val; 231 u64 val;
240 232
233 spin_lock_init(&dd->ipath_kernel_tid_lock);
234 spin_lock_init(&dd->ipath_user_tid_lock);
235 spin_lock_init(&dd->ipath_sendctrl_lock);
236 spin_lock_init(&dd->ipath_sdma_lock);
237 spin_lock_init(&dd->ipath_gpio_lock);
238 spin_lock_init(&dd->ipath_eep_st_lock);
239 spin_lock_init(&dd->ipath_sdepb_lock);
240 mutex_init(&dd->ipath_eep_lock);
241
241 /* 242 /*
242 * skip cfgports stuff because we are not allocating memory, 243 * skip cfgports stuff because we are not allocating memory,
243 * and we don't want problems if the portcnt changed due to 244 * and we don't want problems if the portcnt changed due to
@@ -250,12 +251,14 @@ static int init_chip_first(struct ipath_devdata *dd,
250 else if (ipath_cfgports <= dd->ipath_portcnt) { 251 else if (ipath_cfgports <= dd->ipath_portcnt) {
251 dd->ipath_cfgports = ipath_cfgports; 252 dd->ipath_cfgports = ipath_cfgports;
252 ipath_dbg("Configured to use %u ports out of %u in chip\n", 253 ipath_dbg("Configured to use %u ports out of %u in chip\n",
253 dd->ipath_cfgports, dd->ipath_portcnt); 254 dd->ipath_cfgports, ipath_read_kreg32(dd,
255 dd->ipath_kregs->kr_portcnt));
254 } else { 256 } else {
255 dd->ipath_cfgports = dd->ipath_portcnt; 257 dd->ipath_cfgports = dd->ipath_portcnt;
256 ipath_dbg("Tried to configured to use %u ports; chip " 258 ipath_dbg("Tried to configured to use %u ports; chip "
257 "only supports %u\n", ipath_cfgports, 259 "only supports %u\n", ipath_cfgports,
258 dd->ipath_portcnt); 260 ipath_read_kreg32(dd,
261 dd->ipath_kregs->kr_portcnt));
259 } 262 }
260 /* 263 /*
261 * Allocate full portcnt array, rather than just cfgports, because 264 * Allocate full portcnt array, rather than just cfgports, because
@@ -295,12 +298,9 @@ static int init_chip_first(struct ipath_devdata *dd,
295 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize); 298 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
296 dd->ipath_piosize2k = val & ~0U; 299 dd->ipath_piosize2k = val & ~0U;
297 dd->ipath_piosize4k = val >> 32; 300 dd->ipath_piosize4k = val >> 32;
298 /* 301 if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
299 * Note: the chips support a maximum MTU of 4096, but the driver 302 ipath_mtu4096 = 0; /* 4KB not supported by this chip */
300 * hasn't implemented this feature yet, so set the initial value 303 dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
301 * to 2048.
302 */
303 dd->ipath_ibmtu = 2048;
304 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt); 304 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
305 dd->ipath_piobcnt2k = val & ~0U; 305 dd->ipath_piobcnt2k = val & ~0U;
306 dd->ipath_piobcnt4k = val >> 32; 306 dd->ipath_piobcnt4k = val >> 32;
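/*
 * Editor's note: kr_sendpiosize and kr_sendpiobufcnt each pack two
 * 32-bit values into one 64-bit register, split as above:
 *
 *	lo = (u32) (val & ~0U);		// 2k size / count
 *	hi = (u32) (val >> 32);		// 4k size / count
 */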
@@ -328,43 +328,46 @@ static int init_chip_first(struct ipath_devdata *dd,
328 else ipath_dbg("%u 2k piobufs @ %p\n", 328 else ipath_dbg("%u 2k piobufs @ %p\n",
329 dd->ipath_piobcnt2k, dd->ipath_pio2kbase); 329 dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
330 330
331 spin_lock_init(&dd->ipath_tid_lock);
332 spin_lock_init(&dd->ipath_sendctrl_lock);
333 spin_lock_init(&dd->ipath_gpio_lock);
334 spin_lock_init(&dd->ipath_eep_st_lock);
335 mutex_init(&dd->ipath_eep_lock);
336
337done: 331done:
338 *pdp = pd;
339 return ret; 332 return ret;
340} 333}
341 334
342/** 335/**
343 * init_chip_reset - re-initialize after a reset, or enable 336 * init_chip_reset - re-initialize after a reset, or enable
344 * @dd: the infinipath device 337 * @dd: the infinipath device
345 * @pdp: output for port data
346 * 338 *
347 * sanity check at least some of the values after reset, and 339 * ensure no receive or transmit (explicitly, in case reset
348 * ensure no receive or transmit (explicitly, in case reset 340 * failed
349 * failed 341 * failed
350 */ 342 */
351static int init_chip_reset(struct ipath_devdata *dd, 343static int init_chip_reset(struct ipath_devdata *dd)
352 struct ipath_portdata **pdp)
353{ 344{
354 u32 rtmp; 345 u32 rtmp;
346 int i;
347 unsigned long flags;
348
349 /*
350 * ensure chip does no sends or receives, tail updates, or
351 * pioavail updates while we re-initialize
352 */
353 dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
354 for (i = 0; i < dd->ipath_portcnt; i++) {
355 clear_bit(dd->ipath_r_portenable_shift + i,
356 &dd->ipath_rcvctrl);
357 clear_bit(dd->ipath_r_intravail_shift + i,
358 &dd->ipath_rcvctrl);
359 }
360 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
361 dd->ipath_rcvctrl);
355 362
356 *pdp = dd->ipath_pd[0]; 363 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
357 /* ensure chip does no sends or receives while we re-initialize */ 364 dd->ipath_sendctrl = 0U; /* no sdma, etc */
358 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
359 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
360 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); 365 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
361 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); 366 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
367 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
368
369 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
362 370
363 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
364 if (dd->ipath_portcnt != rtmp)
365 dev_info(&dd->pcidev->dev, "portcnt was %u before "
366 "reset, now %u, using original\n",
367 dd->ipath_portcnt, rtmp);
368 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); 371 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
369 if (rtmp != dd->ipath_rcvtidcnt) 372 if (rtmp != dd->ipath_rcvtidcnt)
370 dev_info(&dd->pcidev->dev, "tidcnt was %u before " 373 dev_info(&dd->pcidev->dev, "tidcnt was %u before "
@@ -467,10 +470,10 @@ static void init_shadow_tids(struct ipath_devdata *dd)
467 dd->ipath_physshadow = addrs; 470 dd->ipath_physshadow = addrs;
468} 471}
469 472
470static void enable_chip(struct ipath_devdata *dd, 473static void enable_chip(struct ipath_devdata *dd, int reinit)
471 struct ipath_portdata *pd, int reinit)
472{ 474{
473 u32 val; 475 u32 val;
476 u64 rcvmask;
474 unsigned long flags; 477 unsigned long flags;
475 int i; 478 int i;
476 479
@@ -484,17 +487,28 @@ static void enable_chip(struct ipath_devdata *dd,
484 /* Enable PIO send, and update of PIOavail regs to memory. */ 487 /* Enable PIO send, and update of PIOavail regs to memory. */
485 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE | 488 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
486 INFINIPATH_S_PIOBUFAVAILUPD; 489 INFINIPATH_S_PIOBUFAVAILUPD;
490
491 /*
492 * Set the PIO avail update threshold to host memory
493 * on chips that support it.
494 */
495 if (dd->ipath_pioupd_thresh)
496 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
497 << INFINIPATH_S_UPDTHRESH_SHIFT;
487 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); 498 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
488 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 499 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
489 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 500 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
490 501
491 /* 502 /*
492 * enable port 0 receive, and receive interrupt. other ports 503 * Enable kernel ports' receive and receive interrupt.
493 * done as user opens and inits them. 504 * Other ports done as user opens and inits them.
494 */ 505 */
495 dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) | 506 rcvmask = 1ULL;
496 (1ULL << dd->ipath_r_portenable_shift) | 507 dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
497 (1ULL << dd->ipath_r_intravail_shift); 508 (rcvmask << dd->ipath_r_intravail_shift);
509 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
510 dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
511
498 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 512 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
499 dd->ipath_rcvctrl); 513 dd->ipath_rcvctrl);
500 514
@@ -505,16 +519,16 @@ static void enable_chip(struct ipath_devdata *dd,
505 dd->ipath_flags |= IPATH_INITTED; 519 dd->ipath_flags |= IPATH_INITTED;
506 520
507 /* 521 /*
508 * init our shadow copies of head from tail values, and write 522 * Init our shadow copies of head from tail values,
509 * head values to match. 523 * and write head values to match.
510 */ 524 */
511 val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0); 525 val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
512 (void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0); 526 ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
513 527
514 /* Initialize so we interrupt on next packet received */ 528 /* Initialize so we interrupt on next packet received */
515 (void)ipath_write_ureg(dd, ur_rcvhdrhead, 529 ipath_write_ureg(dd, ur_rcvhdrhead,
516 dd->ipath_rhdrhead_intr_off | 530 dd->ipath_rhdrhead_intr_off |
517 dd->ipath_pd[0]->port_head, 0); 531 dd->ipath_pd[0]->port_head, 0);
518 532
519 /* 533 /*
520 * by now pioavail updates to memory should have occurred, so 534 * by now pioavail updates to memory should have occurred, so
@@ -523,25 +537,26 @@ static void enable_chip(struct ipath_devdata *dd,
523 * initial values of the generation bit correct. 537 * initial values of the generation bit correct.
524 */ 538 */
525 for (i = 0; i < dd->ipath_pioavregs; i++) { 539 for (i = 0; i < dd->ipath_pioavregs; i++) {
526 __le64 val; 540 __le64 pioavail;
527 541
528 /* 542 /*
529 * Chip Errata bug 6641; even and odd qwords>3 are swapped. 543 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
530 */ 544 */
531 if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) 545 if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
532 val = dd->ipath_pioavailregs_dma[i ^ 1]; 546 pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
533 else 547 else
534 val = dd->ipath_pioavailregs_dma[i]; 548 pioavail = dd->ipath_pioavailregs_dma[i];
535 dd->ipath_pioavailshadow[i] = le64_to_cpu(val); 549 dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) |
550 (~dd->ipath_pioavailkernel[i] <<
551 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
536 } 552 }
537 /* can get counters, stats, etc. */ 553 /* can get counters, stats, etc. */
538 dd->ipath_flags |= IPATH_PRESENT; 554 dd->ipath_flags |= IPATH_PRESENT;
539} 555}
540 556
541static int init_housekeeping(struct ipath_devdata *dd, 557static int init_housekeeping(struct ipath_devdata *dd, int reinit)
542 struct ipath_portdata **pdp, int reinit)
543{ 558{
544 char boardn[32]; 559 char boardn[40];
545 int ret = 0; 560 int ret = 0;
546 561
547 /* 562 /*
@@ -600,18 +615,9 @@ static int init_housekeeping(struct ipath_devdata *dd,
600 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 615 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
601 INFINIPATH_E_RESET); 616 INFINIPATH_E_RESET);
602 617
603 if (reinit) 618 ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
604 ret = init_chip_reset(dd, pdp); 619 (unsigned long long) dd->ipath_revision,
605 else 620 dd->ipath_pcirev);
606 ret = init_chip_first(dd, pdp);
607
608 if (ret)
609 goto done;
610
611 ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
612 "%u egrtids\n", (unsigned long long) dd->ipath_revision,
613 dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
614 dd->ipath_rcvegrcnt);
615 621
616 if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & 622 if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
617 INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) { 623 INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
@@ -650,10 +656,39 @@ static int init_housekeeping(struct ipath_devdata *dd,
650 656
651 ipath_dbg("%s", dd->ipath_boardversion); 657 ipath_dbg("%s", dd->ipath_boardversion);
652 658
659 if (ret)
660 goto done;
661
662 if (reinit)
663 ret = init_chip_reset(dd);
664 else
665 ret = init_chip_first(dd);
666
653done: 667done:
654 return ret; 668 return ret;
655} 669}
656 670
671static void verify_interrupt(unsigned long opaque)
672{
673 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
674
675 if (!dd)
676 return; /* being torn down */
677
678 /*
679 * If we don't have any interrupts, let the user know and
680 * don't bother checking again.
681 */
682 if (dd->ipath_int_counter == 0) {
683 if (!dd->ipath_f_intr_fallback(dd))
684 dev_err(&dd->pcidev->dev, "No interrupts detected, "
685 "not usable.\n");
686 else /* re-arm the timer to see if fallback works */
687 mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
688 } else
689 ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
690 dd->ipath_int_counter);
691}
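/*
 * Editor's note: verify_interrupt() is armed by the timer setup added
 * at the end of ipath_init_chip() in this same patch; HZ/2 gives the
 * chip half a second to raise its first interrupt:
 *
 *	init_timer(&dd->ipath_intrchk_timer);
 *	dd->ipath_intrchk_timer.function = verify_interrupt;
 *	dd->ipath_intrchk_timer.data = (unsigned long) dd;
 *	dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
 *	add_timer(&dd->ipath_intrchk_timer);
 */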
657 692
658/** 693/**
659 * ipath_init_chip - do the actual initialization sequence on the chip 694 * ipath_init_chip - do the actual initialization sequence on the chip
@@ -676,11 +711,11 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
676 u32 val32, kpiobufs; 711 u32 val32, kpiobufs;
677 u32 piobufs, uports; 712 u32 piobufs, uports;
678 u64 val; 713 u64 val;
679 struct ipath_portdata *pd = NULL; /* keep gcc4 happy */ 714 struct ipath_portdata *pd;
680 gfp_t gfp_flags = GFP_USER | __GFP_COMP; 715 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
681 unsigned long flags; 716 unsigned long flags;
682 717
683 ret = init_housekeeping(dd, &pd, reinit); 718 ret = init_housekeeping(dd, reinit);
684 if (ret) 719 if (ret)
685 goto done; 720 goto done;
686 721
@@ -700,7 +735,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
700 * we now use routines that backend onto __get_free_pages, the 735 * we now use routines that backend onto __get_free_pages, the
701 * rest would be wasted. 736 * rest would be wasted.
702 */ 737 */
703 dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt; 738 dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
704 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt, 739 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
705 dd->ipath_rcvhdrcnt); 740 dd->ipath_rcvhdrcnt);
706 741
@@ -731,8 +766,8 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
731 if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) { 766 if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
732 int i = (int) piobufs - 767 int i = (int) piobufs -
733 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); 768 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
734 if (i < 0) 769 if (i < 1)
735 i = 0; 770 i = 1;
736 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of " 771 dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
737 "%d for kernel leaves too few for %d user ports " 772 "%d for kernel leaves too few for %d user ports "
738 "(%d each); using %u\n", kpiobufs, 773 "(%d each); using %u\n", kpiobufs,
@@ -751,24 +786,40 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
751 ipath_dbg("allocating %u pbufs/port leaves %u unused, " 786 ipath_dbg("allocating %u pbufs/port leaves %u unused, "
752 "add to kernel\n", dd->ipath_pbufsport, val32); 787 "add to kernel\n", dd->ipath_pbufsport, val32);
753 dd->ipath_lastport_piobuf -= val32; 788 dd->ipath_lastport_piobuf -= val32;
789 kpiobufs += val32;
754 ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n", 790 ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
755 dd->ipath_pbufsport, val32); 791 dd->ipath_pbufsport, val32);
756 } 792 }
757 dd->ipath_lastpioindex = dd->ipath_lastport_piobuf; 793 dd->ipath_lastpioindex = 0;
794 dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
795 ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
758 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " 796 ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
759 "each for %u user ports\n", kpiobufs, 797 "each for %u user ports\n", kpiobufs,
760 piobufs, dd->ipath_pbufsport, uports); 798 piobufs, dd->ipath_pbufsport, uports);
799 if (dd->ipath_pioupd_thresh) {
800 if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
801 dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
802 if (kpiobufs < dd->ipath_pioupd_thresh)
803 dd->ipath_pioupd_thresh = kpiobufs;
804 }
805
806 ret = dd->ipath_f_early_init(dd);
807 if (ret) {
808 ipath_dev_err(dd, "Early initialization failure\n");
809 goto done;
810 }
761 811
762 dd->ipath_f_early_init(dd);
763 /* 812 /*
764 * cancel any possible active sends from early driver load. 813 * Cancel any possible active sends from early driver load.
765 * Follows early_init because some chips have to initialize 814 * Follows early_init because some chips have to initialize
766 * PIO buffers in early_init to avoid false parity errors. 815 * PIO buffers in early_init to avoid false parity errors.
767 */ 816 */
768 ipath_cancel_sends(dd, 0); 817 ipath_cancel_sends(dd, 0);
769 818
770 /* early_init sets rcvhdrentsize and rcvhdrsize, so this must be 819 /*
771 * done after early_init */ 820 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
821 * done after early_init.
822 */
772 dd->ipath_hdrqlast = 823 dd->ipath_hdrqlast =
773 dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1); 824 dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
774 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize, 825 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
@@ -783,8 +834,8 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
783 goto done; 834 goto done;
784 } 835 }
785 836
786 (void)ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr, 837 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
787 dd->ipath_pioavailregs_phys); 838 dd->ipath_pioavailregs_phys);
788 /* 839 /*
789 * this is to detect s/w errors, which the h/w works around by 840 * this is to detect s/w errors, which the h/w works around by
790 * ignoring the low 6 bits of address, if it wasn't aligned. 841 * ignoring the low 6 bits of address, if it wasn't aligned.
@@ -843,58 +894,65 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
843 /* enable errors that are masked, at least this first time. */ 894 /* enable errors that are masked, at least this first time. */
844 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 895 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
845 ~dd->ipath_maskederrs); 896 ~dd->ipath_maskederrs);
846 dd->ipath_errormask = ipath_read_kreg64(dd, 897 dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
847 dd->ipath_kregs->kr_errormask); 898 dd->ipath_errormask =
899 ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
848 /* clear any interrupts up to this point (ints still not enabled) */ 900 /* clear any interrupts up to this point (ints still not enabled) */
849 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL); 901 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
850 902
903 dd->ipath_f_tidtemplate(dd);
904
851 /* 905 /*
852 * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing 906 * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
853 * re-init, the simplest way to handle this is to free 907 * re-init, the simplest way to handle this is to free
854 * existing, and re-allocate. 908 * existing, and re-allocate.
855 * Need to re-create rest of port 0 portdata as well. 909 * Need to re-create rest of port 0 portdata as well.
856 */ 910 */
911 pd = dd->ipath_pd[0];
857 if (reinit) { 912 if (reinit) {
858 /* Alloc and init new ipath_portdata for port0, 913 struct ipath_portdata *npd;
914
915 /*
916 * Alloc and init new ipath_portdata for port0,
859 * Then free old pd. Could lead to fragmentation, but also 917 * Then free old pd. Could lead to fragmentation, but also
860 * makes later support for hot-swap easier. 918 * makes later support for hot-swap easier.
861 */ 919 */
862 struct ipath_portdata *npd;
863 npd = create_portdata0(dd); 920 npd = create_portdata0(dd);
864 if (npd) { 921 if (npd) {
865 ipath_free_pddata(dd, pd); 922 ipath_free_pddata(dd, pd);
866 dd->ipath_pd[0] = pd = npd; 923 dd->ipath_pd[0] = npd;
924 pd = npd;
867 } else { 925 } else {
868 ipath_dev_err(dd, "Unable to allocate portdata for" 926 ipath_dev_err(dd, "Unable to allocate portdata"
869 " port 0, failing\n"); 927 " for port 0, failing\n");
870 ret = -ENOMEM; 928 ret = -ENOMEM;
871 goto done; 929 goto done;
872 } 930 }
873 } 931 }
874 dd->ipath_f_tidtemplate(dd);
875 ret = ipath_create_rcvhdrq(dd, pd); 932 ret = ipath_create_rcvhdrq(dd, pd);
876 if (!ret) { 933 if (!ret)
877 dd->ipath_hdrqtailptr =
878 (volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
879 ret = create_port0_egr(dd); 934 ret = create_port0_egr(dd);
880 } 935 if (ret) {
881 if (ret) 936 ipath_dev_err(dd, "failed to allocate kernel port's "
882 ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
883 "rcvhdrq and/or egr bufs\n"); 937 "rcvhdrq and/or egr bufs\n");
938 goto done;
939 }
884 else 940 else
885 enable_chip(dd, pd, reinit); 941 enable_chip(dd, reinit);
886
887 942
888 if (!ret && !reinit) { 943 if (!reinit) {
889 /* used when we close a port, for DMA already in flight at close */ 944 /*
945 * Used when we close a port, for DMA already in flight
946 * at close.
947 */
890 dd->ipath_dummy_hdrq = dma_alloc_coherent( 948 dd->ipath_dummy_hdrq = dma_alloc_coherent(
891 &dd->pcidev->dev, pd->port_rcvhdrq_size, 949 &dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
892 &dd->ipath_dummy_hdrq_phys, 950 &dd->ipath_dummy_hdrq_phys,
893 gfp_flags); 951 gfp_flags);
894 if (!dd->ipath_dummy_hdrq ) { 952 if (!dd->ipath_dummy_hdrq) {
895 dev_info(&dd->pcidev->dev, 953 dev_info(&dd->pcidev->dev,
896 "Couldn't allocate 0x%lx bytes for dummy hdrq\n", 954 "Couldn't allocate 0x%lx bytes for dummy hdrq\n",
897 pd->port_rcvhdrq_size); 955 dd->ipath_pd[0]->port_rcvhdrq_size);
898 /* fallback to just 0'ing */ 956 /* fallback to just 0'ing */
899 dd->ipath_dummy_hdrq_phys = 0UL; 957 dd->ipath_dummy_hdrq_phys = 0UL;
900 } 958 }
@@ -906,7 +964,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
906 */ 964 */
907 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL); 965 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
908 966
909 if(!dd->ipath_stats_timer_active) { 967 if (!dd->ipath_stats_timer_active) {
910 /* 968 /*
911 * first init, or after an admin disable/enable 969 * first init, or after an admin disable/enable
912 * set up stats retrieval timer, even if we had errors 970 * set up stats retrieval timer, even if we had errors
@@ -922,6 +980,16 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
922 dd->ipath_stats_timer_active = 1; 980 dd->ipath_stats_timer_active = 1;
923 } 981 }
924 982
983 /* Set up SendDMA if chip supports it */
984 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
985 ret = setup_sdma(dd);
986
987 /* Set up HoL state */
988 init_timer(&dd->ipath_hol_timer);
989 dd->ipath_hol_timer.function = ipath_hol_event;
990 dd->ipath_hol_timer.data = (unsigned long)dd;
991 dd->ipath_hol_state = IPATH_HOL_UP;
992
925done: 993done:
926 if (!ret) { 994 if (!ret) {
927 *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; 995 *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
@@ -934,6 +1002,20 @@ done:
934 0ULL); 1002 0ULL);
935 /* chip is usable; mark it as initialized */ 1003 /* chip is usable; mark it as initialized */
936 *dd->ipath_statusp |= IPATH_STATUS_INITTED; 1004 *dd->ipath_statusp |= IPATH_STATUS_INITTED;
1005
1006 /*
1007 * setup to verify we get an interrupt, and fallback
1008 * to an alternate if necessary and possible
1009 */
1010 if (!reinit) {
1011 init_timer(&dd->ipath_intrchk_timer);
1012 dd->ipath_intrchk_timer.function =
1013 verify_interrupt;
1014 dd->ipath_intrchk_timer.data =
1015 (unsigned long) dd;
1016 }
1017 dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
1018 add_timer(&dd->ipath_intrchk_timer);
937 } else 1019 } else
938 ipath_dev_err(dd, "No interrupts enabled, couldn't " 1020 ipath_dev_err(dd, "No interrupts enabled, couldn't "
939 "setup interrupt address\n"); 1021 "setup interrupt address\n");
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 92e58c921522..1b58f4737c71 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/delay.h>
35 36
36#include "ipath_kernel.h" 37#include "ipath_kernel.h"
37#include "ipath_verbs.h" 38#include "ipath_verbs.h"
@@ -59,9 +60,11 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
59 dev_info(&dd->pcidev->dev, 60 dev_info(&dd->pcidev->dev,
60 "Rewrite PIO buffer %u, to recover from parity error\n", 61 "Rewrite PIO buffer %u, to recover from parity error\n",
61 pnum); 62 pnum);
62 *pbuf = dwcnt+1; /* no flush required, since already in freeze */ 63
63 while(--dwcnt) 64 /* no flush required, since already in freeze */
64 *pbuf++ = 0; 65 writel(dwcnt + 1, pbuf);
66 while (--dwcnt)
67 writel(0, pbuf++);
65} 68}
66 69
67/* 70/*
@@ -70,7 +73,7 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
70 * If rewrite is true, and bits are set in the sendbufferror registers, 73 * If rewrite is true, and bits are set in the sendbufferror registers,
71 * we'll write to the buffer, for error recovery on parity errors. 74 * we'll write to the buffer, for error recovery on parity errors.
72 */ 75 */
73static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite) 76void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
74{ 77{
75 u32 piobcnt; 78 u32 piobcnt;
76 unsigned long sbuf[4]; 79 unsigned long sbuf[4];
@@ -84,12 +87,14 @@ static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
84 dd, dd->ipath_kregs->kr_sendbuffererror); 87 dd, dd->ipath_kregs->kr_sendbuffererror);
85 sbuf[1] = ipath_read_kreg64( 88 sbuf[1] = ipath_read_kreg64(
86 dd, dd->ipath_kregs->kr_sendbuffererror + 1); 89 dd, dd->ipath_kregs->kr_sendbuffererror + 1);
87 if (piobcnt > 128) { 90 if (piobcnt > 128)
88 sbuf[2] = ipath_read_kreg64( 91 sbuf[2] = ipath_read_kreg64(
89 dd, dd->ipath_kregs->kr_sendbuffererror + 2); 92 dd, dd->ipath_kregs->kr_sendbuffererror + 2);
93 if (piobcnt > 192)
90 sbuf[3] = ipath_read_kreg64( 94 sbuf[3] = ipath_read_kreg64(
91 dd, dd->ipath_kregs->kr_sendbuffererror + 3); 95 dd, dd->ipath_kregs->kr_sendbuffererror + 3);
92 } 96 else
97 sbuf[3] = 0;
93 98
94 if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) { 99 if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
95 int i; 100 int i;
@@ -254,24 +259,20 @@ void ipath_format_hwerrors(u64 hwerrs,
254} 259}
255 260
256/* return the strings for the most common link states */ 261/* return the strings for the most common link states */
257static char *ib_linkstate(u32 linkstate) 262static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
258{ 263{
259 char *ret; 264 char *ret;
265 u32 state;
260 266
261 switch (linkstate) { 267 state = ipath_ib_state(dd, ibcs);
262 case IPATH_IBSTATE_INIT: 268 if (state == dd->ib_init)
263 ret = "Init"; 269 ret = "Init";
264 break; 270 else if (state == dd->ib_arm)
265 case IPATH_IBSTATE_ARM:
266 ret = "Arm"; 271 ret = "Arm";
267 break; 272 else if (state == dd->ib_active)
268 case IPATH_IBSTATE_ACTIVE:
269 ret = "Active"; 273 ret = "Active";
270 break; 274 else
271 default:
272 ret = "Down"; 275 ret = "Down";
273 }
274
275 return ret; 276 return ret;
276} 277}
277 278
@@ -286,103 +287,172 @@ void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
286} 287}
287 288
288static void handle_e_ibstatuschanged(struct ipath_devdata *dd, 289static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
289 ipath_err_t errs, int noprint) 290 ipath_err_t errs)
290{ 291{
291 u64 val; 292 u32 ltstate, lstate, ibstate, lastlstate;
292 u32 ltstate, lstate; 293 u32 init = dd->ib_init;
294 u32 arm = dd->ib_arm;
295 u32 active = dd->ib_active;
296 const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
297
298 lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
299 ibstate = ipath_ib_state(dd, ibcs);
300 /* linkstate at last interrupt */
301 lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
302 ltstate = ipath_ib_linktrstate(dd, ibcs); /* linktrainingtate */
293 303
294 /* 304 /*
295 * even if diags are enabled, we want to notice LINKINIT, etc. 305 * Since going into a recovery state causes the link state to go
296 * We just don't want to change the LED state, or 306 * down and since recovery is transitory, it is better if we "miss"
297 * dd->ipath_kregs->kr_ibcctrl 307 * ever seeing the link training state go into recovery (i.e.,
308 * ignore this transition for link state special handling purposes)
309 * without even updating ipath_lastibcstat.
298 */ 310 */
299 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); 311 if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
300 lstate = val & IPATH_IBSTATE_MASK; 312 (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
313 (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
314 goto done;
301 315
302 /* 316 /*
303 * this is confusing enough when it happens that I want to always put it 317 * if linkstate transitions into INIT from any of the various down
304 * on the console and in the logs. If it was a requested state change, 318 * states, or if it transitions from any of the up (INIT or better)
-	 * we'll have already cleared the flags, so we won't print this warning
-	 */
-	if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE)
-	    && (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
-		dev_info(&dd->pcidev->dev, "Link state changed from %s to %s\n",
-			 (dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
-			 ib_linkstate(lstate));
-		/*
-		 * Flush all queued sends when link went to DOWN or INIT,
-		 * to be sure that they don't block SMA and other MAD packets
-		 */
-		ipath_cancel_sends(dd, 1);
-	}
-	else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
-	    lstate == IPATH_IBSTATE_ACTIVE) {
-		/*
-		 * only print at SMA if there is a change, debug if not
-		 * (sometimes we want to know that, usually not).
-		 */
-		if (lstate == ((unsigned) dd->ipath_lastibcstat
-			       & IPATH_IBSTATE_MASK)) {
-			ipath_dbg("Status change intr but no change (%s)\n",
-				  ib_linkstate(lstate));
-		}
-		else
-			ipath_cdbg(VERBOSE, "Unit %u link state %s, last "
-				   "was %s\n", dd->ipath_unit,
-				   ib_linkstate(lstate),
-				   ib_linkstate((unsigned)
-						dd->ipath_lastibcstat
-						& IPATH_IBSTATE_MASK));
-	}
-	else {
-		lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
-		if (lstate == IPATH_IBSTATE_INIT ||
-		    lstate == IPATH_IBSTATE_ARM ||
-		    lstate == IPATH_IBSTATE_ACTIVE)
-			ipath_cdbg(VERBOSE, "Unit %u link state down"
-				   " (state 0x%x), from %s\n",
-				   dd->ipath_unit,
-				   (u32)val & IPATH_IBSTATE_MASK,
-				   ib_linkstate(lstate));
-		else
-			ipath_cdbg(VERBOSE, "Unit %u link state changed "
-				   "to 0x%x from down (%x)\n",
-				   dd->ipath_unit, (u32) val, lstate);
-	}
-	ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-		INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
-	lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
-		INFINIPATH_IBCS_LINKSTATE_MASK;
-
-	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
-	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-		u32 last_ltstate;
-
-		/*
-		 * Ignore cycling back and forth from Polling.Active
-		 * to Polling.Quiet while waiting for the other end of
-		 * the link to come up. We will cycle back and forth
-		 * between them if no cable is plugged in,
-		 * the other device is powered off or disabled, etc.
-		 */
-		last_ltstate = (dd->ipath_lastibcstat >>
-				INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)
-			& INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
-		if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE
-		    || last_ltstate ==
-		    INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-			if (dd->ipath_ibpollcnt > 40) {
-				dd->ipath_flags |= IPATH_NOCABLE;
-				*dd->ipath_statusp |=
-					IPATH_STATUS_IB_NOCABLE;
-			} else
-				dd->ipath_ibpollcnt++;
-			goto skip_ibchange;
-		}
-	}
-	dd->ipath_ibpollcnt = 0; /* some state other than 2 or 3 */
-	ipath_stats.sps_iblink++;
-	if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
+	 * states into any of the down states (except link recovery), then
+	 * call the chip-specific code to take appropriate actions.
+	 */
+	if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
+		lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
+		/* transitioned to UP */
+		if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
+			/* link came up, so we must no longer be disabled */
+			dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
+			ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
+			goto skip_ibchange; /* chip-code handled */
+		}
+	} else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
+		(dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
+		ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
+		ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
+		int handled;
+		handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
+		dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
+		if (handled) {
+			ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
+			goto skip_ibchange; /* chip-code handled */
+		}
+	}
+
+	/*
+	 * Significant enough to always print and get into logs, if it was
+	 * unexpected. If it was a requested state change, we'll have
+	 * already cleared the flags, so we won't print this warning
+	 */
+	if ((ibstate != arm && ibstate != active) &&
+	    (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
+		dev_info(&dd->pcidev->dev, "Link state changed from %s "
+			 "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
+			 "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
+	}
+
+	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
+	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
+		u32 lastlts;
+		lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
+		/*
+		 * Ignore cycling back and forth from Polling.Active to
+		 * Polling.Quiet while waiting for the other end of the link
+		 * to come up, except to try and decide if we are connected
+		 * to a live IB device or not.  We will cycle back and
+		 * forth between them if no cable is plugged in, the other
+		 * device is powered off or disabled, etc.
+		 */
+		if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
+		    lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
+			if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
+			     (++dd->ipath_ibpollcnt == 40)) {
+				dd->ipath_flags |= IPATH_NOCABLE;
+				*dd->ipath_statusp |=
+					IPATH_STATUS_IB_NOCABLE;
+				ipath_cdbg(LINKVERB, "Set NOCABLE\n");
+			}
+			ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
+				ipath_ibcstatus_str[ltstate], ibstate);
+			goto skip_ibchange;
+		}
+	}
+
+	dd->ipath_ibpollcnt = 0; /* not poll*, now */
+	ipath_stats.sps_iblink++;
+
+	if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
+		u64 linkrecov;
+		linkrecov = ipath_snap_cntr(dd,
+			dd->ipath_cregs->cr_iblinkerrrecovcnt);
+		if (linkrecov != dd->ipath_lastlinkrecov) {
+			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
+				ibcs, ib_linkstate(dd, ibcs),
+				ipath_ibcstatus_str[ltstate],
+				linkrecov);
+			/* and no more until active again */
+			dd->ipath_lastlinkrecov = 0;
+			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
+			goto skip_ibchange;
+		}
+	}
+
+	if (ibstate == init || ibstate == arm || ibstate == active) {
+		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
+		if (ibstate == init || ibstate == arm) {
+			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
+			if (dd->ipath_flags & IPATH_LINKACTIVE)
+				signal_ib_event(dd, IB_EVENT_PORT_ERR);
+		}
+		if (ibstate == arm) {
+			dd->ipath_flags |= IPATH_LINKARMED;
+			dd->ipath_flags &= ~(IPATH_LINKUNK |
+				IPATH_LINKINIT | IPATH_LINKDOWN |
+				IPATH_LINKACTIVE | IPATH_NOCABLE);
+			ipath_hol_down(dd);
+		} else if (ibstate == init) {
+			/*
+			 * set INIT and DOWN.  Down is checked by
+			 * most of the other code, but INIT is
+			 * useful to know in a few places.
+			 */
+			dd->ipath_flags |= IPATH_LINKINIT |
+				IPATH_LINKDOWN;
+			dd->ipath_flags &= ~(IPATH_LINKUNK |
+				IPATH_LINKARMED | IPATH_LINKACTIVE |
+				IPATH_NOCABLE);
+			ipath_hol_down(dd);
+		} else {  /* active */
+			dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
+				dd->ipath_cregs->cr_iblinkerrrecovcnt);
+			*dd->ipath_statusp |=
+				IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
+			dd->ipath_flags |= IPATH_LINKACTIVE;
+			dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
+				| IPATH_LINKDOWN | IPATH_LINKARMED |
+				IPATH_NOCABLE);
+			if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+				ipath_restart_sdma(dd);
+			signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
+			/* LED active not handled in chip _f_updown */
+			dd->ipath_f_setextled(dd, lstate, ltstate);
+			ipath_hol_up(dd);
+		}
+
+		/*
+		 * print after we've already done the work, so as not to
+		 * delay the state changes and notifications, for debugging
+		 */
+		if (lstate == lastlstate)
+			ipath_cdbg(LINKVERB, "Unchanged from last: %s "
+				"(%x)\n", ib_linkstate(dd, ibcs), ibstate);
+		else
+			ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
+				dd->ipath_unit, ib_linkstate(dd, ibcs),
+				ipath_ibcstatus_str[ltstate], ibstate);
+	} else { /* down */
 		if (dd->ipath_flags & IPATH_LINKACTIVE)
 			signal_ib_event(dd, IB_EVENT_PORT_ERR);
 		dd->ipath_flags |= IPATH_LINKDOWN;
@@ -391,69 +461,28 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 			     IPATH_LINKARMED);
 		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
 		dd->ipath_lli_counter = 0;
-		if (!noprint) {
-			if (((dd->ipath_lastibcstat >>
-			      INFINIPATH_IBCS_LINKSTATE_SHIFT) &
-			     INFINIPATH_IBCS_LINKSTATE_MASK)
-			    == INFINIPATH_IBCS_L_STATE_ACTIVE)
-				/* if from up to down be more vocal */
-				ipath_cdbg(VERBOSE,
-					   "Unit %u link now down (%s)\n",
-					   dd->ipath_unit,
-					   ipath_ibcstatus_str[ltstate]);
-			else
-				ipath_cdbg(VERBOSE, "Unit %u link is "
-					   "down (%s)\n", dd->ipath_unit,
-					   ipath_ibcstatus_str[ltstate]);
-		}
 
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
-		dd->ipath_flags |= IPATH_LINKACTIVE;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
-			  IPATH_LINKARMED | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
-		*dd->ipath_statusp |=
-			IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-		signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
-		if (dd->ipath_flags & IPATH_LINKACTIVE)
-			signal_ib_event(dd, IB_EVENT_PORT_ERR);
-		/*
-		 * set INIT and DOWN.  Down is checked by most of the other
-		 * code, but INIT is useful to know in a few places.
-		 */
-		dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
-			  | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
-					| IPATH_STATUS_IB_READY);
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
-		if (dd->ipath_flags & IPATH_LINKACTIVE)
-			signal_ib_event(dd, IB_EVENT_PORT_ERR);
-		dd->ipath_flags |= IPATH_LINKARMED;
-		dd->ipath_flags &=
-			~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
-			  IPATH_LINKACTIVE | IPATH_NOCABLE);
-		*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
-					| IPATH_STATUS_IB_READY);
-		dd->ipath_f_setextled(dd, lstate, ltstate);
-	} else {
-		if (!noprint)
-			ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
-				  dd->ipath_unit,
-				  ipath_ibcstatus_str[ltstate], ltstate);
+		if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
+			ipath_cdbg(VERBOSE, "Unit %u link state down "
+				"(state 0x%x), from %s\n",
+				dd->ipath_unit, lstate,
+				ib_linkstate(dd, dd->ipath_lastibcstat));
+		else
+			ipath_cdbg(LINKVERB, "Unit %u link state changed "
+				"to %s (0x%x) from down (%x)\n",
+				dd->ipath_unit,
+				ipath_ibcstatus_str[ltstate],
+				ibstate, lastlstate);
 	}
+
 skip_ibchange:
-	dd->ipath_lastibcstat = val;
+	dd->ipath_lastibcstat = ibcs;
+done:
+	return;
 }
 
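Everything this handler decides flows from plain shift-and-mask decodes of the ibcstatus register, as in the old `(val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) & INFINIPATH_IBCS_LINKSTATE_MASK` code above. A standalone sketch of that decode; the shift and mask values here are illustrative placeholders, not the real chip layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field layout; the real values are chip-specific. */
    #define LSTATE_SHIFT  0
    #define LSTATE_MASK   0x7ULL
    #define LTSTATE_SHIFT 4
    #define LTSTATE_MASK  0xfULL

    static unsigned decode_lstate(uint64_t ibcs)
    {
        return (unsigned)((ibcs >> LSTATE_SHIFT) & LSTATE_MASK);
    }

    static unsigned decode_ltstate(uint64_t ibcs)
    {
        return (unsigned)((ibcs >> LTSTATE_SHIFT) & LTSTATE_MASK);
    }

    int main(void)
    {
        uint64_t ibcs = 0x35; /* example raw register value */
        printf("lstate=%u ltstate=%u\n",
               decode_lstate(ibcs), decode_ltstate(ibcs));
        return 0;
    }

Keeping the decode in tiny helpers (as the new ipath_ib_linktrstate() call does) is what lets the same handler serve chips with different register layouts.
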
 static void handle_supp_msgs(struct ipath_devdata *dd,
-			     unsigned supp_msgs, char *msg, int msgsz)
+			     unsigned supp_msgs, char *msg, u32 msgsz)
 {
 	/*
 	 * Print the message unless it's ibc status change only, which
@@ -461,12 +490,19 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 	 */
 	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
 		int iserr;
-		iserr = ipath_decode_err(msg, msgsz,
+		ipath_err_t mask;
+		iserr = ipath_decode_err(dd, msg, msgsz,
 					 dd->ipath_lasterror &
 					 ~INFINIPATH_E_IBSTATUSCHANGED);
-		if (dd->ipath_lasterror &
-		    ~(INFINIPATH_E_RRCVEGRFULL |
-		      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
+
+		mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+			INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
+
+		/* if we're in debug, then don't mask SDMADISABLED msgs */
+		if (ipath_debug & __IPATH_DBG)
+			mask &= ~INFINIPATH_E_SDMADISABLED;
+
+		if (dd->ipath_lasterror & ~mask)
 			ipath_dev_err(dd, "Suppressed %u messages for "
 				      "fast-repeating errors (%s) (%llx)\n",
 				      supp_msgs, msg,
@@ -493,7 +529,7 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 
 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 				       ipath_err_t errs, char *msg,
-				       int msgsz, int *noprint)
+				       u32 msgsz, int *noprint)
 {
 	unsigned long nc;
 	static unsigned long nextmsg_time;
@@ -523,19 +559,125 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 	return supp_msgs;
 }
 
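handle_frequent_errors() (its body is elided by the hunk above) rate-limits noisy errors: it counts repeats and only lets a message through every few seconds, reporting the suppressed count when it does. A minimal user-space sketch of that throttling shape; the function name and the 5-second window are assumptions for illustration:

    #include <stdio.h>
    #include <time.h>

    static unsigned suppressed;
    static time_t next_msg_time;

    /* Returns nonzero when the caller should actually print. */
    static int may_print(void)
    {
        time_t now = time(NULL);

        if (now < next_msg_time) {
            suppressed++;            /* count, but stay quiet */
            return 0;
        }
        if (suppressed)
            printf("(%u messages suppressed)\n", suppressed);
        suppressed = 0;
        next_msg_time = now + 5;     /* assumed window */
        return 1;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            if (may_print())
                printf("error report %d\n", i);
        return 0;
    }
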
+static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
+{
+	unsigned long flags;
+	int expected;
+
+	if (ipath_debug & __IPATH_DBG) {
+		char msg[128];
+		ipath_decode_err(dd, msg, sizeof msg, errs &
+			INFINIPATH_E_SDMAERRS);
+		ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
+	}
+	if (ipath_debug & __IPATH_VERBDBG) {
+		unsigned long tl, hd, status, lengen;
+		tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
+		hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
+		status = ipath_read_kreg64(dd,
+			dd->ipath_kregs->kr_senddmastatus);
+		lengen = ipath_read_kreg64(dd,
+			dd->ipath_kregs->kr_senddmalengen);
+		ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
+			"lengen 0x%lx\n", tl, hd, status, lengen);
+	}
+
+	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+	__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+	if (!expected)
+		ipath_cancel_sends(dd, 1);
+}
+
+static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
+{
+	unsigned long flags;
+	int expected;
+
+	if ((istat & INFINIPATH_I_SDMAINT) &&
+	    !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+		ipath_sdma_intr(dd);
+
+	if (istat & INFINIPATH_I_SDMADISABLED) {
+		expected = test_bit(IPATH_SDMA_ABORTING,
+			&dd->ipath_sdma_status);
+		ipath_dbg("%s SDmaDisabled intr\n",
+			expected ? "expected" : "unexpected");
+		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+		if (!expected)
+			ipath_cancel_sends(dd, 1);
+		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+	}
+}
+
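Both SDMA paths above share one shape: mark the engine disabled in the shared status word while holding ipath_sdma_lock, and only cancel outstanding sends when no abort was already in flight. A single-threaded sketch of just the bit logic, using the bit numbers defined later in ipath_kernel.h (the lock is omitted here, as noted in the comment):

    #include <stdio.h>

    #define SDMA_ABORTING 0
    #define SDMA_DISABLED 2

    static void note_disabled(unsigned long *status)
    {
        /* in the driver this runs under ipath_sdma_lock */
        int expected = !!(*status & (1UL << SDMA_ABORTING));

        *status |= 1UL << SDMA_DISABLED;
        printf("%s SDmaDisabled\n", expected ? "expected" : "unexpected");
        if (!expected)
            printf("-> would cancel outstanding sends here\n");
    }

    int main(void)
    {
        unsigned long status = 1UL << SDMA_ABORTING; /* abort in flight */
        note_disabled(&status);
        return 0;
    }
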
+static int handle_hdrq_full(struct ipath_devdata *dd)
+{
+	int chkerrpkts = 0;
+	u32 hd, tl;
+	u32 i;
+
+	ipath_stats.sps_hdrqfull++;
+	for (i = 0; i < dd->ipath_cfgports; i++) {
+		struct ipath_portdata *pd = dd->ipath_pd[i];
+
+		if (i == 0) {
+			/*
+			 * For kernel receive queues, we just want to know
+			 * if there are packets in the queue that we can
+			 * process.
+			 */
+			if (pd->port_head != ipath_get_hdrqtail(pd))
+				chkerrpkts |= 1 << i;
+			continue;
+		}
+
+		/* Skip if user context is not open */
+		if (!pd || !pd->port_cnt)
+			continue;
+
+		/* Don't report the same point multiple times. */
+		if (dd->ipath_flags & IPATH_NODMA_RTAIL)
+			tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
+		else
+			tl = ipath_get_rcvhdrtail(pd);
+		if (tl == pd->port_lastrcvhdrqtail)
+			continue;
+
+		hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
+		if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
+			pd->port_lastrcvhdrqtail = tl;
+			pd->port_hdrqfull++;
+			/* flush hdrqfull so that poll() sees it */
+			wmb();
+			wake_up_interruptible(&pd->port_wait);
+		}
+	}
+
+	return chkerrpkts;
+}
+
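The full-queue test in handle_hdrq_full(), `hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)`, is the standard one-slot-reserved circular-buffer check, with the second clause handling head wrapping back to 0. A standalone demonstration of the same predicate:

    #include <stdio.h>

    /* Queue of last+1 slots; one slot stays empty to tell full from empty. */
    static int hdrq_is_full(unsigned hd, unsigned tl, unsigned last)
    {
        return hd == tl + 1 || (hd == 0 && tl == last);
    }

    int main(void)
    {
        unsigned last = 7; /* highest valid index, i.e. 8 slots */

        printf("%d\n", hdrq_is_full(5, 4, last)); /* 1: head right behind tail */
        printf("%d\n", hdrq_is_full(0, 7, last)); /* 1: wrapped case */
        printf("%d\n", hdrq_is_full(3, 6, last)); /* 0: room remains */
        return 0;
    }
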
 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
 	char msg[128];
 	u64 ignore_this_time = 0;
-	int i, iserr = 0;
+	u64 iserr = 0;
 	int chkerrpkts = 0, noprint = 0;
 	unsigned supp_msgs;
 	int log_idx;
 
-	supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);
+	/*
+	 * don't report errors that are masked, either at init
+	 * (not set in ipath_errormask), or temporarily (set in
+	 * ipath_maskederrs)
+	 */
+	errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
 
-	/* don't report errors that are masked */
-	errs &= ~dd->ipath_maskederrs;
+	supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
+		&noprint);
 
 	/* do these first, they are most important */
 	if (errs & INFINIPATH_E_HARDWARE) {
@@ -550,6 +692,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		}
 	}
 
+	if (errs & INFINIPATH_E_SDMAERRS)
+		handle_sdma_errors(dd, errs);
+
 	if (!noprint && (errs & ~dd->ipath_e_bitsextant))
 		ipath_dev_err(dd, "error interrupt with unknown errors "
 			      "%llx set\n", (unsigned long long)
@@ -580,18 +725,19 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		 * ones on this particular interrupt, which also isn't great
 		 */
 		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
+
 		dd->ipath_errormask &= ~dd->ipath_maskederrs;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
 				 dd->ipath_errormask);
-		s_iserr = ipath_decode_err(msg, sizeof msg,
+		s_iserr = ipath_decode_err(dd, msg, sizeof msg,
 					   dd->ipath_maskederrs);
 
 		if (dd->ipath_maskederrs &
 		    ~(INFINIPATH_E_RRCVEGRFULL |
 		      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
 			ipath_dev_err(dd, "Temporarily disabling "
 			    "error(s) %llx reporting; too frequent (%s)\n",
-				(unsigned long long)dd->ipath_maskederrs,
+				(unsigned long long) dd->ipath_maskederrs,
 				msg);
 		else {
 			/*
@@ -633,26 +779,43 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 					 INFINIPATH_E_IBSTATUSCHANGED);
 	}
 
-	/* likely due to cancel, so suppress */
+	if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
+		dd->ipath_spectriggerhit++;
+		ipath_dbg("%lu special trigger hits\n",
+			dd->ipath_spectriggerhit);
+	}
+
+	/* likely due to cancel; so suppress message unless verbose */
 	if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
 	    dd->ipath_lastcancel > jiffies) {
-		ipath_dbg("Suppressed armlaunch/spktlen after error send cancel\n");
+		/* armlaunch takes precedence; it often causes both. */
+		ipath_cdbg(VERBOSE,
+			"Suppressed %s error (%llx) after sendbuf cancel\n",
+			(errs & INFINIPATH_E_SPIOARMLAUNCH) ?
+			"armlaunch" : "sendpktlen", (unsigned long long)errs);
 		errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
 	}
 
 	if (!errs)
 		return 0;
 
-	if (!noprint)
+	if (!noprint) {
+		ipath_err_t mask;
 		/*
-		 * the ones we mask off are handled specially below or above
+		 * The ones we mask off are handled specially below
+		 * or above.  Also mask SDMADISABLED by default as it
+		 * is too chatty.
 		 */
-		ipath_decode_err(msg, sizeof msg,
-				 errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
-					  INFINIPATH_E_RRCVEGRFULL |
-					  INFINIPATH_E_RRCVHDRFULL |
-					  INFINIPATH_E_HARDWARE));
-	else
+		mask = INFINIPATH_E_IBSTATUSCHANGED |
+			INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+			INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
+
+		/* if we're in debug, then don't mask SDMADISABLED msgs */
+		if (ipath_debug & __IPATH_DBG)
+			mask &= ~INFINIPATH_E_SDMADISABLED;
+
+		ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
+	} else
 		/* so we don't need if (!noprint) at strlcat's below */
 		*msg = 0;
 
@@ -677,40 +840,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	 * fast_stats, no more than every 5 seconds, user ports get printed
 	 * on close
 	 */
-	if (errs & INFINIPATH_E_RRCVHDRFULL) {
-		u32 hd, tl;
-		ipath_stats.sps_hdrqfull++;
-		for (i = 0; i < dd->ipath_cfgports; i++) {
-			struct ipath_portdata *pd = dd->ipath_pd[i];
-			if (i == 0) {
-				hd = pd->port_head;
-				tl = (u32) le64_to_cpu(
-					*dd->ipath_hdrqtailptr);
-			} else if (pd && pd->port_cnt &&
-				   pd->port_rcvhdrtail_kvaddr) {
-				/*
-				 * don't report same point multiple times,
-				 * except kernel
-				 */
-				tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
-				if (tl == pd->port_lastrcvhdrqtail)
-					continue;
-				hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
-						       i);
-			} else
-				continue;
-			if (hd == (tl + 1) ||
-			    (!hd && tl == dd->ipath_hdrqlast)) {
-				if (i == 0)
-					chkerrpkts = 1;
-				pd->port_lastrcvhdrqtail = tl;
-				pd->port_hdrqfull++;
-				/* flush hdrqfull so that poll() sees it */
-				wmb();
-				wake_up_interruptible(&pd->port_wait);
-			}
-		}
-	}
+	if (errs & INFINIPATH_E_RRCVHDRFULL)
+		chkerrpkts |= handle_hdrq_full(dd);
 	if (errs & INFINIPATH_E_RRCVEGRFULL) {
 		struct ipath_portdata *pd = dd->ipath_pd[0];
 
@@ -721,9 +852,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		 * vs user)
 		 */
 		ipath_stats.sps_etidfull++;
-		if (pd->port_head !=
-		    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
-			chkerrpkts = 1;
+		if (pd->port_head != ipath_get_hdrqtail(pd))
+			chkerrpkts |= 1;
 	}
 
 	/*
@@ -741,16 +871,13 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
 				     | IPATH_LINKARMED | IPATH_LINKACTIVE);
 		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-		if (!noprint) {
-			u64 st = ipath_read_kreg64(
-				dd, dd->ipath_kregs->kr_ibcstatus);
-
-			ipath_dbg("Lost link, link now down (%s)\n",
-				  ipath_ibcstatus_str[st & 0xf]);
-		}
+
+		ipath_dbg("Lost link, link now down (%s)\n",
+			ipath_ibcstatus_str[ipath_read_kreg64(dd,
+			dd->ipath_kregs->kr_ibcstatus) & 0xf]);
 	}
 	if (errs & INFINIPATH_E_IBSTATUSCHANGED)
-		handle_e_ibstatuschanged(dd, errs, noprint);
+		handle_e_ibstatuschanged(dd, errs);
 
 	if (errs & INFINIPATH_E_RESET) {
 		if (!noprint)
@@ -765,9 +892,6 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	if (!noprint && *msg) {
 		if (iserr)
 			ipath_dev_err(dd, "%s error\n", msg);
-		else
-			dev_info(&dd->pcidev->dev, "%s packet problems\n",
-				 msg);
 	}
 	if (dd->ipath_state_wanted & dd->ipath_flags) {
 		ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
@@ -779,7 +903,6 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 	return chkerrpkts;
 }
 
-
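The new masking line at the top of handle_errors() composes two filters: bits never enabled at all (ipath_errormask) and bits muted temporarily because they repeat too fast (ipath_maskederrs, re-enabled later at ipath_unmasktime). A compact sketch of that bookkeeping; field names mirror the driver's, and the re-enable timer is simplified away:

    #include <stdint.h>
    #include <stdio.h>

    struct errstate {
        uint64_t errormask;   /* what we are willing to see at all */
        uint64_t maskederrs;  /* muted for being too frequent */
    };

    static uint64_t filter(const struct errstate *s, uint64_t raw)
    {
        return raw & s->errormask & ~s->maskederrs;
    }

    static void mute(struct errstate *s, uint64_t noisy)
    {
        s->maskederrs |= noisy;
        s->errormask &= ~s->maskederrs; /* also stop the chip interrupting */
        /* the driver arms a timer to clear maskederrs again later */
    }

    int main(void)
    {
        struct errstate s = { .errormask = 0xff, .maskederrs = 0 };

        mute(&s, 0x08);
        printf("reported: 0x%llx\n",
               (unsigned long long)filter(&s, 0x0f)); /* 0x7: bit 3 muted */
        return 0;
    }
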
 /*
  * try to cleanup as much as possible for anything that might have gone
  * wrong while in freeze mode, such as pio buffers being written by user
@@ -796,8 +919,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 void ipath_clear_freeze(struct ipath_devdata *dd)
 {
 	int i, im;
-	__le64 val;
-	unsigned long flags;
+	u64 val;
 
 	/* disable error interrupts, to avoid confusion */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
@@ -816,14 +938,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 			 dd->ipath_control);
 
 	/* ensure pio avail updates continue */
-	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-		 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-			 dd->ipath_sendctrl);
-	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+	ipath_force_pio_avail_update(dd);
 
 	/*
 	 * We just enabled pioavailupdate, so dma copy is almost certainly
@@ -831,10 +946,13 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 	 */
 	for (i = 0; i < dd->ipath_pioavregs; i++) {
 		/* deal with 6110 chip bug */
-		im = i > 3 ? i ^ 1 : i;
+		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
+			i ^ 1 : i;
 		val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
-		dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
-			= le64_to_cpu(val);
+		dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
+		dd->ipath_pioavailshadow[i] = val |
+			(~dd->ipath_pioavailkernel[i] <<
+			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
 	}
 
 	/*
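The new shadow update above folds ipath_pioavailkernel into the value read back from the chip: any buffer not currently owned by the kernel has its busy bit forced on in ipath_pioavailshadow, so the kernel's PIO allocator skips it. Reduced to plain bit arithmetic; placing the busy bits in the upper half of the word via BUSY_SHIFT is an assumption of this sketch, and it assumes a 64-bit unsigned long:

    #include <stdio.h>

    #define BUSY_SHIFT 32 /* assumed position of the busy bits */

    static unsigned long shadow(unsigned long hw_avail, unsigned long kernel)
    {
        /* buffers not owned by the kernel get their busy bit forced on */
        return hw_avail | (~kernel << BUSY_SHIFT);
    }

    int main(void)
    {
        unsigned long hw = 0x5UL;   /* chip says buffers 0 and 2 are free */
        unsigned long kern = 0x1UL; /* only buffer 0 belongs to the kernel */

        printf("shadow=0x%lx\n", shadow(hw, kern));
        return 0;
    }
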
@@ -950,7 +1068,7 @@ set:
  * process was waiting for a packet to arrive, and didn't want
  * to poll
  */
-static void handle_urcv(struct ipath_devdata *dd, u32 istat)
+static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 {
 	u64 portr;
 	int i;
@@ -966,12 +1084,13 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 	 * and ipath_poll_next()...
 	 */
 	rmb();
-	portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
-		 dd->ipath_i_rcvavail_mask)
-		| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
-		   dd->ipath_i_rcvurg_mask);
+	portr = ((istat >> dd->ipath_i_rcvavail_shift) &
+		 dd->ipath_i_rcvavail_mask) |
+		((istat >> dd->ipath_i_rcvurg_shift) &
+		 dd->ipath_i_rcvurg_mask);
 	for (i = 1; i < dd->ipath_cfgports; i++) {
 		struct ipath_portdata *pd = dd->ipath_pd[i];
+
 		if (portr & (1 << i) && pd && pd->port_cnt) {
 			if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
 					       &pd->port_flag)) {
@@ -988,7 +1107,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 	}
 	if (rcvdint) {
 		/* only want to take one interrupt, so turn off the rcv
-		 * interrupt for all the ports that we did the wakeup on
+		 * interrupt for all the ports that we set the rcv_waiting
 		 * (but never for kernel port)
 		 */
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
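The portr computation above packs the receive-available and receive-urgent port bitmaps into a single word, after which port i is just bit i. Because the shifts are now per-device fields rather than compile-time constants, the same source serves chips with different interrupt-register layouts. An illustration with invented shift and mask values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* per-device values; these particular numbers are invented */
        unsigned avail_shift = 0, urg_shift = 32;
        uint64_t avail_mask = 0x1f, urg_mask = 0x1f;

        /* port 2 has data available; port 3 has an urgent packet */
        uint64_t istat = (1ULL << 2) | (1ULL << (32 + 3));
        uint64_t portr = ((istat >> avail_shift) & avail_mask) |
                         ((istat >> urg_shift) & urg_mask);

        for (int i = 1; i < 5; i++)
            if (portr & (1ULL << i))
                printf("port %d needs attention\n", i);
        return 0;
    }
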
@@ -999,12 +1118,11 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 irqreturn_t ipath_intr(int irq, void *data)
 {
 	struct ipath_devdata *dd = data;
-	u32 istat, chk0rcv = 0;
+	u64 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
 	static unsigned unexpected = 0;
-	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
-		(1U<<INFINIPATH_I_RCVURG_SHIFT);
+	u64 kportrbits;
 
 	ipath_stats.sps_ints++;
 
@@ -1053,17 +1171,17 @@ irqreturn_t ipath_intr(int irq, void *data)
 
 	if (unlikely(istat & ~dd->ipath_i_bitsextant))
 		ipath_dev_err(dd,
-			      "interrupt with unknown interrupts %x set\n",
-			      istat & (u32) ~ dd->ipath_i_bitsextant);
-	else
-		ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
+			      "interrupt with unknown interrupts %Lx set\n",
+			      istat & ~dd->ipath_i_bitsextant);
+	else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
+		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
 
-	if (unlikely(istat & INFINIPATH_I_ERROR)) {
+	if (istat & INFINIPATH_I_ERROR) {
 		ipath_stats.sps_errints++;
 		estat = ipath_read_kreg64(dd,
 					  dd->ipath_kregs->kr_errorstatus);
 		if (!estat)
-			dev_info(&dd->pcidev->dev, "error interrupt (%x), "
+			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
 				 "but no error bits set!\n", istat);
 		else if (estat == -1LL)
 			/*
@@ -1073,9 +1191,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 			ipath_dev_err(dd, "Read of error status failed "
 				      "(all bits set); ignoring\n");
 		else
-			if (handle_errors(dd, estat))
-				/* force calling ipath_kreceive() */
-				chk0rcv = 1;
+			chk0rcv |= handle_errors(dd, estat);
 	}
 
 	if (istat & INFINIPATH_I_GPIO) {
@@ -1093,8 +1209,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 
 		gpiostatus = ipath_read_kreg32(
 			dd, dd->ipath_kregs->kr_gpio_status);
-		/* First the error-counter case.
-		 */
+		/* First the error-counter case. */
 		if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
 		    (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
 			/* want to clear the bits we see asserted. */
@@ -1156,7 +1271,6 @@ irqreturn_t ipath_intr(int irq, void *data)
 					 (u64) to_clear);
 		}
 	}
-	chk0rcv |= istat & port0rbits;
 
 	/*
 	 * Clear the interrupt bits we found set, unless they are receive
@@ -1169,22 +1283,25 @@ irqreturn_t ipath_intr(int irq, void *data)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
 	/*
-	 * handle port0 receive before checking for pio buffers available,
-	 * since receives can overflow; piobuf waiters can afford a few
-	 * extra cycles, since they were waiting anyway, and user's waiting
-	 * for receive are at the bottom.
+	 * Handle kernel receive queues before checking for pio buffers
+	 * available since receives can overflow; piobuf waiters can afford
+	 * a few extra cycles, since they were waiting anyway, and user's
+	 * waiting for receive are at the bottom.
 	 */
-	if (chk0rcv) {
+	kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
+		(1ULL << dd->ipath_i_rcvurg_shift);
+	if (chk0rcv || (istat & kportrbits)) {
+		istat &= ~kportrbits;
 		ipath_kreceive(dd->ipath_pd[0]);
-		istat &= ~port0rbits;
 	}
 
-	if (istat & ((dd->ipath_i_rcvavail_mask <<
-		      INFINIPATH_I_RCVAVAIL_SHIFT)
-		     | (dd->ipath_i_rcvurg_mask <<
-			INFINIPATH_I_RCVURG_SHIFT)))
+	if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
+		     (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
 		handle_urcv(dd, istat);
 
+	if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
+		handle_sdma_intr(dd, istat);
+
 	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
 		unsigned long flags;
 
@@ -1195,7 +1312,10 @@ irqreturn_t ipath_intr(int irq, void *data)
 		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
 		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
-		handle_layer_pioavail(dd);
+		if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
+			handle_layer_pioavail(dd);
+		else
+			ipath_dbg("unexpected BUFAVAIL intr\n");
 	}
 
 	ret = IRQ_HANDLED;
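The handler's old static port0rbits baked in one fixed register layout; the rewrite derives the kernel-port bits from the per-device shifts at run time, so 6120- and 7220-class chips share this interrupt path. The pattern, reduced to its essentials with invented shift values:

    #include <stdint.h>
    #include <stdio.h>

    struct dev { unsigned rcvavail_shift, rcvurg_shift; };

    static uint64_t kport_bits(const struct dev *dd)
    {
        /* bit 0 of each per-port field is the kernel port */
        return (1ULL << dd->rcvavail_shift) | (1ULL << dd->rcvurg_shift);
    }

    int main(void)
    {
        struct dev chip_a = { .rcvavail_shift = 12, .rcvurg_shift = 0 };
        struct dev chip_b = { .rcvavail_shift = 32, .rcvurg_shift = 4 };

        printf("A: 0x%llx  B: 0x%llx\n",
               (unsigned long long)kport_bits(&chip_a),
               (unsigned long long)kport_bits(&chip_b));
        return 0;
    }
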
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index ecf3f7ff7717..5863cbe99303 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1,7 +1,7 @@
 #ifndef _IPATH_KERNEL_H
 #define _IPATH_KERNEL_H
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -42,6 +42,8 @@
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <rdma/ib_verbs.h>
 
@@ -175,9 +177,13 @@ struct ipath_portdata {
 	u16 poll_type;
 	/* port rcvhdrq head offset */
 	u32 port_head;
+	/* receive packet sequence counter */
+	u32 port_seq_cnt;
 };
 
 struct sk_buff;
+struct ipath_sge_state;
+struct ipath_verbs_txreq;
 
 /*
  * control information for layered drivers
@@ -191,6 +197,40 @@ struct ipath_skbinfo {
 	dma_addr_t phys;
 };
 
+struct ipath_sdma_txreq {
+	int flags;
+	int sg_count;
+	union {
+		struct scatterlist *sg;
+		void *map_addr;
+	};
+	void (*callback)(void *, int);
+	void *callback_cookie;
+	int callback_status;
+	u16 start_idx;		/* sdma private */
+	u16 next_descq_idx;	/* sdma private */
+	struct list_head list;	/* sdma private */
+};
+
+struct ipath_sdma_desc {
+	__le64 qw[2];
+};
+
+#define IPATH_SDMA_TXREQ_F_USELARGEBUF	0x1
+#define IPATH_SDMA_TXREQ_F_HEADTOHOST	0x2
+#define IPATH_SDMA_TXREQ_F_INTREQ	0x4
+#define IPATH_SDMA_TXREQ_F_FREEBUF	0x8
+#define IPATH_SDMA_TXREQ_F_FREEDESC	0x10
+#define IPATH_SDMA_TXREQ_F_VL15		0x20
+
+#define IPATH_SDMA_TXREQ_S_OK		0
+#define IPATH_SDMA_TXREQ_S_SENDERROR	1
+#define IPATH_SDMA_TXREQ_S_ABORTED	2
+#define IPATH_SDMA_TXREQ_S_SHUTDOWN	3
+
+/* max dwords in small buffer packet */
+#define IPATH_SMALLBUF_DWORDS	(dd->ipath_piosize2k >> 2)
+
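A submitter fills an ipath_sdma_txreq before queueing it, and the engine reports completion through the callback with one of the IPATH_SDMA_TXREQ_S_* codes. A hedged sketch of that handshake, modeled with a stripped-down stand-in struct rather than the kernel types; the submit path itself lives elsewhere in this patch:

    #include <stdio.h>

    /* stand-in for struct ipath_sdma_txreq */
    struct txreq {
        int flags;
        void (*callback)(void *cookie, int status);
        void *cookie;
    };

    static void my_done(void *cookie, int status)
    {
        printf("tx '%s' finished, status %d\n", (char *)cookie, status);
    }

    int main(void)
    {
        struct txreq tx = {
            .flags = 0x4,       /* e.g. request a completion interrupt */
            .callback = my_done,
            .cookie = "hello",
        };

        /* the sdma engine invokes this when the descriptors retire */
        tx.callback(tx.cookie, 0 /* S_OK */);
        return 0;
    }
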
 /*
  * Possible IB config parameters for ipath_f_get/set_ib_cfg()
  */
@@ -221,11 +261,6 @@ struct ipath_devdata {
 	unsigned long ipath_physaddr;
 	/* base of memory alloced for ipath_kregbase, for free */
 	u64 *ipath_kregalloc;
-	/*
-	 * virtual address where port0 rcvhdrqtail updated for this unit.
-	 * only written to by the chip, not the driver.
-	 */
-	volatile __le64 *ipath_hdrqtailptr;
 	/* ipath_cfgports pointers */
 	struct ipath_portdata **ipath_pd;
 	/* sk_buffs used by port 0 eager receive queue */
@@ -283,6 +318,7 @@ struct ipath_devdata {
 	/* per chip actions needed for IB Link up/down changes */
 	int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
 
+	unsigned ipath_lastegr_idx;
 	struct ipath_ibdev *verbs_dev;
 	struct timer_list verbs_timer;
 	/* total dwords sent (summed from counter) */
@@ -309,6 +345,7 @@ struct ipath_devdata {
 	ipath_err_t ipath_lasthwerror;
 	/* errors masked because they occur too fast */
 	ipath_err_t ipath_maskederrs;
+	u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
 	/* time in jiffies at which to re-enable maskederrs */
 	unsigned long ipath_unmasktime;
 	/* count of egrfull errors, combined for all ports */
@@ -347,6 +384,7 @@ struct ipath_devdata {
 	u32 ipath_lastrpkts;
 	/* pio bufs allocated per port */
 	u32 ipath_pbufsport;
+	u32 ipath_pioupd_thresh; /* update threshold, some chips */
 	/*
 	 * number of ports configured as max; zero is set to number chip
 	 * supports, less gives more pio bufs/port, etc.
@@ -365,6 +403,7 @@ struct ipath_devdata {
 	 * get to multiple devices
 	 */
 	u32 ipath_lastpioindex;
+	u32 ipath_lastpioindexl;
 	/* max length of freezemsg */
 	u32 ipath_freezelen;
 	/*
@@ -381,6 +420,15 @@ struct ipath_devdata {
 	u32 ipath_pcibar0;
 	/* so we can rewrite it after a chip reset */
 	u32 ipath_pcibar1;
+	u32 ipath_x1_fix_tries;
+	u32 ipath_autoneg_tries;
+	u32 serdes_first_init_done;
+
+	struct ipath_relock {
+		atomic_t ipath_relock_timer_active;
+		struct timer_list ipath_relock_timer;
+		unsigned int ipath_relock_interval; /* in jiffies */
+	} ipath_relock_singleton;
 
 	/* interrupt number */
 	int ipath_irq;
@@ -403,7 +451,7 @@ struct ipath_devdata {
 	u64 __iomem *ipath_egrtidbase;
 	/* lock to workaround chip bug 9437 and others */
 	spinlock_t ipath_kernel_tid_lock;
-	spinlock_t ipath_tid_lock;
+	spinlock_t ipath_user_tid_lock;
 	spinlock_t ipath_sendctrl_lock;
 
 	/*
@@ -422,11 +470,48 @@ struct ipath_devdata {
 	struct class_device *diag_class_dev;
 	/* timer used to prevent stats overflow, error throttling, etc. */
 	struct timer_list ipath_stats_timer;
+	/* timer to verify interrupts work, and fallback if possible */
+	struct timer_list ipath_intrchk_timer;
 	void *ipath_dummy_hdrq;	/* used after port close */
 	dma_addr_t ipath_dummy_hdrq_phys;
 
+	/* SendDMA related entries */
+	spinlock_t ipath_sdma_lock;
+	u64 ipath_sdma_status;
+	unsigned long ipath_sdma_abort_jiffies;
+	unsigned long ipath_sdma_abort_intr_timeout;
+	unsigned long ipath_sdma_buf_jiffies;
+	struct ipath_sdma_desc *ipath_sdma_descq;
+	u64 ipath_sdma_descq_added;
+	u64 ipath_sdma_descq_removed;
+	int ipath_sdma_desc_nreserved;
+	u16 ipath_sdma_descq_cnt;
+	u16 ipath_sdma_descq_tail;
+	u16 ipath_sdma_descq_head;
+	u16 ipath_sdma_next_intr;
+	u16 ipath_sdma_reset_wait;
+	u8 ipath_sdma_generation;
+	struct tasklet_struct ipath_sdma_abort_task;
+	struct tasklet_struct ipath_sdma_notify_task;
+	struct list_head ipath_sdma_activelist;
+	struct list_head ipath_sdma_notifylist;
+	atomic_t ipath_sdma_vl15_count;
+	struct timer_list ipath_sdma_vl15_timer;
+
+	dma_addr_t ipath_sdma_descq_phys;
+	volatile __le64 *ipath_sdma_head_dma;
+	dma_addr_t ipath_sdma_head_phys;
+
 	unsigned long ipath_ureg_align; /* user register alignment */
 
+	struct delayed_work ipath_autoneg_work;
+	wait_queue_head_t ipath_autoneg_wait;
+
+	/* HoL blocking / user app forward-progress state */
+	unsigned ipath_hol_state;
+	unsigned ipath_hol_next;
+	struct timer_list ipath_hol_timer;
+
 	/*
 	 * Shadow copies of registers; size indicates read access size.
 	 * Most of them are readonly, but some are write-only register,
@@ -447,6 +532,8 @@ struct ipath_devdata {
 	 * init time.
 	 */
 	unsigned long ipath_pioavailshadow[8];
+	/* bitmap of send buffers available for the kernel to use with PIO. */
+	unsigned long ipath_pioavailkernel[8];
 	/* shadow of kr_gpio_out, for rmw ops */
 	u64 ipath_gpio_out;
 	/* shadow the gpio mask register */
@@ -472,6 +559,8 @@ struct ipath_devdata {
 	u64 ipath_intconfig;
 	/* kr_sendpiobufbase value */
 	u64 ipath_piobufbase;
+	/* kr_ibcddrctrl shadow */
+	u64 ipath_ibcddrctrl;
 
 	/* these are the "32 bit" regs */
 
@@ -488,7 +577,10 @@ struct ipath_devdata {
 	unsigned long ipath_rcvctrl;
 	/* shadow kr_sendctrl */
 	unsigned long ipath_sendctrl;
-	unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */
+	/* to not count armlaunch after cancel */
+	unsigned long ipath_lastcancel;
+	/* count cases where special trigger was needed (double write) */
+	unsigned long ipath_spectriggerhit;
 
 	/* value we put in kr_rcvhdrcnt */
 	u32 ipath_rcvhdrcnt;
@@ -510,6 +602,7 @@ struct ipath_devdata {
 	u32 ipath_piobcnt4k;
 	/* size in bytes of "4KB" PIO buffers */
 	u32 ipath_piosize4k;
+	u32 ipath_pioreserved; /* reserved special-inkernel; */
 	/* kr_rcvegrbase value */
 	u32 ipath_rcvegrbase;
 	/* kr_rcvegrcnt value */
@@ -546,10 +639,10 @@ struct ipath_devdata {
 	u32 ipath_init_ibmaxlen;
 	/* size of each rcvegrbuffer */
 	u32 ipath_rcvegrbufsize;
-	/* width (2,4,8,16,32) from HT config reg */
-	u32 ipath_htwidth;
-	/* HT speed (200,400,800,1000) from HT config */
-	u32 ipath_htspeed;
+	/* localbus width (1, 2,4,8,16,32) from config space */
+	u32 ipath_lbus_width;
+	/* localbus speed (HT: 200,400,800,1000; PCIe 2500) */
+	u32 ipath_lbus_speed;
 	/*
 	 * number of sequential ibcstatus change for polling active/quiet
 	 * (i.e., link not coming up).
@@ -573,21 +666,14 @@ struct ipath_devdata {
 	 */
 	u8 ipath_serial[16];
 	/* human readable board version */
-	u8 ipath_boardversion[80];
+	u8 ipath_boardversion[96];
+	u8 ipath_lbus_info[32]; /* human readable localbus info */
 	/* chip major rev, from ipath_revision */
 	u8 ipath_majrev;
 	/* chip minor rev, from ipath_revision */
 	u8 ipath_minrev;
 	/* board rev, from ipath_revision */
 	u8 ipath_boardrev;
-
-	u8 ipath_r_portenable_shift;
-	u8 ipath_r_intravail_shift;
-	u8 ipath_r_tailupd_shift;
-	u8 ipath_r_portcfg_shift;
-
-	/* unit # of this chip, if present */
-	int ipath_unit;
 	/* saved for restore after reset */
 	u8 ipath_pci_cacheline;
 	/* LID mask control */
@@ -603,6 +689,14 @@ struct ipath_devdata {
 	/* Rx Polarity inversion (compensate for ~tx on partner) */
 	u8 ipath_rx_pol_inv;
 
+	u8 ipath_r_portenable_shift;
+	u8 ipath_r_intravail_shift;
+	u8 ipath_r_tailupd_shift;
+	u8 ipath_r_portcfg_shift;
+
+	/* unit # of this chip, if present */
+	int ipath_unit;
+
 	/* local link integrity counter */
 	u32 ipath_lli_counter;
 	/* local link integrity errors */
@@ -617,9 +711,6 @@ struct ipath_devdata {
 	u32 ipath_overrun_thresh_errs;
 	u32 ipath_lli_errs;
 
-	/* status check work */
-	struct delayed_work status_work;
-
 	/*
 	 * Not all devices managed by a driver instance are the same
 	 * type, so these fields must be per-device.
@@ -632,8 +723,8 @@ struct ipath_devdata {
 	 * Below should be computable from number of ports,
 	 * since they are never modified.
 	 */
-	u32 ipath_i_rcvavail_mask;
-	u32 ipath_i_rcvurg_mask;
+	u64 ipath_i_rcvavail_mask;
+	u64 ipath_i_rcvurg_mask;
 	u16 ipath_i_rcvurg_shift;
 	u16 ipath_i_rcvavail_shift;
 
@@ -641,8 +732,9 @@ struct ipath_devdata {
 	 * Register bits for selecting i2c direction and values, used for
 	 * I2C serial flash.
	 */
-	u16 ipath_gpio_sda_num;
-	u16 ipath_gpio_scl_num;
+	u8 ipath_gpio_sda_num;
+	u8 ipath_gpio_scl_num;
+	u8 ipath_i2c_chain_type;
 	u64 ipath_gpio_sda;
 	u64 ipath_gpio_scl;
 
@@ -703,13 +795,51 @@ struct ipath_devdata {
 	/* interrupt mitigation reload register info */
 	u16 ipath_jint_idle_ticks;	/* idle clock ticks */
 	u16 ipath_jint_max_packets;	/* max packets across all ports */
+
+	/*
+	 * lock for access to SerDes, and flags to sequence preset
+	 * versus steady-state. 7220-only at the moment.
+	 */
+	spinlock_t ipath_sdepb_lock;
+	u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
 };
 
+/* ipath_hol_state values (stopping/starting user proc, send flushing) */
+#define IPATH_HOL_UP		0
+#define IPATH_HOL_DOWN		1
+/* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
+#define IPATH_HOL_DOWNSTOP	0
+#define IPATH_HOL_DOWNCONT	1
+
+/* bit positions for sdma_status */
+#define IPATH_SDMA_ABORTING	0
+#define IPATH_SDMA_DISARMED	1
+#define IPATH_SDMA_DISABLED	2
+#define IPATH_SDMA_LAYERBUF	3
+#define IPATH_SDMA_RUNNING	62
+#define IPATH_SDMA_SHUTDOWN	63
+
+/* bit combinations that correspond to abort states */
+#define IPATH_SDMA_ABORT_NONE	0
+#define IPATH_SDMA_ABORT_ABORTING	(1UL << IPATH_SDMA_ABORTING)
+#define IPATH_SDMA_ABORT_DISARMED	((1UL << IPATH_SDMA_ABORTING) | \
+	(1UL << IPATH_SDMA_DISARMED))
+#define IPATH_SDMA_ABORT_DISABLED	((1UL << IPATH_SDMA_ABORTING) | \
+	(1UL << IPATH_SDMA_DISABLED))
+#define IPATH_SDMA_ABORT_ABORTED	((1UL << IPATH_SDMA_ABORTING) | \
+	(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
+#define IPATH_SDMA_ABORT_MASK	((1UL<<IPATH_SDMA_ABORTING) | \
+	(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
+
+#define IPATH_SDMA_BUF_NONE	0
+#define IPATH_SDMA_BUF_MASK	(1UL<<IPATH_SDMA_LAYERBUF)
+
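The *_ABORT_* values are not independent flags but snapshots of the three low status bits, so the abort machinery can classify progress with a single masked compare. Demonstrated with the same bit definitions:

    #include <stdio.h>

    #define ABORTING   (1UL << 0)
    #define DISARMED   (1UL << 1)
    #define DISABLED   (1UL << 2)
    #define ABORT_MASK (ABORTING | DISARMED | DISABLED)
    #define ABORT_ABORTED (ABORTING | DISARMED | DISABLED)

    int main(void)
    {
        unsigned long status = ABORTING | DISARMED | DISABLED | (1UL << 62);

        /* high bits (e.g. RUNNING/SHUTDOWN) don't disturb the comparison */
        if ((status & ABORT_MASK) == ABORT_ABORTED)
            printf("abort fully complete; safe to restart the engine\n");
        return 0;
    }
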
708/* Private data for file operations */ 837/* Private data for file operations */
709struct ipath_filedata { 838struct ipath_filedata {
710 struct ipath_portdata *pd; 839 struct ipath_portdata *pd;
711 unsigned subport; 840 unsigned subport;
712 unsigned tidcursor; 841 unsigned tidcursor;
842 struct ipath_user_sdma_queue *pq;
713}; 843};
714extern struct list_head ipath_dev_list; 844extern struct list_head ipath_dev_list;
715extern spinlock_t ipath_devs_lock; 845extern spinlock_t ipath_devs_lock;
@@ -718,7 +848,7 @@ extern struct ipath_devdata *ipath_lookup(int unit);
718int ipath_init_chip(struct ipath_devdata *, int); 848int ipath_init_chip(struct ipath_devdata *, int);
719int ipath_enable_wc(struct ipath_devdata *dd); 849int ipath_enable_wc(struct ipath_devdata *dd);
720void ipath_disable_wc(struct ipath_devdata *dd); 850void ipath_disable_wc(struct ipath_devdata *dd);
721int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp); 851int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
722void ipath_shutdown_device(struct ipath_devdata *); 852void ipath_shutdown_device(struct ipath_devdata *);
723void ipath_clear_freeze(struct ipath_devdata *); 853void ipath_clear_freeze(struct ipath_devdata *);
724 854
@@ -741,7 +871,8 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
741extern int ipath_diag_inuse; 871extern int ipath_diag_inuse;
742 872
743irqreturn_t ipath_intr(int irq, void *devid); 873irqreturn_t ipath_intr(int irq, void *devid);
744int ipath_decode_err(char *buf, size_t blen, ipath_err_t err); 874int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
875 ipath_err_t err);
745#if __IPATH_INFO || __IPATH_DBG 876#if __IPATH_INFO || __IPATH_DBG
746extern const char *ipath_ibcstatus_str[]; 877extern const char *ipath_ibcstatus_str[];
747#endif 878#endif
@@ -774,6 +905,13 @@ int ipath_set_lid(struct ipath_devdata *, u32, u8);
774int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); 905int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
775void ipath_enable_armlaunch(struct ipath_devdata *); 906void ipath_enable_armlaunch(struct ipath_devdata *);
776void ipath_disable_armlaunch(struct ipath_devdata *); 907void ipath_disable_armlaunch(struct ipath_devdata *);
908void ipath_hol_down(struct ipath_devdata *);
909void ipath_hol_up(struct ipath_devdata *);
910void ipath_hol_event(unsigned long);
911void ipath_toggle_rclkrls(struct ipath_devdata *);
912void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
913void ipath_set_relock_poll(struct ipath_devdata *, int);
914void ipath_shutdown_relock_poll(struct ipath_devdata *);
777 915
778/* for use in system calls, where we want to know device type, etc. */ 916/* for use in system calls, where we want to know device type, etc. */
779#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd 917#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
@@ -781,11 +919,15 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
781 ((struct ipath_filedata *)(fp)->private_data)->subport 919 ((struct ipath_filedata *)(fp)->private_data)->subport
782#define tidcursor_fp(fp) \ 920#define tidcursor_fp(fp) \
783 ((struct ipath_filedata *)(fp)->private_data)->tidcursor 921 ((struct ipath_filedata *)(fp)->private_data)->tidcursor
922#define user_sdma_queue_fp(fp) \
923 ((struct ipath_filedata *)(fp)->private_data)->pq
784 924
785/* 925/*
786 * values for ipath_flags 926 * values for ipath_flags
787 */ 927 */
788/* The chip is up and initted */ 928 /* chip can report link latency (IB 1.2) */
929#define IPATH_HAS_LINK_LATENCY 0x1
930 /* The chip is up and initted */
789#define IPATH_INITTED 0x2 931#define IPATH_INITTED 0x2
790 /* set if any user code has set kr_rcvhdrsize */ 932 /* set if any user code has set kr_rcvhdrsize */
791#define IPATH_RCVHDRSZ_SET 0x4 933#define IPATH_RCVHDRSZ_SET 0x4
@@ -809,6 +951,8 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
809#define IPATH_LINKUNK 0x400 951#define IPATH_LINKUNK 0x400
810 /* Write combining flush needed for PIO */ 952 /* Write combining flush needed for PIO */
811#define IPATH_PIO_FLUSH_WC 0x1000 953#define IPATH_PIO_FLUSH_WC 0x1000
954 /* DMA Receive tail pointer */
955#define IPATH_NODMA_RTAIL 0x2000
812 /* no IB cable, or no device on IB cable */ 956 /* no IB cable, or no device on IB cable */
813#define IPATH_NOCABLE 0x4000 957#define IPATH_NOCABLE 0x4000
814 /* Supports port zero per packet receive interrupts via 958 /* Supports port zero per packet receive interrupts via
@@ -819,16 +963,26 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
819 /* packet/word counters are 32 bit, else those 4 counters 963 /* packet/word counters are 32 bit, else those 4 counters
820 * are 64bit */ 964 * are 64bit */
821#define IPATH_32BITCOUNTERS 0x20000 965#define IPATH_32BITCOUNTERS 0x20000
822 /* can miss port0 rx interrupts */
823 /* Interrupt register is 64 bits */ 966 /* Interrupt register is 64 bits */
824#define IPATH_INTREG_64 0x40000 967#define IPATH_INTREG_64 0x40000
968 /* can miss port0 rx interrupts */
825#define IPATH_DISABLED 0x80000 /* administratively disabled */ 969#define IPATH_DISABLED 0x80000 /* administratively disabled */
826 /* Use GPIO interrupts for new counters */ 970 /* Use GPIO interrupts for new counters */
827#define IPATH_GPIO_ERRINTRS 0x100000 971#define IPATH_GPIO_ERRINTRS 0x100000
828#define IPATH_SWAP_PIOBUFS 0x200000 972#define IPATH_SWAP_PIOBUFS 0x200000
973 /* Supports Send DMA */
974#define IPATH_HAS_SEND_DMA 0x400000
975 /* Supports Send Count (not just word count) in PBC */
976#define IPATH_HAS_PBC_CNT 0x800000
829 /* Suppress heartbeat, even if turning off loopback */ 977 /* Suppress heartbeat, even if turning off loopback */
830#define IPATH_NO_HRTBT 0x1000000 978#define IPATH_NO_HRTBT 0x1000000
979#define IPATH_HAS_THRESH_UPDATE 0x4000000
831#define IPATH_HAS_MULT_IB_SPEED 0x8000000 980#define IPATH_HAS_MULT_IB_SPEED 0x8000000
981#define IPATH_IB_AUTONEG_INPROG 0x10000000
982#define IPATH_IB_AUTONEG_FAILED 0x20000000
983 /* Linkdown-disable intentionally, Do not attempt to bring up */
984#define IPATH_IB_LINK_DISABLED 0x40000000
985#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
832 986
833/* Bits in GPIO for the added interrupts */ 987/* Bits in GPIO for the added interrupts */
834#define IPATH_GPIO_PORT0_BIT 2 988#define IPATH_GPIO_PORT0_BIT 2
@@ -847,13 +1001,18 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
847 1001
848/* free up any allocated data at closes */ 1002/* free up any allocated data at closes */
849void ipath_free_data(struct ipath_portdata *dd); 1003void ipath_free_data(struct ipath_portdata *dd);
850u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); 1004u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
1005void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1006 unsigned len, int avail);
1007void ipath_init_iba7220_funcs(struct ipath_devdata *);
851void ipath_init_iba6120_funcs(struct ipath_devdata *); 1008void ipath_init_iba6120_funcs(struct ipath_devdata *);
852void ipath_init_iba6110_funcs(struct ipath_devdata *); 1009void ipath_init_iba6110_funcs(struct ipath_devdata *);
853void ipath_get_eeprom_info(struct ipath_devdata *); 1010void ipath_get_eeprom_info(struct ipath_devdata *);
854int ipath_update_eeprom_log(struct ipath_devdata *dd); 1011int ipath_update_eeprom_log(struct ipath_devdata *dd);
855void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr); 1012void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
856u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 1013u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
1014void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
1015void ipath_force_pio_avail_update(struct ipath_devdata *);
857void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev); 1016void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
858 1017
859/* 1018/*
@@ -865,6 +1024,34 @@ void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
865#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */ 1024#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
866void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val); 1025void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
867 1026
1027/* send dma routines */
1028int setup_sdma(struct ipath_devdata *);
1029void teardown_sdma(struct ipath_devdata *);
1030void ipath_restart_sdma(struct ipath_devdata *);
1031void ipath_sdma_intr(struct ipath_devdata *);
1032int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
1033 u32, struct ipath_verbs_txreq *);
1034/* ipath_sdma_lock must be held when calling this. */
1035int ipath_sdma_make_progress(struct ipath_devdata *dd);
1036
1037/* must be called under ipath_sdma_lock */
1038static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
1039{
1040 return dd->ipath_sdma_descq_cnt -
1041 (dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
1042 1 - dd->ipath_sdma_desc_nreserved;
1043}
1044
1045static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
1046{
1047 dd->ipath_sdma_desc_nreserved += cnt;
1048}
1049
1050static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
1051{
1052 dd->ipath_sdma_desc_nreserved -= cnt;
1053}
1054
868/* 1055/*
869 * number of words used for protocol header if not set by ipath_userinit(); 1056 * number of words used for protocol header if not set by ipath_userinit();
870 */ 1057 */
@@ -875,6 +1062,8 @@ void ipath_release_user_pages(struct page **, size_t);
875void ipath_release_user_pages_on_close(struct page **, size_t); 1062void ipath_release_user_pages_on_close(struct page **, size_t);
876int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int); 1063int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
877int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int); 1064int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
1065int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
1066int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
878 1067
879/* these are used for the registers that vary with port */ 1068/* these are used for the registers that vary with port */
880void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg, 1069void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
@@ -891,8 +1080,7 @@ void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
891 1080
892/* 1081/*
893 * At the moment, none of the s-registers are writable, so no 1082 * At the moment, none of the s-registers are writable, so no
894 * ipath_write_sreg(), and none of the c-registers are writable, so no 1083 * ipath_write_sreg().
895 * ipath_write_creg().
896 */ 1084 */
897 1085
898/** 1086/**
@@ -1001,6 +1189,27 @@ static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
1001 pd->port_rcvhdrtail_kvaddr)); 1189 pd->port_rcvhdrtail_kvaddr));
1002} 1190}
1003 1191
1192static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
1193{
1194 const struct ipath_devdata *dd = pd->port_dd;
1195 u32 hdrqtail;
1196
1197 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1198 __le32 *rhf_addr;
1199 u32 seq;
1200
1201 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1202 pd->port_head + dd->ipath_rhf_offset;
1203 seq = ipath_hdrget_seq(rhf_addr);
1204 hdrqtail = pd->port_head;
1205 if (seq == pd->port_seq_cnt)
1206 hdrqtail++;
1207 } else
1208 hdrqtail = ipath_get_rcvhdrtail(pd);
1209
1210 return hdrqtail;
1211}
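
When IPATH_NODMA_RTAIL is set, the helper above infers the tail rather than reading a DMA'd value: if the receive-header-flags word at port_head still carries the expected sequence count, that entry is valid and the tail is reported as head + 1; otherwise head equals tail and the queue is empty. A hedged sketch of a poll loop built on it; the processing step is hypothetical, and real code wraps port_head at the queue size:

	while (pd->port_head != ipath_get_hdrqtail(pd)) {
		/* process the rcvhdrq entry at pd->port_head; handling
		 * the entry is what advances pd->port_seq_cnt here */
		pd->port_head++;
	}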
1212
1004static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r) 1213static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
1005{ 1214{
1006 return (dd->ipath_flags & IPATH_INTREG_64) ? 1215 return (dd->ipath_flags & IPATH_INTREG_64) ?
@@ -1029,6 +1238,21 @@ static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
1029} 1238}
1030 1239
1031/* 1240/*
1241 * from contents of IBCStatus (or a saved copy), return logical link state
1242 * combination of link state and linktraining state (down, active, init,
1243 * arm, etc.)
1244 */
1245static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
1246{
1247 u32 ibs;
1248 ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1249 dd->ibcs_lts_mask;
1250 ibs |= (u32)(ibcs &
1251 (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
1252 return ibs;
1253}
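
As a worked example, assuming the pre-7220 field layout (ibcs_lts_mask 0xF, ibcs_ls_shift 4), an IBCStatus value of 0x31 decodes to link state ACTIVE (0x3) combined with training state LINKUP (0x1), the same 0x31 encoding the removed IPATH_IBSTATE_ACTIVE constant used to spell out by hand.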
1254
1255/*
1032 * sysfs interface. 1256 * sysfs interface.
1033 */ 1257 */
1034 1258
@@ -1053,6 +1277,7 @@ int ipathfs_remove_device(struct ipath_devdata *);
1053dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long, 1277dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
1054 size_t, int); 1278 size_t, int);
1055dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int); 1279dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
1280const char *ipath_get_unit_name(int unit);
1056 1281
1057/* 1282/*
1058 * Flush write combining store buffers (if present) and perform a write 1283 * Flush write combining store buffers (if present) and perform a write
@@ -1065,11 +1290,8 @@ dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
1065#endif 1290#endif
1066 1291
1067extern unsigned ipath_debug; /* debugging bit mask */ 1292extern unsigned ipath_debug; /* debugging bit mask */
1068 1293extern unsigned ipath_linkrecovery;
1069#define IPATH_MAX_PARITY_ATTEMPTS 10000 /* max times to try recovery */ 1294extern unsigned ipath_mtu4096;
1070
1071const char *ipath_get_unit_name(int unit);
1072
1073extern struct mutex ipath_mutex; 1295extern struct mutex ipath_mutex;
1074 1296
1075#define IPATH_DRV_NAME "ib_ipath" 1297#define IPATH_DRV_NAME "ib_ipath"
@@ -1096,7 +1318,7 @@ extern struct mutex ipath_mutex;
1096 1318
1097# define __IPATH_DBG_WHICH(which,fmt,...) \ 1319# define __IPATH_DBG_WHICH(which,fmt,...) \
1098 do { \ 1320 do { \
1099 if(unlikely(ipath_debug&(which))) \ 1321 if (unlikely(ipath_debug & (which))) \
1100 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \ 1322 printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
1101 __func__,##__VA_ARGS__); \ 1323 __func__,##__VA_ARGS__); \
1102 } while(0) 1324 } while(0)
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index b34b91d3723a..1ff46ae7dd99 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -146,6 +146,15 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp,
146 return reply(smp); 146 return reply(smp);
147} 147}
148 148
149static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
150{
151 (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
152}
153
154static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
155{
156 (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
157}
149 158
150static int get_overrunthreshold(struct ipath_devdata *dd) 159static int get_overrunthreshold(struct ipath_devdata *dd)
151{ 160{
@@ -226,6 +235,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
226 struct ib_device *ibdev, u8 port) 235 struct ib_device *ibdev, u8 port)
227{ 236{
228 struct ipath_ibdev *dev; 237 struct ipath_ibdev *dev;
238 struct ipath_devdata *dd;
229 struct ib_port_info *pip = (struct ib_port_info *)smp->data; 239 struct ib_port_info *pip = (struct ib_port_info *)smp->data;
230 u16 lid; 240 u16 lid;
231 u8 ibcstat; 241 u8 ibcstat;
@@ -239,6 +249,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
239 } 249 }
240 250
241 dev = to_idev(ibdev); 251 dev = to_idev(ibdev);
252 dd = dev->dd;
242 253
243 /* Clear all fields. Only set the non-zero fields. */ 254 /* Clear all fields. Only set the non-zero fields. */
244 memset(smp->data, 0, sizeof(smp->data)); 255 memset(smp->data, 0, sizeof(smp->data));
@@ -248,25 +259,28 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
248 dev->mkeyprot == 0) 259 dev->mkeyprot == 0)
249 pip->mkey = dev->mkey; 260 pip->mkey = dev->mkey;
250 pip->gid_prefix = dev->gid_prefix; 261 pip->gid_prefix = dev->gid_prefix;
251 lid = dev->dd->ipath_lid; 262 lid = dd->ipath_lid;
252 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; 263 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
253 pip->sm_lid = cpu_to_be16(dev->sm_lid); 264 pip->sm_lid = cpu_to_be16(dev->sm_lid);
254 pip->cap_mask = cpu_to_be32(dev->port_cap_flags); 265 pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
255 /* pip->diag_code; */ 266 /* pip->diag_code; */
256 pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period); 267 pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
257 pip->local_port_num = port; 268 pip->local_port_num = port;
258 pip->link_width_enabled = dev->link_width_enabled; 269 pip->link_width_enabled = dd->ipath_link_width_enabled;
259 pip->link_width_supported = 3; /* 1x or 4x */ 270 pip->link_width_supported = dd->ipath_link_width_supported;
260 pip->link_width_active = 2; /* 4x */ 271 pip->link_width_active = dd->ipath_link_width_active;
261 pip->linkspeed_portstate = 0x10; /* 2.5Gbps */ 272 pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
262 ibcstat = dev->dd->ipath_lastibcstat; 273 ibcstat = dd->ipath_lastibcstat;
263 pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1; 274 /* map LinkState to IB portinfo values. */
275 pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
276
264 pip->portphysstate_linkdown = 277 pip->portphysstate_linkdown =
265 (ipath_cvt_physportstate[ibcstat & 0xf] << 4) | 278 (ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
266 (get_linkdowndefaultstate(dev->dd) ? 1 : 2); 279 (get_linkdowndefaultstate(dd) ? 1 : 2);
267 pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dev->dd->ipath_lmc; 280 pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
268 pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */ 281 pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
269 switch (dev->dd->ipath_ibmtu) { 282 dd->ipath_link_speed_enabled;
283 switch (dd->ipath_ibmtu) {
270 case 4096: 284 case 4096:
271 mtu = IB_MTU_4096; 285 mtu = IB_MTU_4096;
272 break; 286 break;
@@ -292,19 +306,15 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
292 /* pip->vl_arb_high_cap; // only one VL */ 306 /* pip->vl_arb_high_cap; // only one VL */
293 /* pip->vl_arb_low_cap; // only one VL */ 307 /* pip->vl_arb_low_cap; // only one VL */
294 /* InitTypeReply = 0 */ 308 /* InitTypeReply = 0 */
295 /* 309 /* our mtu cap depends on whether 4K MTU is enabled or not */
296 * Note: the chips support a maximum MTU of 4096, but the driver 310 pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
297 * hasn't implemented this feature yet, so set the maximum value 311 /* HCAs ignore VLStallCount and HOQLife */
298 * to 2048.
299 */
300 pip->inittypereply_mtucap = IB_MTU_2048;
301 // HCAs ignore VLStallCount and HOQLife
302 /* pip->vlstallcnt_hoqlife; */ 312 /* pip->vlstallcnt_hoqlife; */
303 pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */ 313 pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
304 pip->mkey_violations = cpu_to_be16(dev->mkey_violations); 314 pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
305 /* P_KeyViolations are counted by hardware. */ 315 /* P_KeyViolations are counted by hardware. */
306 pip->pkey_violations = 316 pip->pkey_violations =
307 cpu_to_be16((ipath_get_cr_errpkey(dev->dd) - 317 cpu_to_be16((ipath_get_cr_errpkey(dd) -
308 dev->z_pkey_violations) & 0xFFFF); 318 dev->z_pkey_violations) & 0xFFFF);
309 pip->qkey_violations = cpu_to_be16(dev->qkey_violations); 319 pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
310 /* Only the hardware GUID is supported for now */ 320 /* Only the hardware GUID is supported for now */
@@ -313,10 +323,17 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
313 /* 32.768 usec. response time (guessing) */ 323 /* 32.768 usec. response time (guessing) */
314 pip->resv_resptimevalue = 3; 324 pip->resv_resptimevalue = 3;
315 pip->localphyerrors_overrunerrors = 325 pip->localphyerrors_overrunerrors =
316 (get_phyerrthreshold(dev->dd) << 4) | 326 (get_phyerrthreshold(dd) << 4) |
317 get_overrunthreshold(dev->dd); 327 get_overrunthreshold(dd);
318 /* pip->max_credit_hint; */ 328 /* pip->max_credit_hint; */
319 /* pip->link_roundtrip_latency[3]; */ 329 if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
330 u32 v;
331
332 v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
333 pip->link_roundtrip_latency[0] = v >> 16;
334 pip->link_roundtrip_latency[1] = v >> 8;
335 pip->link_roundtrip_latency[2] = v;
336 }
320 337
321 ret = reply(smp); 338 ret = reply(smp);
322 339
@@ -444,19 +461,25 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
444 ib_dispatch_event(&event); 461 ib_dispatch_event(&event);
445 } 462 }
446 463
447 /* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */ 464 /* Allow 1x or 4x to be set (see 14.2.6.6). */
448 lwe = pip->link_width_enabled; 465 lwe = pip->link_width_enabled;
449 if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE)) 466 if (lwe) {
450 goto err; 467 if (lwe == 0xFF)
451 if (lwe == 0xFF) 468 lwe = dd->ipath_link_width_supported;
452 dev->link_width_enabled = 3; /* 1x or 4x */ 469 else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
453 else if (lwe) 470 goto err;
454 dev->link_width_enabled = lwe; 471 set_link_width_enabled(dd, lwe);
472 }
455 473
456 /* Only 2.5 Gbs supported. */ 474 /* Allow 2.5 or 5.0 Gbs. */
457 lse = pip->linkspeedactive_enabled & 0xF; 475 lse = pip->linkspeedactive_enabled & 0xF;
458 if (lse >= 2 && lse <= 0xE) 476 if (lse) {
459 goto err; 477 if (lse == 15)
478 lse = dd->ipath_link_speed_supported;
479 else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
480 goto err;
481 set_link_speed_enabled(dd, lse);
482 }
460 483
461 /* Set link down default state. */ 484 /* Set link down default state. */
462 switch (pip->portphysstate_linkdown & 0xF) { 485 switch (pip->portphysstate_linkdown & 0xF) {
@@ -491,6 +514,8 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
491 mtu = 2048; 514 mtu = 2048;
492 break; 515 break;
493 case IB_MTU_4096: 516 case IB_MTU_4096:
517 if (!ipath_mtu4096)
518 goto err;
494 mtu = 4096; 519 mtu = 4096;
495 break; 520 break;
496 default: 521 default:
@@ -565,6 +590,10 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
565 else 590 else
566 goto err; 591 goto err;
567 ipath_set_linkstate(dd, lstate); 592 ipath_set_linkstate(dd, lstate);
593 if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
594 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
595 goto done;
596 }
568 ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED | 597 ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
569 IPATH_LINKACTIVE, 1000); 598 IPATH_LINKACTIVE, 1000);
570 break; 599 break;
@@ -948,10 +977,14 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
948 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample 977 * nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
949 * intervals are counted in ticks. Since we use Linux timers, that 978 * intervals are counted in ticks. Since we use Linux timers, that
950 * count in jiffies, we can't sample for less than 1000 ticks if HZ 979 * count in jiffies, we can't sample for less than 1000 ticks if HZ
951 * == 1000 (4000 ticks if HZ is 250). 980 * == 1000 (4000 ticks if HZ is 250). link_speed_active returns 2 for
981 * DDR, 1 for SDR; set the tick to 1 for DDR and 0 for SDR on chips that
982 * have hardware support for delaying packets.
952 */ 983 */
953 /* XXX This is WRONG. */ 984 if (crp->cr_psstat)
954 p->tick = 250; /* 1 usec. */ 985 p->tick = dev->dd->ipath_link_speed_active - 1;
986 else
987 p->tick = 250; /* 1 usec. */
955 p->counter_width = 4; /* 32 bit counters */ 988 p->counter_width = 4; /* 32 bit counters */
956 p->counter_mask0_9 = COUNTER_MASK0_9; 989 p->counter_mask0_9 = COUNTER_MASK0_9;
957 spin_lock_irqsave(&dev->pending_lock, flags); 990 spin_lock_irqsave(&dev->pending_lock, flags);
@@ -1364,7 +1397,8 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
1364 } 1397 }
1365 1398
1366 /* Is the mkey in the process of expiring? */ 1399 /* Is the mkey in the process of expiring? */
1367 if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) { 1400 if (dev->mkey_lease_timeout &&
1401 time_after_eq(jiffies, dev->mkey_lease_timeout)) {
1368 /* Clear timeout and mkey protection field. */ 1402 /* Clear timeout and mkey protection field. */
1369 dev->mkey_lease_timeout = 0; 1403 dev->mkey_lease_timeout = 0;
1370 dev->mkeyprot = 0; 1404 dev->mkeyprot = 0;
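
The switch to time_after_eq() in this hunk matters once jiffies wraps: the macro compares (long)((a) - (b)) >= 0, so the subtraction stays correct across wraparound, while a raw >= does not. A small worked example, assuming 32-bit jiffies:

	/*
	 * lease timeout set shortly before wrap:  mkey_lease_timeout = 0xFFFFFFF0
	 * current time shortly after the wrap:    jiffies            = 0x00000010
	 *
	 * jiffies >= mkey_lease_timeout              -> false (lease wrongly alive)
	 * (long)(jiffies - mkey_lease_timeout) >= 0  -> (long)0x20 >= 0 -> true
	 */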
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 087ed3166479..dd5b6e9d57c2 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -340,6 +340,7 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
340 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR; 340 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
341 qp->s_hdrwords = 0; 341 qp->s_hdrwords = 0;
342 qp->s_wqe = NULL; 342 qp->s_wqe = NULL;
343 qp->s_pkt_delay = 0;
343 qp->s_psn = 0; 344 qp->s_psn = 0;
344 qp->r_psn = 0; 345 qp->r_psn = 0;
345 qp->r_msn = 0; 346 qp->r_msn = 0;
@@ -392,7 +393,6 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
392 qp->ibqp.qp_num, qp->remote_qpn, err); 393 qp->ibqp.qp_num, qp->remote_qpn, err);
393 394
394 spin_lock(&dev->pending_lock); 395 spin_lock(&dev->pending_lock);
395 /* XXX What if its already removed by the timeout code? */
396 if (!list_empty(&qp->timerwait)) 396 if (!list_empty(&qp->timerwait))
397 list_del_init(&qp->timerwait); 397 list_del_init(&qp->timerwait);
398 if (!list_empty(&qp->piowait)) 398 if (!list_empty(&qp->piowait))
@@ -516,13 +516,13 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
516 goto inval; 516 goto inval;
517 517
518 /* 518 /*
519 * Note: the chips support a maximum MTU of 4096, but the driver 519 * don't allow invalid Path MTU values, or values greater than 2048
520 * hasn't implemented this feature yet, so don't allow Path MTU 520 * unless we are configured for a 4KB MTU
521 * values greater than 2048.
522 */ 521 */
523 if (attr_mask & IB_QP_PATH_MTU) 522 if ((attr_mask & IB_QP_PATH_MTU) &&
524 if (attr->path_mtu > IB_MTU_2048) 523 (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
525 goto inval; 524 (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
525 goto inval;
526 526
527 if (attr_mask & IB_QP_PATH_MIG_STATE) 527 if (attr_mask & IB_QP_PATH_MIG_STATE)
528 if (attr->path_mig_state != IB_MIG_MIGRATED && 528 if (attr->path_mig_state != IB_MIG_MIGRATED &&
@@ -564,8 +564,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
564 if (attr_mask & IB_QP_ACCESS_FLAGS) 564 if (attr_mask & IB_QP_ACCESS_FLAGS)
565 qp->qp_access_flags = attr->qp_access_flags; 565 qp->qp_access_flags = attr->qp_access_flags;
566 566
567 if (attr_mask & IB_QP_AV) 567 if (attr_mask & IB_QP_AV) {
568 qp->remote_ah_attr = attr->ah_attr; 568 qp->remote_ah_attr = attr->ah_attr;
569 qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
570 }
569 571
570 if (attr_mask & IB_QP_PATH_MTU) 572 if (attr_mask & IB_QP_PATH_MTU)
571 qp->path_mtu = attr->path_mtu; 573 qp->path_mtu = attr->path_mtu;
@@ -748,22 +750,33 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
748 size_t sz; 750 size_t sz;
749 struct ib_qp *ret; 751 struct ib_qp *ret;
750 752
751 if (init_attr->cap.max_send_sge > ib_ipath_max_sges || 753 if (init_attr->create_flags) {
752 init_attr->cap.max_recv_sge > ib_ipath_max_sges || 754 ret = ERR_PTR(-EINVAL);
753 init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
754 init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
755 ret = ERR_PTR(-ENOMEM);
756 goto bail; 755 goto bail;
757 } 756 }
758 757
759 if (init_attr->cap.max_send_sge + 758 if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
760 init_attr->cap.max_recv_sge + 759 init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
761 init_attr->cap.max_send_wr +
762 init_attr->cap.max_recv_wr == 0) {
763 ret = ERR_PTR(-EINVAL); 760 ret = ERR_PTR(-EINVAL);
764 goto bail; 761 goto bail;
765 } 762 }
766 763
764 /* Check receive queue parameters if no SRQ is specified. */
765 if (!init_attr->srq) {
766 if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
767 init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
768 ret = ERR_PTR(-EINVAL);
769 goto bail;
770 }
771 if (init_attr->cap.max_send_sge +
772 init_attr->cap.max_send_wr +
773 init_attr->cap.max_recv_sge +
774 init_attr->cap.max_recv_wr == 0) {
775 ret = ERR_PTR(-EINVAL);
776 goto bail;
777 }
778 }
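
The reordering here tracks the verbs model: when an SRQ is supplied, receive-queue sizing belongs to the SRQ, so only the send-side capabilities are checked against the module limits, and the all-zero-capabilities rejection likewise applies only when the QP owns its own receive queue.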
779
767 switch (init_attr->qp_type) { 780 switch (init_attr->qp_type) {
768 case IB_QPT_UC: 781 case IB_QPT_UC:
769 case IB_QPT_RC: 782 case IB_QPT_RC:
@@ -840,6 +853,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
840 goto bail_qp; 853 goto bail_qp;
841 } 854 }
842 qp->ip = NULL; 855 qp->ip = NULL;
856 qp->s_tx = NULL;
843 ipath_reset_qp(qp, init_attr->qp_type); 857 ipath_reset_qp(qp, init_attr->qp_type);
844 break; 858 break;
845 859
@@ -945,12 +959,20 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
945 /* Stop the sending tasklet. */ 959 /* Stop the sending tasklet. */
946 tasklet_kill(&qp->s_task); 960 tasklet_kill(&qp->s_task);
947 961
962 if (qp->s_tx) {
963 atomic_dec(&qp->refcount);
964 if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
965 kfree(qp->s_tx->txreq.map_addr);
966 }
967
948 /* Make sure the QP isn't on the timeout list. */ 968 /* Make sure the QP isn't on the timeout list. */
949 spin_lock_irqsave(&dev->pending_lock, flags); 969 spin_lock_irqsave(&dev->pending_lock, flags);
950 if (!list_empty(&qp->timerwait)) 970 if (!list_empty(&qp->timerwait))
951 list_del_init(&qp->timerwait); 971 list_del_init(&qp->timerwait);
952 if (!list_empty(&qp->piowait)) 972 if (!list_empty(&qp->piowait))
953 list_del_init(&qp->piowait); 973 list_del_init(&qp->piowait);
974 if (qp->s_tx)
975 list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
954 spin_unlock_irqrestore(&dev->pending_lock, flags); 976 spin_unlock_irqrestore(&dev->pending_lock, flags);
955 977
956 /* 978 /*
@@ -1021,7 +1043,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
1021 qp->ibqp.qp_num, qp->remote_qpn, wc->status); 1043 qp->ibqp.qp_num, qp->remote_qpn, wc->status);
1022 1044
1023 spin_lock(&dev->pending_lock); 1045 spin_lock(&dev->pending_lock);
1024 /* XXX What if its already removed by the timeout code? */
1025 if (!list_empty(&qp->timerwait)) 1046 if (!list_empty(&qp->timerwait))
1026 list_del_init(&qp->timerwait); 1047 list_del_init(&qp->timerwait);
1027 if (!list_empty(&qp->piowait)) 1048 if (!list_empty(&qp->piowait))
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 40f3e37d7adc..c405dfba5531 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -31,6 +31,8 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/io.h>
35
34#include "ipath_verbs.h" 36#include "ipath_verbs.h"
35#include "ipath_kernel.h" 37#include "ipath_kernel.h"
36 38
@@ -306,7 +308,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
306 else { 308 else {
307 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); 309 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
308 /* Immediate data comes after the BTH */ 310 /* Immediate data comes after the BTH */
309 ohdr->u.imm_data = wqe->wr.imm_data; 311 ohdr->u.imm_data = wqe->wr.ex.imm_data;
310 hwords += 1; 312 hwords += 1;
311 } 313 }
312 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 314 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -344,7 +346,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
344 qp->s_state = 346 qp->s_state =
345 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 347 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
346 /* Immediate data comes after RETH */ 348 /* Immediate data comes after RETH */
347 ohdr->u.rc.imm_data = wqe->wr.imm_data; 349 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
348 hwords += 1; 350 hwords += 1;
349 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 351 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
350 bth0 |= 1 << 23; 352 bth0 |= 1 << 23;
@@ -488,7 +490,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
488 else { 490 else {
489 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 491 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
490 /* Immediate data comes after the BTH */ 492 /* Immediate data comes after the BTH */
491 ohdr->u.imm_data = wqe->wr.imm_data; 493 ohdr->u.imm_data = wqe->wr.ex.imm_data;
492 hwords += 1; 494 hwords += 1;
493 } 495 }
494 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 496 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -524,7 +526,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
524 else { 526 else {
525 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 527 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
526 /* Immediate data comes after the BTH */ 528 /* Immediate data comes after the BTH */
527 ohdr->u.imm_data = wqe->wr.imm_data; 529 ohdr->u.imm_data = wqe->wr.ex.imm_data;
528 hwords += 1; 530 hwords += 1;
529 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 531 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
530 bth0 |= 1 << 23; 532 bth0 |= 1 << 23;
@@ -585,19 +587,39 @@ bail:
585static void send_rc_ack(struct ipath_qp *qp) 587static void send_rc_ack(struct ipath_qp *qp)
586{ 588{
587 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 589 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
590 struct ipath_devdata *dd;
588 u16 lrh0; 591 u16 lrh0;
589 u32 bth0; 592 u32 bth0;
590 u32 hwords; 593 u32 hwords;
594 u32 __iomem *piobuf;
591 struct ipath_ib_header hdr; 595 struct ipath_ib_header hdr;
592 struct ipath_other_headers *ohdr; 596 struct ipath_other_headers *ohdr;
593 unsigned long flags; 597 unsigned long flags;
594 598
599 spin_lock_irqsave(&qp->s_lock, flags);
600
595 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ 601 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
596 if (qp->r_head_ack_queue != qp->s_tail_ack_queue || 602 if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
597 (qp->s_flags & IPATH_S_ACK_PENDING) || 603 (qp->s_flags & IPATH_S_ACK_PENDING) ||
598 qp->s_ack_state != OP(ACKNOWLEDGE)) 604 qp->s_ack_state != OP(ACKNOWLEDGE))
599 goto queue_ack; 605 goto queue_ack;
600 606
607 spin_unlock_irqrestore(&qp->s_lock, flags);
608
609 dd = dev->dd;
610 piobuf = ipath_getpiobuf(dd, 0, NULL);
611 if (!piobuf) {
612 /*
613 * We are out of PIO buffers at the moment.
614 * Pass responsibility for sending the ACK to the
615 * send tasklet so that when a PIO buffer becomes
616 * available, the ACK is sent ahead of other outgoing
617 * packets.
618 */
619 spin_lock_irqsave(&qp->s_lock, flags);
620 goto queue_ack;
621 }
622
601 /* Construct the header. */ 623 /* Construct the header. */
602 ohdr = &hdr.u.oth; 624 ohdr = &hdr.u.oth;
603 lrh0 = IPATH_LRH_BTH; 625 lrh0 = IPATH_LRH_BTH;
@@ -611,7 +633,7 @@ static void send_rc_ack(struct ipath_qp *qp)
611 lrh0 = IPATH_LRH_GRH; 633 lrh0 = IPATH_LRH_GRH;
612 } 634 }
613 /* read pkey_index w/o lock (it's atomic) */ 635 /* read pkey_index w/o lock (it's atomic) */
614 bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) | 636 bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
615 (OP(ACKNOWLEDGE) << 24) | (1 << 22); 637 (OP(ACKNOWLEDGE) << 24) | (1 << 22);
616 if (qp->r_nak_state) 638 if (qp->r_nak_state)
617 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | 639 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
@@ -623,30 +645,29 @@ static void send_rc_ack(struct ipath_qp *qp)
623 hdr.lrh[0] = cpu_to_be16(lrh0); 645 hdr.lrh[0] = cpu_to_be16(lrh0);
624 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 646 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
625 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); 647 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
626 hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid); 648 hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
627 ohdr->bth[0] = cpu_to_be32(bth0); 649 ohdr->bth[0] = cpu_to_be32(bth0);
628 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); 650 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
629 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); 651 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
630 652
631 /* 653 writeq(hwords + 1, piobuf);
632 * If we can send the ACK, clear the ACK state.
633 */
634 if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
635 dev->n_unicast_xmit++;
636 goto done;
637 }
638 654
639 /* 655 if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
640 * We are out of PIO buffers at the moment. 656 u32 *hdrp = (u32 *) &hdr;
641 * Pass responsibility for sending the ACK to the 657
642 * send tasklet so that when a PIO buffer becomes 658 ipath_flush_wc();
643 * available, the ACK is sent ahead of other outgoing 659 __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
644 * packets. 660 ipath_flush_wc();
645 */ 661 __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
646 dev->n_rc_qacks++; 662 } else
663 __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
664
665 ipath_flush_wc();
666
667 dev->n_unicast_xmit++;
668 goto done;
647 669
648queue_ack: 670queue_ack:
649 spin_lock_irqsave(&qp->s_lock, flags);
650 dev->n_rc_qacks++; 671 dev->n_rc_qacks++;
651 qp->s_flags |= IPATH_S_ACK_PENDING; 672 qp->s_flags |= IPATH_S_ACK_PENDING;
652 qp->s_nak_state = qp->r_nak_state; 673 qp->s_nak_state = qp->r_nak_state;
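
The copy-out sequence added to send_rc_ack() follows the usual write-combining discipline: the PBC word goes out first, the header body next, and on mappings that need explicit flushing the final header word is written only after a flush, so the chip cannot launch a partially written packet. A condensed restatement of the calls above, with one plausible reading of each flush noted in comments (a sketch, not new code):

	writeq(hwords + 1, piobuf);	/* first the PBC: the packet length word */
	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		ipath_flush_wc();	/* PBC must reach the chip before the header */
		__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
		ipath_flush_wc();	/* header body before the final, triggering word */
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
	ipath_flush_wc();		/* push the store buffers out now */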
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 92ad73a7fff0..8f44d0cf3833 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -63,67 +63,92 @@
63/* kr_control bits */ 63/* kr_control bits */
64#define INFINIPATH_C_FREEZEMODE 0x00000002 64#define INFINIPATH_C_FREEZEMODE 0x00000002
65#define INFINIPATH_C_LINKENABLE 0x00000004 65#define INFINIPATH_C_LINKENABLE 0x00000004
66#define INFINIPATH_C_RESET 0x00000001
67 66
68/* kr_sendctrl bits */ 67/* kr_sendctrl bits */
69#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16 68#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
69#define INFINIPATH_S_UPDTHRESH_SHIFT 24
70#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
70 71
71#define IPATH_S_ABORT 0 72#define IPATH_S_ABORT 0
72#define IPATH_S_PIOINTBUFAVAIL 1 73#define IPATH_S_PIOINTBUFAVAIL 1
73#define IPATH_S_PIOBUFAVAILUPD 2 74#define IPATH_S_PIOBUFAVAILUPD 2
74#define IPATH_S_PIOENABLE 3 75#define IPATH_S_PIOENABLE 3
76#define IPATH_S_SDMAINTENABLE 9
77#define IPATH_S_SDMASINGLEDESCRIPTOR 10
78#define IPATH_S_SDMAENABLE 11
79#define IPATH_S_SDMAHALT 12
75#define IPATH_S_DISARM 31 80#define IPATH_S_DISARM 31
76 81
77#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT) 82#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT)
78#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL) 83#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL)
79#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD) 84#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD)
80#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE) 85#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE)
86#define INFINIPATH_S_SDMAINTENABLE (1U << IPATH_S_SDMAINTENABLE)
87#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
88 (1U << IPATH_S_SDMASINGLEDESCRIPTOR)
89#define INFINIPATH_S_SDMAENABLE (1U << IPATH_S_SDMAENABLE)
90#define INFINIPATH_S_SDMAHALT (1U << IPATH_S_SDMAHALT)
81#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM) 91#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM)
82 92
83/* kr_rcvctrl bits */ 93/* kr_rcvctrl bits that are the same on multiple chips */
84#define INFINIPATH_R_PORTENABLE_SHIFT 0 94#define INFINIPATH_R_PORTENABLE_SHIFT 0
85#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38) 95#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
86 96
87/* kr_intstatus, kr_intclear, kr_intmask bits */ 97/* kr_intstatus, kr_intclear, kr_intmask bits */
88#define INFINIPATH_I_RCVURG_SHIFT 0 98#define INFINIPATH_I_SDMAINT 0x8000000000000000ULL
89#define INFINIPATH_I_RCVAVAIL_SHIFT 12 99#define INFINIPATH_I_SDMADISABLED 0x4000000000000000ULL
90#define INFINIPATH_I_ERROR 0x80000000 100#define INFINIPATH_I_ERROR 0x0000000080000000ULL
91#define INFINIPATH_I_SPIOSENT 0x40000000 101#define INFINIPATH_I_SPIOSENT 0x0000000040000000ULL
92#define INFINIPATH_I_SPIOBUFAVAIL 0x20000000 102#define INFINIPATH_I_SPIOBUFAVAIL 0x0000000020000000ULL
93#define INFINIPATH_I_GPIO 0x10000000 103#define INFINIPATH_I_GPIO 0x0000000010000000ULL
104#define INFINIPATH_I_JINT 0x0000000004000000ULL
94 105
95/* kr_errorstatus, kr_errorclear, kr_errormask bits */ 106/* kr_errorstatus, kr_errorclear, kr_errormask bits */
96#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL 107#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL
97#define INFINIPATH_E_RVCRC 0x0000000000000002ULL 108#define INFINIPATH_E_RVCRC 0x0000000000000002ULL
98#define INFINIPATH_E_RICRC 0x0000000000000004ULL 109#define INFINIPATH_E_RICRC 0x0000000000000004ULL
99#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL 110#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL
100#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL 111#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL
101#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL 112#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL
102#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL 113#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL
103#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL 114#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL
104#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL 115#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL
105#define INFINIPATH_E_REBP 0x0000000000000200ULL 116#define INFINIPATH_E_REBP 0x0000000000000200ULL
106#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL 117#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL
107#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL 118#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL
108#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL 119#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL
109#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL 120#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL
110#define INFINIPATH_E_RBADTID 0x0000000000004000ULL 121#define INFINIPATH_E_RBADTID 0x0000000000004000ULL
111#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL 122#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL
112#define INFINIPATH_E_RHDR 0x0000000000010000ULL 123#define INFINIPATH_E_RHDR 0x0000000000010000ULL
113#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL 124#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL
114#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL 125#define INFINIPATH_E_SENDSPECIALTRIGGER 0x0000000008000000ULL
115#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL 126#define INFINIPATH_E_SDMADISABLED 0x0000000010000000ULL
116#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL 127#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL
117#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL 128#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL
118#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL 129#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL
119#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL 130#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL
120#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL 131#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL
121#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL 132#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
122#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL 133#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL
123#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL 134#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
124#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL 135#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL
125#define INFINIPATH_E_RESET 0x0004000000000000ULL 136#define INFINIPATH_E_SENDBUFMISUSE 0x0000004000000000ULL
126#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL 137#define INFINIPATH_E_SDMAGENMISMATCH 0x0000008000000000ULL
138#define INFINIPATH_E_SDMAOUTOFBOUND 0x0000010000000000ULL
139#define INFINIPATH_E_SDMATAILOUTOFBOUND 0x0000020000000000ULL
140#define INFINIPATH_E_SDMABASE 0x0000040000000000ULL
141#define INFINIPATH_E_SDMA1STDESC 0x0000080000000000ULL
142#define INFINIPATH_E_SDMARPYTAG 0x0000100000000000ULL
143#define INFINIPATH_E_SDMADWEN 0x0000200000000000ULL
144#define INFINIPATH_E_SDMAMISSINGDW 0x0000400000000000ULL
145#define INFINIPATH_E_SDMAUNEXPDATA 0x0000800000000000ULL
146#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
147#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL
148#define INFINIPATH_E_RESET 0x0004000000000000ULL
149#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
150#define INFINIPATH_E_SDMADESCADDRMISALIGN 0x0010000000000000ULL
151#define INFINIPATH_E_INVALIDEEPCMD 0x0020000000000000ULL
127 152
128/* 153/*
129 * this is used to print "common" packet errors only when the 154 * this is used to print "common" packet errors only when the
@@ -134,6 +159,17 @@
134 | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \ 159 | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
135 | INFINIPATH_E_REBP ) 160 | INFINIPATH_E_REBP )
136 161
162/* Convenience for decoding Send DMA errors */
163#define INFINIPATH_E_SDMAERRS ( \
164 INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
165 INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
166 INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
167 INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
168 INFINIPATH_E_SDMAUNEXPDATA | \
169 INFINIPATH_E_SDMADESCADDRMISALIGN | \
170 INFINIPATH_E_SDMADISABLED | \
171 INFINIPATH_E_SENDBUFMISUSE)
172
137/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ 173/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
138/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo 174/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
139 * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID 175 * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID
@@ -158,7 +194,7 @@
158#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL 194#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL
159/* waldo specific -- find the rest in ipath_6110.c */ 195/* waldo specific -- find the rest in ipath_6110.c */
160#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL 196#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL
161/* monty specific -- find the rest in ipath_6120.c */ 197/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
162#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL 198#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL
163 199
164/* kr_hwdiagctrl bits */ 200/* kr_hwdiagctrl bits */
@@ -185,8 +221,8 @@
185#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 221#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
186#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 222#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
187#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL 223#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
188#define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ 224#define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
189#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ 225#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
190#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ 226#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
191#define INFINIPATH_IBCC_LINKCMD_SHIFT 18 227#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
192#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL 228#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
@@ -201,10 +237,9 @@
201#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL 237#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
202 238
203/* kr_ibcstatus bits */ 239/* kr_ibcstatus bits */
204#define INFINIPATH_IBCS_LINKTRAININGSTATE_MASK 0xF
205#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0 240#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
206#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7 241#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
207#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 242
208#define INFINIPATH_IBCS_TXREADY 0x40000000 243#define INFINIPATH_IBCS_TXREADY 0x40000000
209#define INFINIPATH_IBCS_TXCREDITOK 0x80000000 244#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
210/* link training states (shift by 245/* link training states (shift by
@@ -222,30 +257,13 @@
222#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c 257#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c
223#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e 258#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e
224#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f 259#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f
225/* link state machine states (shift by INFINIPATH_IBCS_LINKSTATE_SHIFT) */ 260/* link state machine states (shift by ibcs_ls_shift) */
226#define INFINIPATH_IBCS_L_STATE_DOWN 0x0 261#define INFINIPATH_IBCS_L_STATE_DOWN 0x0
227#define INFINIPATH_IBCS_L_STATE_INIT 0x1 262#define INFINIPATH_IBCS_L_STATE_INIT 0x1
228#define INFINIPATH_IBCS_L_STATE_ARM 0x2 263#define INFINIPATH_IBCS_L_STATE_ARM 0x2
229#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3 264#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3
230#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4 265#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4
231 266
232/* combination link status states that we use with some frequency */
233#define IPATH_IBSTATE_MASK ((INFINIPATH_IBCS_LINKTRAININGSTATE_MASK \
234 << INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | \
235 (INFINIPATH_IBCS_LINKSTATE_MASK \
236 <<INFINIPATH_IBCS_LINKSTATE_SHIFT))
237#define IPATH_IBSTATE_INIT ((INFINIPATH_IBCS_L_STATE_INIT \
238 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
239 (INFINIPATH_IBCS_LT_STATE_LINKUP \
240 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
241#define IPATH_IBSTATE_ARM ((INFINIPATH_IBCS_L_STATE_ARM \
242 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
243 (INFINIPATH_IBCS_LT_STATE_LINKUP \
244 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
245#define IPATH_IBSTATE_ACTIVE ((INFINIPATH_IBCS_L_STATE_ACTIVE \
246 << INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
247 (INFINIPATH_IBCS_LT_STATE_LINKUP \
248 <<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
249 267
250/* kr_extstatus bits */ 268/* kr_extstatus bits */
251#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1 269#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
@@ -286,8 +304,7 @@
286/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */ 304/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
287#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL 305#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
288 306
289/* kr_xgxsconfig bits */ 307/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
290#define INFINIPATH_XGXS_RESET 0x7ULL
291#define INFINIPATH_XGXS_RX_POL_SHIFT 19 308#define INFINIPATH_XGXS_RX_POL_SHIFT 19
292#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL 309#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
293 310
@@ -417,6 +434,29 @@ struct ipath_kregs {
417 ipath_kreg kr_pcieq1serdesconfig0; 434 ipath_kreg kr_pcieq1serdesconfig0;
418 ipath_kreg kr_pcieq1serdesconfig1; 435 ipath_kreg kr_pcieq1serdesconfig1;
419 ipath_kreg kr_pcieq1serdesstatus; 436 ipath_kreg kr_pcieq1serdesstatus;
437 ipath_kreg kr_hrtbt_guid;
438 ipath_kreg kr_ibcddrctrl;
439 ipath_kreg kr_ibcddrstatus;
440 ipath_kreg kr_jintreload;
441
442 /* send dma related regs */
443 ipath_kreg kr_senddmabase;
444 ipath_kreg kr_senddmalengen;
445 ipath_kreg kr_senddmatail;
446 ipath_kreg kr_senddmahead;
447 ipath_kreg kr_senddmaheadaddr;
448 ipath_kreg kr_senddmabufmask0;
449 ipath_kreg kr_senddmabufmask1;
450 ipath_kreg kr_senddmabufmask2;
451 ipath_kreg kr_senddmastatus;
452
453 /* SerDes related regs (IBA7220-only) */
454 ipath_kreg kr_ibserdesctrl;
455 ipath_kreg kr_ib_epbacc;
456 ipath_kreg kr_ib_epbtrans;
457 ipath_kreg kr_pcie_epbacc;
458 ipath_kreg kr_pcie_epbtrans;
459 ipath_kreg kr_ib_ddsrxeq;
420}; 460};
421 461
422struct ipath_cregs { 462struct ipath_cregs {
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index a59bdbd0ed87..8ac5c1d82ccd 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -310,7 +310,7 @@ again:
310 switch (wqe->wr.opcode) { 310 switch (wqe->wr.opcode) {
311 case IB_WR_SEND_WITH_IMM: 311 case IB_WR_SEND_WITH_IMM:
312 wc.wc_flags = IB_WC_WITH_IMM; 312 wc.wc_flags = IB_WC_WITH_IMM;
313 wc.imm_data = wqe->wr.imm_data; 313 wc.imm_data = wqe->wr.ex.imm_data;
314 /* FALLTHROUGH */ 314 /* FALLTHROUGH */
315 case IB_WR_SEND: 315 case IB_WR_SEND:
316 if (!ipath_get_rwqe(qp, 0)) { 316 if (!ipath_get_rwqe(qp, 0)) {
@@ -339,7 +339,7 @@ again:
339 goto err; 339 goto err;
340 } 340 }
341 wc.wc_flags = IB_WC_WITH_IMM; 341 wc.wc_flags = IB_WC_WITH_IMM;
342 wc.imm_data = wqe->wr.imm_data; 342 wc.imm_data = wqe->wr.ex.imm_data;
343 if (!ipath_get_rwqe(qp, 1)) 343 if (!ipath_get_rwqe(qp, 1))
344 goto rnr_nak; 344 goto rnr_nak;
345 /* FALLTHROUGH */ 345 /* FALLTHROUGH */
@@ -483,14 +483,16 @@ done:
483 483
484static void want_buffer(struct ipath_devdata *dd) 484static void want_buffer(struct ipath_devdata *dd)
485{ 485{
486 unsigned long flags; 486 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) {
487 487 unsigned long flags;
488 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); 488
489 dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL; 489 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
490 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 490 dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
491 dd->ipath_sendctrl); 491 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
492 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 492 dd->ipath_sendctrl);
493 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); 493 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
494 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
495 }
494} 496}
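
The new guard reflects that on IPATH_HAS_SEND_DMA hardware verbs sends are queued on the SDMA ring rather than waiting for a PIO buffer, so arming the PIO-buffer-available interrupt there would raise interrupts that nothing is waiting on.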
495 497
496/** 498/**
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/ipath/ipath_sd7220.c
new file mode 100644
index 000000000000..aa47eb549520
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sd7220.c
@@ -0,0 +1,1462 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/*
34 * This file contains all of the code that is specific to the SerDes
35 * on the InfiniPath 7220 chip.
36 */
37
38#include <linux/pci.h>
39#include <linux/delay.h>
40
41#include "ipath_kernel.h"
42#include "ipath_registers.h"
43#include "ipath_7220.h"
44
45/*
46 * The IBSerDesMappTable is a memory that holds values to be stored in
47 * various SerDes registers by IBC. It is not part of the normal kregs
48 * map and is used in exactly one place, hence the #define below.
49 */
50#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t)))
51
52/*
53 * Below used for sdnum parameter, selecting one of the two sections
54 * used for PCIe, or the single SerDes used for IB.
55 */
56#define PCIE_SERDES0 0
57#define PCIE_SERDES1 1
58
59/*
60 * The EPB requires addressing in a particular form. EPB_LOC() is intended
61 * to make the #defines a little more readable.
62 */
63#define EPB_ADDR_SHF 8
64#define EPB_LOC(chn, elt, reg) \
65 (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
66 EPB_ADDR_SHF)
67#define EPB_IB_QUAD0_CS_SHF (25)
68#define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
69#define EPB_IB_UC_CS_SHF (26)
70#define EPB_PCIE_UC_CS_SHF (27)
71#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
72
73/* Forward declarations. */
74static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
75 u32 data, u32 mask);
76static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
77 int mask);
78static int ipath_sd_trimdone_poll(struct ipath_devdata *dd);
79static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
80 const char *where);
81static int ipath_sd_setvals(struct ipath_devdata *dd);
82static int ipath_sd_early(struct ipath_devdata *dd);
83static int ipath_sd_dactrim(struct ipath_devdata *dd);
84/* Set the registers that IBC may muck with to their default "preset" values */
85int ipath_sd7220_presets(struct ipath_devdata *dd);
86static int ipath_internal_presets(struct ipath_devdata *dd);
87/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
88static int ipath_sd_trimself(struct ipath_devdata *dd, int val);
89static int epb_access(struct ipath_devdata *dd, int sdnum, int claim);
90
91void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
92
93/*
94 * Below keeps track of whether the "once per power-on" initialization has
95 * been done, because uC code Version 1.32.17 or higher allows the uC to
96 * be reset at will, and Automatic Equalization may require it. So the
97 * state of the reset "pin", as reflected in the was_reset parameter to
98 * ipath_sd7220_init(), is no longer valid. Instead, we check for the
99 * actual uC code having been loaded.
100 */
101static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
102{
103 if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0))
104 dd->serdes_first_init_done = 1;
105 return dd->serdes_first_init_done;
106}
107
108/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */
109#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
110#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
111#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
112#define UC_PAR_CLR_D 8
113#define UC_PAR_CLR_M 0xC
114#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
115#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
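
Worked through for one of the constants above (a hedged reading of the bit layout, not text from the patch), EPB_LOC() leaves the element select in bits 8-11, the channel in bits 12-14, and the register number in bits 17-22 after the final shift by EPB_ADDR_SHF:

	/*
	 * IB_MPREG6 = EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF)
	 *           = ((0x0 | (6 << 4) | (0xF << 9)) << 8) | (1U << 26)
	 *           = (0x1E60 << 8)                        | 0x4000000
	 *           = 0x041E6000
	 */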
116
117void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
118{
119 int ret;
120
121 /* clear, then re-enable parity errs */
122 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
123 UC_PAR_CLR_D, UC_PAR_CLR_M);
124 if (ret < 0) {
125 ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
126 goto bail;
127 }
128 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
129 UC_PAR_CLR_M);
130
131 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
132 udelay(4);
133 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
134 INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
135 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
136bail:
137 return;
138}
139
140/*
141 * After a reset or other unusual event, the epb interface may need
142 * to be re-synchronized between the host and the uC.
143 * returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected)
144 */
145#define IBSD_RESYNC_TRIES 3
146#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
147#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
148
149static int ipath_resync_ibepb(struct ipath_devdata *dd)
150{
151 int ret, pat, tries, chn;
152 u32 loc;
153
154 ret = -1;
155 chn = 0;
156 for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
157 loc = IB_PGUDP(chn);
158 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
159 if (ret < 0) {
160 ipath_dev_err(dd, "Failed read in resync\n");
161 continue;
162 }
163 if (ret != 0xF0 && ret != 0x55 && tries == 0)
164 ipath_dev_err(dd, "unexpected pattern in resync\n");
165 pat = ret ^ 0xA5; /* alternate F0 and 55 */
166 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
167 if (ret < 0) {
168 ipath_dev_err(dd, "Failed write in resync\n");
169 continue;
170 }
171 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
172 if (ret < 0) {
173 ipath_dev_err(dd, "Failed re-read in resync\n");
174 continue;
175 }
176 if (ret != pat) {
177 ipath_dev_err(dd, "Failed compare1 in resync\n");
178 continue;
179 }
180 loc = IB_CMUDONE(chn);
181 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
182 if (ret < 0) {
183 ipath_dev_err(dd, "Failed CMUDONE rd in resync\n");
184 continue;
185 }
186 if ((ret & 0x70) != ((chn << 4) | 0x40)) {
187 ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
188 ret, chn);
189 continue;
190 }
191 if (++chn == 4)
192 break; /* Success */
193 }
194 ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
195 return (ret > 0) ? 0 : ret;
196}
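
The pattern arithmetic in the loop works because 0xF0 ^ 0xA5 = 0x55 and 0x55 ^ 0xA5 = 0xF0, so each pass writes the opposite scratch pattern from whatever was read back; reading the new pattern back intact then proves a full write/read round trip through the EPB for that channel.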
197
198/*
199 * Localize the stuff that should be done to change IB uC reset
200 * returns <0 for errors.
201 */
202static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
203{
204 u64 rst_val;
205 int ret = 0;
206 unsigned long flags;
207
208 rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
209 if (assert_rst) {
210 /*
211 * Vendor recommends "interrupting" uC before reset, to
212 * minimize possible glitches.
213 */
214 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
215 epb_access(dd, IB_7220_SERDES, 1);
216 rst_val |= 1ULL;
217 /* Squelch possible parity error from _asserting_ reset */
218 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
219 dd->ipath_hwerrmask &
220 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
221 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
222 /* flush write, delay to ensure it took effect */
223 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
224 udelay(2);
225 /* once it's reset, can remove interrupt */
226 epb_access(dd, IB_7220_SERDES, -1);
227 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
228 } else {
229 /*
230 * Before we de-assert reset, we need to deal with
231 * possible glitch on the Parity-error line.
232 * Suppress it around the reset, both in chip-level
233 * hwerrmask and in IB uC control reg. uC will allow
234 * it again during startup.
235 */
236 u64 val;
237 rst_val &= ~(1ULL);
238 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
239 dd->ipath_hwerrmask &
240 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
241
242 ret = ipath_resync_ibepb(dd);
243 if (ret < 0)
244 ipath_dev_err(dd, "unable to re-sync IB EPB\n");
245
246 /* set uC control regs to suppress parity errs */
247 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
248 if (ret < 0)
249 goto bail;
250 /* IB uC code past Version 1.32.17 allows suppression of wdog */
251 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
252 0x80);
253 if (ret < 0) {
254 ipath_dev_err(dd, "Failed to set WDOG disable\n");
255 goto bail;
256 }
257 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
258 /* flush write, delay for startup */
259 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
260 udelay(1);
261 /* clear, then re-enable parity errs */
262 ipath_sd7220_clr_ibpar(dd);
263 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
264 if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
265 ipath_dev_err(dd, "IBUC Parity still set after RST\n");
266 dd->ipath_hwerrmask &=
267 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
268 }
269 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
270 dd->ipath_hwerrmask);
271 }
272
273bail:
274 return ret;
275}
276
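/*
 * Summary of the monitor below (inferred from its code): after a uC
 * reset settles, re-sync the EPB, check the "summary" TRIMDONE bit in
 * IBCStatus, then check TRIMDONE per channel and try to re-assert it
 * (bit 4 of IB_CTRL2) on any channel where it reads clear.
 */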
277static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
278 const char *where)
279{
280 int ret, chn, baduns;
281 u64 val;
282
283 if (!where)
284 where = "?";
285
286 /* give time for reset to settle out in EPB */
287 udelay(2);
288
289 ret = ipath_resync_ibepb(dd);
290 if (ret < 0)
291 ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
292
293 /* Do "sacrificial read" to get EPB in sane state after reset */
294 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
295 if (ret < 0)
296 ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
297
298 /* Check/show "summary" Trim-done bit in IBCStatus */
299 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
300 if (val & (1ULL << 11))
301 ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where);
302 else
303 ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
304
305 udelay(2);
306
307 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
308 if (ret < 0)
309 ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
310 udelay(10);
311
312 baduns = 0;
313
314 for (chn = 3; chn >= 0; --chn) {
315 /* Read CTRL reg for each channel to check TRIMDONE */
316 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
317 IB_CTRL2(chn), 0, 0);
318 if (ret < 0)
319 ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d"
320 " (%s)\n", chn, where);
321
322 if (!(ret & 0x10)) {
323 int probe;
324 baduns |= (1 << chn);
325 ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
326 " (%s)\n", chn, ret, where);
327 probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
328 IB_PGUDP(0), 0, 0);
329 ipath_dev_err(dd, "probe is %d (%02X)\n",
330 probe, probe);
331 probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
332 IB_CTRL2(chn), 0, 0);
333 ipath_dev_err(dd, "re-read: %d (%02X)\n",
334 probe, probe);
335 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
336 IB_CTRL2(chn), 0x10, 0x10);
337 if (ret < 0)
338 ipath_dev_err(dd,
339 "Err on TRIMDONE rewrite1\n");
340 }
341 }
342 for (chn = 3; chn >= 0; --chn) {
343 /* Read CTRL reg for each channel to check TRIMDONE */
344 if (baduns & (1 << chn)) {
345 ipath_dev_err(dd,
346				"Resetting TRIMDONE on chn %d (%s)\n",
347 chn, where);
348 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
349 IB_CTRL2(chn), 0x10, 0x10);
350 if (ret < 0)
351 ipath_dev_err(dd, "Failed re-setting "
352 "TRIMDONE, chn %d (%s)\n",
353 chn, where);
354 }
355 }
356}
357
358/*
359 * Below is portion of IBA7220-specific bringup_serdes() that actually
360 * deals with registers and memory within the SerDes itself.
361 * Post IB uC code version 1.32.17, was_reset being 1 is not really
362 * informative, so we double-check.
363 */
364int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
365{
366 int ret = 1; /* default to failure */
367 int first_reset;
368 int val_stat;
369
370 if (!was_reset) {
371 /* entered with reset not asserted, we need to do it */
372 ipath_ibsd_reset(dd, 1);
373 ipath_sd_trimdone_monitor(dd, "Driver-reload");
374 }
375
376 /* Substitute our deduced value for was_reset */
377 ret = ipath_ibsd_ucode_loaded(dd);
378 if (ret < 0) {
379 ret = 1;
380 goto done;
381 }
382 first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
383
384 /*
385 * Alter some regs per vendor latest doc, reset-defaults
386 * are not right for IB.
387 */
388 ret = ipath_sd_early(dd);
389 if (ret < 0) {
390 ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n");
391 ret = 1;
392 goto done;
393 }
394
395 /*
396 * Set DAC manual trim IB.
397 * We only do this once after chip has been reset (usually
398 * same as once per system boot).
399 */
400 if (first_reset) {
401 ret = ipath_sd_dactrim(dd);
402 if (ret < 0) {
403 ipath_dev_err(dd, "Failed IB SERDES DAC trim\n");
404 ret = 1;
405 goto done;
406 }
407 }
408
409 /*
410 * Set various registers (DDS and RXEQ) that will be
411 * controlled by IBC (in 1.2 mode) to reasonable preset values
412 * Calling the "internal" version avoids the "check for needed"
413 * and "trimdone monitor" that might be counter-productive.
414 */
415 ret = ipath_internal_presets(dd);
416 if (ret < 0) {
417 ipath_dev_err(dd, "Failed to set IB SERDES presets\n");
418 ret = 1;
419 goto done;
420 }
421 ret = ipath_sd_trimself(dd, 0x80);
422 if (ret < 0) {
423 ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
424 ret = 1;
425 goto done;
426 }
427
428 /* Load image, then try to verify */
429 ret = 0; /* Assume success */
430 if (first_reset) {
431 int vfy;
432 int trim_done;
433 ipath_dbg("SerDes uC was reset, reloading PRAM\n");
434 ret = ipath_sd7220_ib_load(dd);
435 if (ret < 0) {
436 ipath_dev_err(dd, "Failed to load IB SERDES image\n");
437 ret = 1;
438 goto done;
439 }
440
441 /* Loaded image, try to verify */
442 vfy = ipath_sd7220_ib_vfy(dd);
443 if (vfy != ret) {
444 ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
445 ret = 1;
446 goto done;
447 }
448 /*
449 * Loaded and verified. Almost good...
450 * hold "success" in ret
451 */
452 ret = 0;
453
454 /*
455		 * Prev steps all worked, continue bringup.
456		 * De-assert RESET to uC, only on first reset, to allow
457 * trimming.
458 *
459 * Since our default setup sets START_EQ1 to
460 * PRESET, we need to clear that for this very first run.
461 */
462 ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
463 if (ret < 0) {
464 ipath_dev_err(dd, "Failed clearing START_EQ1\n");
465 ret = 1;
466 goto done;
467 }
468
469 ipath_ibsd_reset(dd, 0);
470 /*
471 * If this is not the first reset, trimdone should be set
472 * already.
473 */
474 trim_done = ipath_sd_trimdone_poll(dd);
475 /*
476 * Whether or not trimdone succeeded, we need to put the
477 * uC back into reset to avoid a possible fight with the
478 * IBC state-machine.
479 */
480 ipath_ibsd_reset(dd, 1);
481
482 if (!trim_done) {
483 ipath_dev_err(dd, "No TRIMDONE seen\n");
484 ret = 1;
485 goto done;
486 }
487
488 ipath_sd_trimdone_monitor(dd, "First-reset");
489 /* Remember so we do not re-do the load, dactrim, etc. */
490 dd->serdes_first_init_done = 1;
491 }
492 /*
493 * Setup for channel training and load values for
494 * RxEq and DDS in tables used by IBC in IB1.2 mode
495 */
496
497 val_stat = ipath_sd_setvals(dd);
498 if (val_stat < 0)
499 ret = 1;
500done:
501 /* start relock timer regardless, but start at 1 second */
502 ipath_set_relock_poll(dd, -1);
503 return ret;
504}
505
506#define EPB_ACC_REQ 1
507#define EPB_ACC_GNT 0x100
508#define EPB_DATA_MASK 0xFF
509#define EPB_RD (1ULL << 24)
510#define EPB_TRANS_RDY (1ULL << 31)
511#define EPB_TRANS_ERR (1ULL << 30)
512#define EPB_TRANS_TRIES 5
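/*
 * Shape of an EPB transaction word, as implied by the masks above:
 * bits 7:0 carry data (EPB_DATA_MASK), bit 24 makes it a read (EPB_RD),
 * bit 31 reports ready (EPB_TRANS_RDY) and bit 30 an error
 * (EPB_TRANS_ERR); address and chip-select bits are packed in between
 * by the EPB_LOC() macro. So a read of location "loc" is issued as
 * (loc | EPB_RD), and a write as (loc | (data & EPB_DATA_MASK)).
 */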
513
514/*
515 * Query, claim, or release ownership of the EPB (External Parallel Bus)
516 * for a specified SERDES.
517 * The "claim" parameter is >0 to claim, <0 to release, 0 to query.
518 * Returns <0 for errors, >0 if we had ownership, else 0.
519 */
520static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
521{
522 u16 acc;
523 u64 accval;
524 int owned = 0;
525 u64 oct_sel = 0;
526
527 switch (sdnum) {
528 case IB_7220_SERDES :
529 /*
530		 * The IB SERDES "ownership" is fairly simple: a single
531		 * request/grant pair.
532 */
533 acc = dd->ipath_kregs->kr_ib_epbacc;
534 break;
535 case PCIE_SERDES0 :
536 case PCIE_SERDES1 :
537 /* PCIe SERDES has two "octants", need to select which */
538 acc = dd->ipath_kregs->kr_pcie_epbacc;
539 oct_sel = (2 << (sdnum - PCIE_SERDES0));
540 break;
541 default :
542 return 0;
543 }
544
545 /* Make sure any outstanding transaction was seen */
546 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
547 udelay(15);
548
549 accval = ipath_read_kreg32(dd, acc);
550
551 owned = !!(accval & EPB_ACC_GNT);
552 if (claim < 0) {
553 /* Need to release */
554 u64 pollval;
555 /*
556 * The only writeable bits are the request and CS.
557		 * Both should be clear.
558 */
559 u64 newval = 0;
560 ipath_write_kreg(dd, acc, newval);
561 /* First read after write is not trustworthy */
562 pollval = ipath_read_kreg32(dd, acc);
563 udelay(5);
564 pollval = ipath_read_kreg32(dd, acc);
565 if (pollval & EPB_ACC_GNT)
566 owned = -1;
567 } else if (claim > 0) {
568 /* Need to claim */
569 u64 pollval;
570 u64 newval = EPB_ACC_REQ | oct_sel;
571 ipath_write_kreg(dd, acc, newval);
572 /* First read after write is not trustworthy */
573 pollval = ipath_read_kreg32(dd, acc);
574 udelay(5);
575 pollval = ipath_read_kreg32(dd, acc);
576 if (!(pollval & EPB_ACC_GNT))
577 owned = -1;
578 }
579 return owned;
580}
581
582/*
583 * Helper ("lemma") to deal with the write..read race condition on EPB regs
584 */
585static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
586{
587 int tries;
588 u64 transval;
589
590
591 ipath_write_kreg(dd, reg, i_val);
592 /* Throw away first read, as RDY bit may be stale */
593 transval = ipath_read_kreg64(dd, reg);
594
595 for (tries = EPB_TRANS_TRIES; tries; --tries) {
596 transval = ipath_read_kreg32(dd, reg);
597 if (transval & EPB_TRANS_RDY)
598 break;
599 udelay(5);
600 }
601 if (transval & EPB_TRANS_ERR)
602 return -1;
603 if (tries > 0 && o_vp)
604 *o_vp = transval;
605 return tries;
606}
607
608/**
610 * ipath_sd7220_reg_mod - modify SERDES register
611 * @dd: the infinipath device
612 * @sdnum: which SERDES to access
613 * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
614 * @wd: Write Data - value to set in register
615 * @mask: ones where data should be spliced into reg.
616 *
617 * Basic register read/modify/write, with unneeded accesses elided. That is,
618 * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
619 * returns current (presumed, if a write was done) contents of selected
620 * register, or <0 if errors.
621 */
622static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
623 u32 wd, u32 mask)
624{
625 u16 trans;
626 u64 transval;
627 int owned;
628 int tries, ret;
629 unsigned long flags;
630
631 switch (sdnum) {
632 case IB_7220_SERDES :
633 trans = dd->ipath_kregs->kr_ib_epbtrans;
634 break;
635 case PCIE_SERDES0 :
636 case PCIE_SERDES1 :
637 trans = dd->ipath_kregs->kr_pcie_epbtrans;
638 break;
639 default :
640 return -1;
641 }
642
643 /*
644 * All access is locked in software (vs other host threads) and
645 * hardware (vs uC access).
646 */
647 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
648
649 owned = epb_access(dd, sdnum, 1);
650 if (owned < 0) {
651 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
652 return -1;
653 }
654 ret = 0;
655 for (tries = EPB_TRANS_TRIES; tries; --tries) {
656 transval = ipath_read_kreg32(dd, trans);
657 if (transval & EPB_TRANS_RDY)
658 break;
659 udelay(5);
660 }
661
662 if (tries > 0) {
663 tries = 1; /* to make read-skip work */
664 if (mask != 0xFF) {
665 /*
666 * Not a pure write, so need to read.
667 * loc encodes chip-select as well as address
668 */
669 transval = loc | EPB_RD;
670 tries = epb_trans(dd, trans, transval, &transval);
671 }
672 if (tries > 0 && mask != 0) {
673 /*
674 * Not a pure read, so need to write.
675 */
676 wd = (wd & mask) | (transval & ~mask);
677 transval = loc | (wd & EPB_DATA_MASK);
678 tries = epb_trans(dd, trans, transval, &transval);
679 }
680 }
681 /* else, failed to see ready, what error-handling? */
682
683 /*
684 * Release bus. Failure is an error.
685 */
686 if (epb_access(dd, sdnum, -1) < 0)
687 ret = -1;
688 else
689 ret = transval & EPB_DATA_MASK;
690
691 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
692 if (tries <= 0)
693 ret = -1;
694 return ret;
695}
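/*
 * Usage sketch for ipath_sd7220_reg_mod(), per the mask convention in
 * the kernel-doc above: mask 0 is a pure read, 0xFF a pure write,
 * anything else a read-modify-write, e.g.
 *
 *	val = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
 *	ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0x80, 0x80);
 *
 * The first reads "loc"; the second sets only bit 7, as done for
 * IB_MPREG6 in ipath_ibsd_reset() above.
 */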
696
697#define EPB_ROM_R (2)
698#define EPB_ROM_W (1)
699/*
700 * Everything below is uC-related; use the appropriate UC_CS, depending
701 * on which SerDes is used.
702 */
703#define EPB_UC_CTL EPB_LOC(6, 0, 0)
704#define EPB_MADDRL EPB_LOC(6, 0, 2)
705#define EPB_MADDRH EPB_LOC(6, 0, 3)
706#define EPB_ROMDATA EPB_LOC(6, 0, 4)
707#define EPB_RAMDATA EPB_LOC(6, 0, 5)
708
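/*
 * Access sequence used by ipath_sd7220_ram_xfer() below: write
 * EPB_UC_CTL with EPB_ROM_R or EPB_ROM_W, set the 16-bit address via
 * EPB_MADDRH/EPB_MADDRL, move bytes through EPB_ROMDATA (the address
 * evidently auto-increments, since it is set only at the start of a
 * chunk), then clear EPB_UC_CTL again.
 */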
709/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
710static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
711 u8 *buf, int cnt, int rd_notwr)
712{
713 u16 trans;
714 u64 transval;
715 u64 csbit;
716 int owned;
717 int tries;
718 int sofar;
719 int addr;
720 int ret;
721 unsigned long flags;
722 const char *op;
723
724 /* Pick appropriate transaction reg and "Chip select" for this serdes */
725 switch (sdnum) {
726 case IB_7220_SERDES :
727 csbit = 1ULL << EPB_IB_UC_CS_SHF;
728 trans = dd->ipath_kregs->kr_ib_epbtrans;
729 break;
730 case PCIE_SERDES0 :
731 case PCIE_SERDES1 :
732 /* PCIe SERDES has uC "chip select" in different bit, too */
733 csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
734 trans = dd->ipath_kregs->kr_pcie_epbtrans;
735 break;
736 default :
737 return -1;
738 }
739
740 op = rd_notwr ? "Rd" : "Wr";
741 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
742
743 owned = epb_access(dd, sdnum, 1);
744 if (owned < 0) {
745 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
746 ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
747 op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
748 owned, loc);
749 return -1;
750 }
751
752 /*
753 * In future code, we may need to distinguish several address ranges,
754 * and select various memories based on this. For now, just trim
755 * "loc" (location including address and memory select) to
756	 * "addr" (address within memory). We will only support PRAM.
757 * The memory is 8KB.
758 */
759 addr = loc & 0x1FFF;
760 for (tries = EPB_TRANS_TRIES; tries; --tries) {
761 transval = ipath_read_kreg32(dd, trans);
762 if (transval & EPB_TRANS_RDY)
763 break;
764 udelay(5);
765 }
766
767 sofar = 0;
768 if (tries <= 0)
769 ipath_dbg("No initial RDY on EPB access request\n");
770 else {
771 /*
772 * Every "memory" access is doubly-indirect.
773 * We set two bytes of address, then read/write
774		 * one or more bytes of data.
775 */
776
777 /* First, we set control to "Read" or "Write" */
778 transval = csbit | EPB_UC_CTL |
779 (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
780 tries = epb_trans(dd, trans, transval, &transval);
781 if (tries <= 0)
782 ipath_dbg("No EPB response to uC %s cmd\n", op);
783 while (tries > 0 && sofar < cnt) {
784 if (!sofar) {
785 /* Only set address at start of chunk */
786 int addrbyte = (addr + sofar) >> 8;
787 transval = csbit | EPB_MADDRH | addrbyte;
788 tries = epb_trans(dd, trans, transval,
789 &transval);
790 if (tries <= 0) {
791 ipath_dbg("No EPB response ADDRH\n");
792 break;
793 }
794 addrbyte = (addr + sofar) & 0xFF;
795 transval = csbit | EPB_MADDRL | addrbyte;
796 tries = epb_trans(dd, trans, transval,
797 &transval);
798 if (tries <= 0) {
799 ipath_dbg("No EPB response ADDRL\n");
800 break;
801 }
802 }
803
804 if (rd_notwr)
805 transval = csbit | EPB_ROMDATA | EPB_RD;
806 else
807 transval = csbit | EPB_ROMDATA | buf[sofar];
808 tries = epb_trans(dd, trans, transval, &transval);
809 if (tries <= 0) {
810 ipath_dbg("No EPB response DATA\n");
811 break;
812 }
813 if (rd_notwr)
814 buf[sofar] = transval & EPB_DATA_MASK;
815 ++sofar;
816 }
817 /* Finally, clear control-bit for Read or Write */
818 transval = csbit | EPB_UC_CTL;
819 tries = epb_trans(dd, trans, transval, &transval);
820 if (tries <= 0)
821 ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
822 }
823
824 ret = sofar;
825 /* Release bus. Failure is an error */
826 if (epb_access(dd, sdnum, -1) < 0)
827 ret = -1;
828
829 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
830 if (tries <= 0) {
831 ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
832 ret = -1;
833 }
834 return ret;
835}
836
837#define PROG_CHUNK 64
838
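/*
 * Load "len" bytes of "img" into uC Program RAM at "offset", in
 * PROG_CHUNK-byte transfers. Returns bytes written, or -1 if any
 * chunk transfers short.
 */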
839int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
840 u8 *img, int len, int offset)
841{
842 int cnt, sofar, req;
843
844 sofar = 0;
845 while (sofar < len) {
846 req = len - sofar;
847 if (req > PROG_CHUNK)
848 req = PROG_CHUNK;
849 cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar,
850 img + sofar, req, 0);
851 if (cnt < req) {
852 sofar = -1;
853 break;
854 }
855 sofar += req;
856 }
857 return sofar;
858}
859
860#define VFY_CHUNK 64
861#define SD_PRAM_ERROR_LIMIT 42
862
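/*
 * Read back uC Program RAM and compare against "img". Returns the
 * byte count on a clean compare, -(number of mismatches) if any byte
 * differs, or -1 if the readback itself fails.
 */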
863int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
864 const u8 *img, int len, int offset)
865{
866 int cnt, sofar, req, idx, errors;
867 unsigned char readback[VFY_CHUNK];
868
869 errors = 0;
870 sofar = 0;
871 while (sofar < len) {
872 req = len - sofar;
873 if (req > VFY_CHUNK)
874 req = VFY_CHUNK;
875 cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset,
876 readback, req, 1);
877 if (cnt < req) {
878 /* failed in read itself */
879 sofar = -1;
880 break;
881 }
882 for (idx = 0; idx < cnt; ++idx) {
883 if (readback[idx] != img[idx+sofar])
884 ++errors;
885 }
886 sofar += cnt;
887 }
888 return errors ? -errors : sofar;
889}
890
891/* IRQ not set up at this point in init, so we poll. */
892#define IB_SERDES_TRIM_DONE (1ULL << 11)
893#define TRIM_TMO (30)
894
895static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
896{
897 int trim_tmo, ret;
898 uint64_t val;
899
900 /*
901 * Default to failure, so IBC will not start
902 * without IB_SERDES_TRIM_DONE.
903 */
904 ret = 0;
905 for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
906 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
907 if (val & IB_SERDES_TRIM_DONE) {
908 ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
909 ret = 1;
910 break;
911 }
912 msleep(10);
913 }
914 if (trim_tmo >= TRIM_TMO) {
915 ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
916 ret = 0;
917 }
918 return ret;
919}
920
921#define TX_FAST_ELT (9)
922
923/*
924 * Set the "negotiation" values for SERDES. These are used by the IB1.2
925 * link negotiation. The macros below are an attempt to keep the values
926 * a little more human-editable.
927 * First, values related to Drive De-emphasis Settings.
928 */
929
930#define NUM_DDS_REGS 6
931#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
932
933#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
934 { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
935 (main_d << 3) | 4 | (ipre_d >> 2), \
936 (main_s << 3) | 4 | (ipre_s >> 2), \
937 ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
938 ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
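/*
 * Worked example of the packing above, for the first DDS_3M row,
 * DDS_VAL(31, 19, 12, 0, ...): byte 0 = ((31 & 0x1F) << 1) | 1 = 0x3F,
 * byte 2 = (19 << 3) | 4 | (0 >> 2) = 0x9C, byte 4 =
 * ((12 & 0xF) << 1) | ((0 & 3) << 6) | 0x21 = 0x39. The _s values
 * fill the interleaved odd bytes the same way.
 */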
939
940static struct dds_init {
941 uint8_t reg_vals[NUM_DDS_REGS];
942} dds_init_vals[] = {
943 /* DDR(FDR) SDR(HDR) */
944 /* Vendor recommends below for 3m cable */
945#define DDS_3M 0
946 DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
947 DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
948 DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
949 DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
950 DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
951 DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
952 DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
953 DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
954 DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
955 DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
956 DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
957 DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
958 DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
959 /* Vendor recommends below for 1m cable */
960#define DDS_1M 13
961 DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
962 DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
963 DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
964};
965
966/*
967 * Next, values related to Receive Equalization.
968 * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
969 */
970/* Hardware packs an element number and register address thus: */
971#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
972#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
973 {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
974
975#define RXEQ_VAL_ALL(elt, adr, val) \
976 {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
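/*
 * E.g. the first table entry below, RXEQ_VAL_ALL(7, 0x27, 0x10),
 * packs to rdesc = (7 & 0xF) | (0x27 << 4) = 0x277, with 0x10 in all
 * four data slots.
 */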
977
978#define RXEQ_SDR_DFELTH 0
979#define RXEQ_SDR_TLTH 0
980#define RXEQ_SDR_G1CNT_Z1CNT 0x11
981#define RXEQ_SDR_ZCNT 23
982
983static struct rxeq_init {
984 u16 rdesc; /* in form used in SerDesDDSRXEQ */
985 u8 rdata[4];
986} rxeq_init_vals[] = {
987	/* Set Rcv Eq. to Preset mode */
988 RXEQ_VAL_ALL(7, 0x27, 0x10),
989 /* Set DFELTHFDR/HDR thresholds */
990 RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */
991 RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
992	/* Set TLTHFDR/HDR thresholds */
993 RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */
994 RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */
995 /* Set Preamp setting 2 (ZFR/ZCNT) */
996 RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */
997 RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */
998 /* Set Preamp DC gain and Setting 1 (GFR/GHR) */
999 RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */
1000 RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */
1001 /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
1002 RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
1003 RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
1004};
1005
1006/* There are 17 values from vendor, but IBC only accesses the first 16 */
1007#define DDS_ROWS (16)
1008#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
1009
1010static int ipath_sd_setvals(struct ipath_devdata *dd)
1011{
1012 int idx, midx;
1013 int min_idx; /* Minimum index for this portion of table */
1014 uint32_t dds_reg_map;
1015 u64 __iomem *taddr, *iaddr;
1016 uint64_t data;
1017 uint64_t sdctl;
1018
1019 taddr = dd->ipath_kregbase + KR_IBSerDesMappTable;
1020 iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq;
1021
1022 /*
1023 * Init the DDS section of the table.
1024	 * Each "row" of the table provokes NUM_DDS_REGS writes, to the
1025 * registers indicated in DDS_REG_MAP.
1026 */
1027 sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
1028 sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
1029 sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
1030 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl);
1031
1032 /*
1033 * Iterate down table within loop for each register to store.
1034 */
1035 dds_reg_map = DDS_REG_MAP;
1036 for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
1037 data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
1038 writeq(data, iaddr + idx);
1039 mmiowb();
1040 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1041 dds_reg_map >>= 4;
1042 for (midx = 0; midx < DDS_ROWS; ++midx) {
1043 u64 __iomem *daddr = taddr + ((midx << 4) + idx);
1044 data = dds_init_vals[midx].reg_vals[idx];
1045 writeq(data, daddr);
1046 mmiowb();
1047 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1048 } /* End inner for (vals for this reg, each row) */
1049 } /* end outer for (regs to be stored) */
1050
1051 /*
1052 * Init the RXEQ section of the table. As explained above the table
1053 * rxeq_init_vals[], this runs in a different order, as the pattern
1054 * of register references is more complex, but there are only
1055 * four "data" values per register.
1056 */
1057 min_idx = idx; /* RXEQ indices pick up where DDS left off */
1058 taddr += 0x100; /* RXEQ data is in second half of table */
1059 /* Iterate through RXEQ register addresses */
1060 for (idx = 0; idx < RXEQ_ROWS; ++idx) {
1061 int didx; /* "destination" */
1062 int vidx;
1063
1064 /* didx is offset by min_idx to address RXEQ range of regs */
1065 didx = idx + min_idx;
1066 /* Store the next RXEQ register address */
1067 writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
1068 mmiowb();
1069 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1070 /* Iterate through RXEQ values */
1071 for (vidx = 0; vidx < 4; vidx++) {
1072 data = rxeq_init_vals[idx].rdata[vidx];
1073 writeq(data, taddr + (vidx << 6) + idx);
1074 mmiowb();
1075 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1076 }
1077 } /* end outer for (Reg-writes for RXEQ) */
1078 return 0;
1079}
1080
1081#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
1082#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
1083#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
1084#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
1085#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
1086#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
1087
1088static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
1089{
1090 int ret = -1;
1091 int sloc; /* shifted loc, for messages */
1092
1093 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1094 sloc = loc >> EPB_ADDR_SHF;
1095
1096 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
1097 if (ret < 0)
1098 ipath_dev_err(dd, "Write failed: elt %d,"
1099 " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
1100 (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
1101 val & 0xFF, mask & 0xFF);
1102 return ret;
1103}
1104
1105/*
1106 * Repeat a "store" across all channels of the IB SerDes.
1107 * Although nominally it inherits the "read value" of the last
1108 * channel it modified, the only really useful return is <0 for
1109 * failure, >= 0 for success. The parameter 'loc' is assumed to
1110 * be the location for the channel-0 copy of the register to
1111 * be modified.
1112 */
1113static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
1114 int mask)
1115{
1116 int ret = -1;
1117 int chnl;
1118
1119 if (loc & EPB_GLOBAL_WR) {
1120 /*
1121 * Our caller has assured us that we can set all four
1122 * channels at once. Trust that. If mask is not 0xFF,
1123 * we will read the _specified_ channel for our starting
1124 * value.
1125 */
1126 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1127 chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
1128 if (mask != 0xFF) {
1129 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
1130 loc & ~EPB_GLOBAL_WR, 0, 0);
1131 if (ret < 0) {
1132 int sloc = loc >> EPB_ADDR_SHF;
1133 ipath_dev_err(dd, "pre-read failed: elt %d,"
1134 " addr 0x%X, chnl %d\n", (sloc & 0xF),
1135 (sloc >> 9) & 0x3f, chnl);
1136 return ret;
1137 }
1138 val = (ret & ~mask) | (val & mask);
1139 }
1140 loc &= ~(7 << (4+EPB_ADDR_SHF));
1141 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
1142 if (ret < 0) {
1143 int sloc = loc >> EPB_ADDR_SHF;
1144 ipath_dev_err(dd, "Global WR failed: elt %d,"
1145 " addr 0x%X, val %02X\n",
1146 (sloc & 0xF), (sloc >> 9) & 0x3f, val);
1147 }
1148 return ret;
1149 }
1150 /* Clear "channel" and set CS so we can simply iterate */
1151 loc &= ~(7 << (4+EPB_ADDR_SHF));
1152 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1153 for (chnl = 0; chnl < 4; ++chnl) {
1154 int cloc;
1155 cloc = loc | (chnl << (4+EPB_ADDR_SHF));
1156 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
1157 if (ret < 0) {
1158 int sloc = loc >> EPB_ADDR_SHF;
1159 ipath_dev_err(dd, "Write failed: elt %d,"
1160 " addr 0x%X, chnl %d, val 0x%02X,"
1161 " mask 0x%02X\n",
1162 (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
1163 val & 0xFF, mask & 0xFF);
1164 break;
1165 }
1166 }
1167 return ret;
1168}
1169
1170/*
1171 * Set the Tx values normally modified by IBC in IB1.2 mode to default
1172 * values, as gotten from first row of init table.
1173 */
1174static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
1175{
1176 int ret;
1177 int idx, reg, data;
1178 uint32_t regmap;
1179
1180 regmap = DDS_REG_MAP;
1181 for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
1182 reg = (regmap & 0xF);
1183 regmap >>= 4;
1184 data = ddi->reg_vals[idx];
1185 /* Vendor says RMW not needed for these regs, use 0xFF mask */
1186 ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
1187 if (ret < 0)
1188 break;
1189 }
1190 return ret;
1191}
1192
1193/*
1194 * Set the Rx values normally modified by IBC in IB1.2 mode to default
1195 * values, as gotten from selected column of init table.
1196 */
1197static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
1198{
1199 int ret;
1200 int ridx;
1201 int cnt = ARRAY_SIZE(rxeq_init_vals);
1202
1203 for (ridx = 0; ridx < cnt; ++ridx) {
1204 int elt, reg, val, loc;
1205 elt = rxeq_init_vals[ridx].rdesc & 0xF;
1206 reg = rxeq_init_vals[ridx].rdesc >> 4;
1207 loc = EPB_LOC(0, elt, reg);
1208 val = rxeq_init_vals[ridx].rdata[vsel];
1209 /* mask of 0xFF, because hardware does full-byte store. */
1210 ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
1211 if (ret < 0)
1212 break;
1213 }
1214 return ret;
1215}
1216
1217/*
1218 * Set the default values (row 0) for DDR Driver De-emphasis.
1219 * We do this initially and whenever we turn off IB-1.2
1220 * The "default" values for Rx equalization are also stored to
1221 * SerDes registers. Formerly (and still default), we used set 2.
1222 * For experimenting with cables and link-partners, we allow changing
1223 * that via a module parameter.
1224 */
1225static unsigned ipath_rxeq_set = 2;
1226module_param_named(rxeq_default_set, ipath_rxeq_set, uint,
1227 S_IWUSR | S_IRUGO);
1228MODULE_PARM_DESC(rxeq_default_set,
1229 "Which set [0..3] of Rx Equalization values is default");
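/*
 * Since the parameter is S_IWUSR it can also be changed at runtime,
 * e.g. (assuming the usual ib_ipath module name):
 *	echo 1 > /sys/module/ib_ipath/parameters/rxeq_default_set
 * Only the low two bits are used; see the "& 3" below.
 */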
1230
1231static int ipath_internal_presets(struct ipath_devdata *dd)
1232{
1233 int ret = 0;
1234
1235 ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
1236
1237 if (ret < 0)
1238 ipath_dev_err(dd, "Failed to set default DDS values\n");
1239 ret = set_rxeq_vals(dd, ipath_rxeq_set & 3);
1240 if (ret < 0)
1241 ipath_dev_err(dd, "Failed to set default RXEQ values\n");
1242 return ret;
1243}
1244
1245int ipath_sd7220_presets(struct ipath_devdata *dd)
1246{
1247 int ret = 0;
1248
1249 if (!dd->ipath_presets_needed)
1250 return ret;
1251 dd->ipath_presets_needed = 0;
1252 /* Assert uC reset, so we don't clash with it. */
1253 ipath_ibsd_reset(dd, 1);
1254 udelay(2);
1255 ipath_sd_trimdone_monitor(dd, "link-down");
1256
1257 ret = ipath_internal_presets(dd);
1258	return ret;
1259}
1260
1261static int ipath_sd_trimself(struct ipath_devdata *dd, int val)
1262{
1263 return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF);
1264}
1265
1266static int ipath_sd_early(struct ipath_devdata *dd)
1267{
1268 int ret = -1; /* Default failed */
1269 int chnl;
1270
1271 for (chnl = 0; chnl < 4; ++chnl) {
1272 ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF);
1273 if (ret < 0)
1274 goto bail;
1275 }
1276 for (chnl = 0; chnl < 4; ++chnl) {
1277 ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF);
1278 if (ret < 0)
1279 goto bail;
1280 }
1281 /* more fine-tuning of what will be default */
1282 for (chnl = 0; chnl < 4; ++chnl) {
1283 ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
1284 if (ret < 0)
1285 goto bail;
1286 }
1287 for (chnl = 0; chnl < 4; ++chnl) {
1288 ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
1289 if (ret < 0)
1290 goto bail;
1291 }
1292 for (chnl = 0; chnl < 4; ++chnl) {
1293 ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
1294 if (ret < 0)
1295 goto bail;
1296 }
1297bail:
1298 return ret;
1299}
1300
1301#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
1302#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
1303#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
1304
1305static int ipath_sd_dactrim(struct ipath_devdata *dd)
1306{
1307 int ret = -1; /* Default failed */
1308 int chnl;
1309
1310 for (chnl = 0; chnl < 4; ++chnl) {
1311 ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
1312 if (ret < 0)
1313 goto bail;
1314 }
1315 for (chnl = 0; chnl < 4; ++chnl) {
1316 ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
1317 if (ret < 0)
1318 goto bail;
1319 }
1320 for (chnl = 0; chnl < 4; ++chnl) {
1321 ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
1322 if (ret < 0)
1323 goto bail;
1324 }
1325 /*
1326 * delay for max possible number of steps, with slop.
1327 * Each step is about 4usec.
1328 */
1329 udelay(415);
1330 for (chnl = 0; chnl < 4; ++chnl) {
1331 ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF);
1332 if (ret < 0)
1333 goto bail;
1334 }
1335bail:
1336 return ret;
1337}
1338
1339#define RELOCK_FIRST_MS 3
1340#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
1341void ipath_toggle_rclkrls(struct ipath_devdata *dd)
1342{
1343 int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
1344 int ret;
1345
1346 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
1347 if (ret < 0)
1348 ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
1349 else {
1350 udelay(1);
1351 ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
1352 }
1353 /* And again for good measure */
1354 udelay(1);
1355 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
1356 if (ret < 0)
1357 ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
1358 else {
1359 udelay(1);
1360 ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
1361 }
1362 /* Now reset xgxs and IBC to complete the recovery */
1363 dd->ipath_f_xgxs_reset(dd);
1364}
1365
1366/*
1367 * Shut down the timer that polls for relock occasions, if needed.
1368 * This is "hooked" from ipath_7220_quiet_serdes(), which is called
1369 * just before ipath_shutdown_device() in ipath_driver.c shuts down
1370 * all the other timers.
1371 */
1372void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
1373{
1374 struct ipath_relock *irp = &dd->ipath_relock_singleton;
1375 if (atomic_read(&irp->ipath_relock_timer_active)) {
1376 del_timer_sync(&irp->ipath_relock_timer);
1377 atomic_set(&irp->ipath_relock_timer_active, 0);
1378 }
1379}
1380
1381static unsigned ipath_relock_by_timer = 1;
1382module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
1383 S_IWUSR | S_IRUGO);
1384MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
1385
1386static void ipath_run_relock(unsigned long opaque)
1387{
1388 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
1389 struct ipath_relock *irp = &dd->ipath_relock_singleton;
1390 u64 val, ltstate;
1391
1392 if (!(dd->ipath_flags & IPATH_INITTED)) {
1393 /* Not yet up, just reenable the timer for later */
1394 irp->ipath_relock_interval = HZ;
1395 mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
1396 return;
1397 }
1398
1399 /*
1400 * Check link-training state for "stuck" state.
1401 * if found, try relock and schedule another try at
1402 * exponentially growing delay, maxed at one second.
1403 * if not stuck, our work is done.
1404 */
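	/*
	 * E.g. with HZ=1000 the retry interval runs 3ms (RELOCK_FIRST_MS,
	 * set in ipath_set_relock_poll() below), then 6ms, 12ms, ...,
	 * capped at 1000ms by the shift-and-clamp below.
	 */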
1405 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
1406 ltstate = ipath_ib_linktrstate(dd, val);
1407
1408 if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
1409 && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
1410 int timeoff;
1411 /* Not up yet. Try again, if allowed by module-param */
1412 if (ipath_relock_by_timer) {
1413 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
1414 ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
1415 else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
1416 ipath_cdbg(VERBOSE, "RELOCK\n");
1417 ipath_toggle_rclkrls(dd);
1418 }
1419 }
1420 /* re-set timer for next check */
1421 timeoff = irp->ipath_relock_interval << 1;
1422 if (timeoff > HZ)
1423 timeoff = HZ;
1424 irp->ipath_relock_interval = timeoff;
1425
1426 mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
1427 } else {
1428 /* Up, so no more need to check so often */
1429 mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
1430 }
1431}
1432
1433void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
1434{
1435 struct ipath_relock *irp = &dd->ipath_relock_singleton;
1436
1437 if (ibup > 0) {
1438 /* we are now up, so relax timer to 1 second interval */
1439 if (atomic_read(&irp->ipath_relock_timer_active))
1440 mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
1441 } else {
1442 /* Transition to down, (re-)set timer to short interval. */
1443 int timeout;
1444 timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
1445 if (timeout == 0)
1446 timeout = 1;
1447 /* If timer has not yet been started, do so. */
1448 if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
1449 init_timer(&irp->ipath_relock_timer);
1450 irp->ipath_relock_timer.function = ipath_run_relock;
1451 irp->ipath_relock_timer.data = (unsigned long) dd;
1452 irp->ipath_relock_interval = timeout;
1453 irp->ipath_relock_timer.expires = jiffies + timeout;
1454 add_timer(&irp->ipath_relock_timer);
1455 } else {
1456 irp->ipath_relock_interval = timeout;
1457 mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
1458 atomic_dec(&irp->ipath_relock_timer_active);
1459 }
1460 }
1461}
1462
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
new file mode 100644
index 000000000000..5ef59da9270a
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
@@ -0,0 +1,1082 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains the memory image from the vendor, to be copied into
35 * the IB SERDES of the IBA7220 during initialization.
36 * The file also includes the two functions which use this image.
37 */
38#include <linux/pci.h>
39#include <linux/delay.h>
40
41#include "ipath_kernel.h"
42#include "ipath_registers.h"
43#include "ipath_7220.h"
44
45static unsigned char ipath_sd7220_ib_img[] = {
46/*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
47 0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
48/*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
49 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x5F, 0x60, 0x08,
50/*0020*/0x53, 0xF9, 0xF7, 0xE4, 0xF5, 0xFE, 0x80, 0x08,
51 0x7F, 0x0A, 0x12, 0x17, 0x31, 0x12, 0x0E, 0xA2,
52/*0030*/0x75, 0xFC, 0x08, 0xE4, 0xF5, 0xFD, 0xE5, 0xE7,
53 0x20, 0xE7, 0x03, 0x43, 0xF9, 0x08, 0x22, 0x00,
54/*0040*/0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x75,
55 0x51, 0x01, 0xE4, 0xF5, 0x52, 0xF5, 0x53, 0xF5,
56/*0050*/0x52, 0xF5, 0x7E, 0x7F, 0x04, 0x02, 0x04, 0x38,
57 0xC2, 0x36, 0x05, 0x52, 0xE5, 0x52, 0xD3, 0x94,
58/*0060*/0x0C, 0x40, 0x05, 0x75, 0x52, 0x01, 0xD2, 0x36,
59 0x90, 0x07, 0x0C, 0x74, 0x07, 0xF0, 0xA3, 0x74,
60/*0070*/0xFF, 0xF0, 0xE4, 0xF5, 0x0C, 0xA3, 0xF0, 0x90,
61 0x07, 0x14, 0xF0, 0xA3, 0xF0, 0x75, 0x0B, 0x20,
62/*0080*/0xF5, 0x09, 0xE4, 0xF5, 0x08, 0xE5, 0x08, 0xD3,
63 0x94, 0x30, 0x40, 0x03, 0x02, 0x04, 0x04, 0x12,
64/*0090*/0x00, 0x06, 0x15, 0x0B, 0xE5, 0x08, 0x70, 0x04,
65 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x09,
66/*00A0*/0x70, 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00,
67 0xEE, 0x5F, 0x60, 0x05, 0x12, 0x18, 0x71, 0xD2,
68/*00B0*/0x35, 0x53, 0xE1, 0xF7, 0xE5, 0x08, 0x45, 0x09,
69 0xFF, 0xE5, 0x0B, 0x25, 0xE0, 0x25, 0xE0, 0x24,
70/*00C0*/0x83, 0xF5, 0x82, 0xE4, 0x34, 0x07, 0xF5, 0x83,
71 0xEF, 0xF0, 0x85, 0xE2, 0x20, 0xE5, 0x52, 0xD3,
72/*00D0*/0x94, 0x01, 0x40, 0x0D, 0x12, 0x19, 0xF3, 0xE0,
73 0x54, 0xA0, 0x64, 0x40, 0x70, 0x03, 0x02, 0x03,
74/*00E0*/0xFB, 0x53, 0xF9, 0xF8, 0x90, 0x94, 0x70, 0xE4,
75 0xF0, 0xE0, 0xF5, 0x10, 0xAF, 0x09, 0x12, 0x1E,
76/*00F0*/0xB3, 0xAF, 0x08, 0xEF, 0x44, 0x08, 0xF5, 0x82,
77 0x75, 0x83, 0x80, 0xE0, 0xF5, 0x29, 0xEF, 0x44,
78/*0100*/0x07, 0x12, 0x1A, 0x3C, 0xF5, 0x22, 0x54, 0x40,
79 0xD3, 0x94, 0x00, 0x40, 0x1E, 0xE5, 0x29, 0x54,
80/*0110*/0xF0, 0x70, 0x21, 0x12, 0x19, 0xF3, 0xE0, 0x44,
81 0x80, 0xF0, 0xE5, 0x22, 0x54, 0x30, 0x65, 0x08,
82/*0120*/0x70, 0x09, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xBF,
83 0xF0, 0x80, 0x09, 0x12, 0x19, 0xF3, 0x74, 0x40,
84/*0130*/0xF0, 0x02, 0x03, 0xFB, 0x12, 0x1A, 0x12, 0x75,
85 0x83, 0xAE, 0x74, 0xFF, 0xF0, 0xAF, 0x08, 0x7E,
86/*0140*/0x00, 0xEF, 0x44, 0x07, 0xF5, 0x82, 0xE0, 0xFD,
87 0xE5, 0x0B, 0x25, 0xE0, 0x25, 0xE0, 0x24, 0x81,
88/*0150*/0xF5, 0x82, 0xE4, 0x34, 0x07, 0xF5, 0x83, 0xED,
89 0xF0, 0x90, 0x07, 0x0E, 0xE0, 0x04, 0xF0, 0xEF,
90/*0160*/0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0x98, 0xE0,
91 0xF5, 0x28, 0x12, 0x1A, 0x23, 0x40, 0x0C, 0x12,
92/*0170*/0x19, 0xF3, 0xE0, 0x44, 0x01, 0x12, 0x1A, 0x32,
93 0x02, 0x03, 0xF6, 0xAF, 0x08, 0x7E, 0x00, 0x74,
94/*0180*/0x80, 0xCD, 0xEF, 0xCD, 0x8D, 0x82, 0xF5, 0x83,
95 0xE0, 0x30, 0xE0, 0x0A, 0x12, 0x19, 0xF3, 0xE0,
96/*0190*/0x44, 0x20, 0xF0, 0x02, 0x03, 0xFB, 0x12, 0x19,
97 0xF3, 0xE0, 0x54, 0xDF, 0xF0, 0xEE, 0x44, 0xAE,
98/*01A0*/0x12, 0x1A, 0x43, 0x30, 0xE4, 0x03, 0x02, 0x03,
99 0xFB, 0x74, 0x9E, 0x12, 0x1A, 0x05, 0x20, 0xE0,
100/*01B0*/0x03, 0x02, 0x03, 0xFB, 0x8F, 0x82, 0x8E, 0x83,
101 0xE0, 0x20, 0xE0, 0x03, 0x02, 0x03, 0xFB, 0x12,
102/*01C0*/0x19, 0xF3, 0xE0, 0x44, 0x10, 0xF0, 0xE5, 0xE3,
103 0x20, 0xE7, 0x08, 0xE5, 0x08, 0x12, 0x1A, 0x3A,
104/*01D0*/0x44, 0x04, 0xF0, 0xAF, 0x08, 0x7E, 0x00, 0xEF,
105 0x12, 0x1A, 0x3A, 0x20, 0xE2, 0x34, 0x12, 0x19,
106/*01E0*/0xF3, 0xE0, 0x44, 0x08, 0xF0, 0xE5, 0xE4, 0x30,
107 0xE6, 0x04, 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00,
108/*01F0*/0xE5, 0x7E, 0xC3, 0x94, 0x04, 0x50, 0x04, 0x7C,
109 0x01, 0x80, 0x02, 0x7C, 0x00, 0xEC, 0x4D, 0x60,
110/*0200*/0x05, 0xC2, 0x35, 0x02, 0x03, 0xFB, 0xEE, 0x44,
111 0xD2, 0x12, 0x1A, 0x43, 0x44, 0x40, 0xF0, 0x02,
112/*0210*/0x03, 0xFB, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xF7,
113 0xF0, 0x12, 0x1A, 0x12, 0x75, 0x83, 0xD2, 0xE0,
114/*0220*/0x54, 0xBF, 0xF0, 0x90, 0x07, 0x14, 0xE0, 0x04,
115 0xF0, 0xE5, 0x7E, 0x70, 0x03, 0x75, 0x7E, 0x01,
116/*0230*/0xAF, 0x08, 0x7E, 0x00, 0x12, 0x1A, 0x23, 0x40,
117 0x12, 0x12, 0x19, 0xF3, 0xE0, 0x44, 0x01, 0x12,
118/*0240*/0x19, 0xF2, 0xE0, 0x54, 0x02, 0x12, 0x1A, 0x32,
119 0x02, 0x03, 0xFB, 0x12, 0x19, 0xF3, 0xE0, 0x44,
120/*0250*/0x02, 0x12, 0x19, 0xF2, 0xE0, 0x54, 0xFE, 0xF0,
121 0xC2, 0x35, 0xEE, 0x44, 0x8A, 0x8F, 0x82, 0xF5,
122/*0260*/0x83, 0xE0, 0xF5, 0x17, 0x54, 0x8F, 0x44, 0x40,
123 0xF0, 0x74, 0x90, 0xFC, 0xE5, 0x08, 0x44, 0x07,
124/*0270*/0xFD, 0xF5, 0x82, 0x8C, 0x83, 0xE0, 0x54, 0x3F,
125 0x90, 0x07, 0x02, 0xF0, 0xE0, 0x54, 0xC0, 0x8D,
126/*0280*/0x82, 0x8C, 0x83, 0xF0, 0x74, 0x92, 0x12, 0x1A,
127 0x05, 0x90, 0x07, 0x03, 0x12, 0x1A, 0x19, 0x74,
128/*0290*/0x82, 0x12, 0x1A, 0x05, 0x90, 0x07, 0x04, 0x12,
129 0x1A, 0x19, 0x74, 0xB4, 0x12, 0x1A, 0x05, 0x90,
130/*02A0*/0x07, 0x05, 0x12, 0x1A, 0x19, 0x74, 0x94, 0xFE,
131 0xE5, 0x08, 0x44, 0x06, 0x12, 0x1A, 0x0A, 0xF5,
132/*02B0*/0x10, 0x30, 0xE0, 0x04, 0xD2, 0x37, 0x80, 0x02,
133 0xC2, 0x37, 0xE5, 0x10, 0x54, 0x7F, 0x8F, 0x82,
134/*02C0*/0x8E, 0x83, 0xF0, 0x30, 0x44, 0x30, 0x12, 0x1A,
135 0x03, 0x54, 0x80, 0xD3, 0x94, 0x00, 0x40, 0x04,
136/*02D0*/0xD2, 0x39, 0x80, 0x02, 0xC2, 0x39, 0x8F, 0x82,
137 0x8E, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0x12, 0x1A,
138/*02E0*/0x03, 0x54, 0x40, 0xD3, 0x94, 0x00, 0x40, 0x04,
139 0xD2, 0x3A, 0x80, 0x02, 0xC2, 0x3A, 0x8F, 0x82,
140/*02F0*/0x8E, 0x83, 0xE0, 0x44, 0x40, 0xF0, 0x74, 0x92,
141 0xFE, 0xE5, 0x08, 0x44, 0x06, 0x12, 0x1A, 0x0A,
142/*0300*/0x30, 0xE7, 0x04, 0xD2, 0x38, 0x80, 0x02, 0xC2,
143 0x38, 0x8F, 0x82, 0x8E, 0x83, 0xE0, 0x54, 0x7F,
144/*0310*/0xF0, 0x12, 0x1E, 0x46, 0xE4, 0xF5, 0x0A, 0x20,
145 0x03, 0x02, 0x80, 0x03, 0x30, 0x43, 0x03, 0x12,
146/*0320*/0x19, 0x95, 0x20, 0x02, 0x02, 0x80, 0x03, 0x30,
147 0x42, 0x03, 0x12, 0x0C, 0x8F, 0x30, 0x30, 0x06,
148/*0330*/0x12, 0x19, 0x95, 0x12, 0x0C, 0x8F, 0x12, 0x0D,
149 0x47, 0x12, 0x19, 0xF3, 0xE0, 0x54, 0xFB, 0xF0,
150/*0340*/0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40, 0x46, 0x43,
151 0xE1, 0x08, 0x12, 0x19, 0xF3, 0xE0, 0x44, 0x04,
152/*0350*/0xF0, 0xE5, 0xE4, 0x20, 0xE7, 0x2A, 0x12, 0x1A,
153 0x12, 0x75, 0x83, 0xD2, 0xE0, 0x54, 0x08, 0xD3,
154/*0360*/0x94, 0x00, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
155 0x7F, 0x00, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40,
156/*0370*/0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF,
157 0x5E, 0x60, 0x05, 0x12, 0x1D, 0xD7, 0x80, 0x17,
158/*0380*/0x12, 0x1A, 0x12, 0x75, 0x83, 0xD2, 0xE0, 0x44,
159 0x08, 0xF0, 0x02, 0x03, 0xFB, 0x12, 0x1A, 0x12,
160/*0390*/0x75, 0x83, 0xD2, 0xE0, 0x54, 0xF7, 0xF0, 0x12,
161 0x1E, 0x46, 0x7F, 0x08, 0x12, 0x17, 0x31, 0x74,
162/*03A0*/0x8E, 0xFE, 0x12, 0x1A, 0x12, 0x8E, 0x83, 0xE0,
163 0xF5, 0x10, 0x54, 0xFE, 0xF0, 0xE5, 0x10, 0x44,
164/*03B0*/0x01, 0xFF, 0xE5, 0x08, 0xFD, 0xED, 0x44, 0x07,
165 0xF5, 0x82, 0xEF, 0xF0, 0xE5, 0x10, 0x54, 0xFE,
166/*03C0*/0xFF, 0xED, 0x44, 0x07, 0xF5, 0x82, 0xEF, 0x12,
167 0x1A, 0x11, 0x75, 0x83, 0x86, 0xE0, 0x44, 0x10,
168/*03D0*/0x12, 0x1A, 0x11, 0xE0, 0x44, 0x10, 0xF0, 0x12,
169 0x19, 0xF3, 0xE0, 0x54, 0xFD, 0x44, 0x01, 0xFF,
170/*03E0*/0x12, 0x19, 0xF3, 0xEF, 0x12, 0x1A, 0x32, 0x30,
171 0x32, 0x0C, 0xE5, 0x08, 0x44, 0x08, 0xF5, 0x82,
172/*03F0*/0x75, 0x83, 0x82, 0x74, 0x05, 0xF0, 0xAF, 0x0B,
173 0x12, 0x18, 0xD7, 0x74, 0x10, 0x25, 0x08, 0xF5,
174/*0400*/0x08, 0x02, 0x00, 0x85, 0x05, 0x09, 0xE5, 0x09,
175 0xD3, 0x94, 0x07, 0x50, 0x03, 0x02, 0x00, 0x82,
176/*0410*/0xE5, 0x7E, 0xD3, 0x94, 0x00, 0x40, 0x04, 0x7F,
177 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x7E, 0xC3,
178/*0420*/0x94, 0xFA, 0x50, 0x04, 0x7E, 0x01, 0x80, 0x02,
179 0x7E, 0x00, 0xEE, 0x5F, 0x60, 0x02, 0x05, 0x7E,
180/*0430*/0x30, 0x35, 0x0B, 0x43, 0xE1, 0x01, 0x7F, 0x09,
181 0x12, 0x17, 0x31, 0x02, 0x00, 0x58, 0x53, 0xE1,
182/*0440*/0xFE, 0x02, 0x00, 0x58, 0x8E, 0x6A, 0x8F, 0x6B,
183 0x8C, 0x6C, 0x8D, 0x6D, 0x75, 0x6E, 0x01, 0x75,
184/*0450*/0x6F, 0x01, 0x75, 0x70, 0x01, 0xE4, 0xF5, 0x73,
185 0xF5, 0x74, 0xF5, 0x75, 0x90, 0x07, 0x2F, 0xF0,
186/*0460*/0xF5, 0x3C, 0xF5, 0x3E, 0xF5, 0x46, 0xF5, 0x47,
187 0xF5, 0x3D, 0xF5, 0x3F, 0xF5, 0x6F, 0xE5, 0x6F,
188/*0470*/0x70, 0x0F, 0xE5, 0x6B, 0x45, 0x6A, 0x12, 0x07,
189 0x2A, 0x75, 0x83, 0x80, 0x74, 0x3A, 0xF0, 0x80,
190/*0480*/0x09, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x80, 0x74,
191 0x1A, 0xF0, 0xE4, 0xF5, 0x6E, 0xC3, 0x74, 0x3F,
192/*0490*/0x95, 0x6E, 0xFF, 0x12, 0x08, 0x65, 0x75, 0x83,
193 0x82, 0xEF, 0xF0, 0x12, 0x1A, 0x4D, 0x12, 0x08,
194/*04A0*/0xC6, 0xE5, 0x33, 0xF0, 0x12, 0x08, 0xFA, 0x12,
195 0x08, 0xB1, 0x40, 0xE1, 0xE5, 0x6F, 0x70, 0x0B,
196/*04B0*/0x12, 0x07, 0x2A, 0x75, 0x83, 0x80, 0x74, 0x36,
197 0xF0, 0x80, 0x09, 0x12, 0x07, 0x2A, 0x75, 0x83,
198/*04C0*/0x80, 0x74, 0x16, 0xF0, 0x75, 0x6E, 0x01, 0x12,
199 0x07, 0x2A, 0x75, 0x83, 0xB4, 0xE5, 0x6E, 0xF0,
200/*04D0*/0x12, 0x1A, 0x4D, 0x74, 0x3F, 0x25, 0x6E, 0xF5,
201 0x82, 0xE4, 0x34, 0x00, 0xF5, 0x83, 0xE5, 0x33,
202/*04E0*/0xF0, 0x74, 0xBF, 0x25, 0x6E, 0xF5, 0x82, 0xE4,
203 0x34, 0x00, 0x12, 0x08, 0xB1, 0x40, 0xD8, 0xE4,
204/*04F0*/0xF5, 0x70, 0xF5, 0x46, 0xF5, 0x47, 0xF5, 0x6E,
205 0x12, 0x08, 0xFA, 0xF5, 0x83, 0xE0, 0xFE, 0x12,
206/*0500*/0x08, 0xC6, 0xE0, 0x7C, 0x00, 0x24, 0x00, 0xFF,
207 0xEC, 0x3E, 0xFE, 0xAD, 0x3B, 0xD3, 0xEF, 0x9D,
208/*0510*/0xEE, 0x9C, 0x50, 0x04, 0x7B, 0x01, 0x80, 0x02,
209 0x7B, 0x00, 0xE5, 0x70, 0x70, 0x04, 0x7A, 0x01,
210/*0520*/0x80, 0x02, 0x7A, 0x00, 0xEB, 0x5A, 0x60, 0x06,
211 0x85, 0x6E, 0x46, 0x75, 0x70, 0x01, 0xD3, 0xEF,
212/*0530*/0x9D, 0xEE, 0x9C, 0x50, 0x04, 0x7F, 0x01, 0x80,
213 0x02, 0x7F, 0x00, 0xE5, 0x70, 0xB4, 0x01, 0x04,
214/*0540*/0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF, 0x5E,
215 0x60, 0x03, 0x85, 0x6E, 0x47, 0x05, 0x6E, 0xE5,
216/*0550*/0x6E, 0x64, 0x7F, 0x70, 0xA3, 0xE5, 0x46, 0x60,
217 0x05, 0xE5, 0x47, 0xB4, 0x7E, 0x03, 0x85, 0x46,
218/*0560*/0x47, 0xE5, 0x6F, 0x70, 0x08, 0x85, 0x46, 0x76,
219 0x85, 0x47, 0x77, 0x80, 0x0E, 0xC3, 0x74, 0x7F,
220/*0570*/0x95, 0x46, 0xF5, 0x78, 0xC3, 0x74, 0x7F, 0x95,
221 0x47, 0xF5, 0x79, 0xE5, 0x6F, 0x70, 0x37, 0xE5,
222/*0580*/0x46, 0x65, 0x47, 0x70, 0x0C, 0x75, 0x73, 0x01,
223 0x75, 0x74, 0x01, 0xF5, 0x3C, 0xF5, 0x3D, 0x80,
224/*0590*/0x35, 0xE4, 0xF5, 0x4E, 0xC3, 0xE5, 0x47, 0x95,
225 0x46, 0xF5, 0x3C, 0xC3, 0x13, 0xF5, 0x71, 0x25,
226/*05A0*/0x46, 0xF5, 0x72, 0xC3, 0x94, 0x3F, 0x40, 0x05,
227 0xE4, 0xF5, 0x3D, 0x80, 0x40, 0xC3, 0x74, 0x3F,
228/*05B0*/0x95, 0x72, 0xF5, 0x3D, 0x80, 0x37, 0xE5, 0x46,
229 0x65, 0x47, 0x70, 0x0F, 0x75, 0x73, 0x01, 0x75,
230/*05C0*/0x75, 0x01, 0xF5, 0x3E, 0xF5, 0x3F, 0x75, 0x4E,
231 0x01, 0x80, 0x22, 0xE4, 0xF5, 0x4E, 0xC3, 0xE5,
232/*05D0*/0x47, 0x95, 0x46, 0xF5, 0x3E, 0xC3, 0x13, 0xF5,
233 0x71, 0x25, 0x46, 0xF5, 0x72, 0xD3, 0x94, 0x3F,
234/*05E0*/0x50, 0x05, 0xE4, 0xF5, 0x3F, 0x80, 0x06, 0xE5,
235 0x72, 0x24, 0xC1, 0xF5, 0x3F, 0x05, 0x6F, 0xE5,
236/*05F0*/0x6F, 0xC3, 0x94, 0x02, 0x50, 0x03, 0x02, 0x04,
237 0x6E, 0xE5, 0x6D, 0x45, 0x6C, 0x70, 0x02, 0x80,
238/*0600*/0x04, 0xE5, 0x74, 0x45, 0x75, 0x90, 0x07, 0x2F,
239 0xF0, 0x7F, 0x01, 0xE5, 0x3E, 0x60, 0x04, 0xE5,
240/*0610*/0x3C, 0x70, 0x14, 0xE4, 0xF5, 0x3C, 0xF5, 0x3D,
241 0xF5, 0x3E, 0xF5, 0x3F, 0x12, 0x08, 0xD2, 0x70,
242/*0620*/0x04, 0xF0, 0x02, 0x06, 0xA4, 0x80, 0x7A, 0xE5,
243 0x3C, 0xC3, 0x95, 0x3E, 0x40, 0x07, 0xE5, 0x3C,
244/*0630*/0x95, 0x3E, 0xFF, 0x80, 0x06, 0xC3, 0xE5, 0x3E,
245 0x95, 0x3C, 0xFF, 0xE5, 0x76, 0xD3, 0x95, 0x79,
246/*0640*/0x40, 0x05, 0x85, 0x76, 0x7A, 0x80, 0x03, 0x85,
247 0x79, 0x7A, 0xE5, 0x77, 0xC3, 0x95, 0x78, 0x50,
248/*0650*/0x05, 0x85, 0x77, 0x7B, 0x80, 0x03, 0x85, 0x78,
249 0x7B, 0xE5, 0x7B, 0xD3, 0x95, 0x7A, 0x40, 0x30,
250/*0660*/0xE5, 0x7B, 0x95, 0x7A, 0xF5, 0x3C, 0xF5, 0x3E,
251 0xC3, 0xE5, 0x7B, 0x95, 0x7A, 0x90, 0x07, 0x19,
252/*0670*/0xF0, 0xE5, 0x3C, 0xC3, 0x13, 0xF5, 0x71, 0x25,
253 0x7A, 0xF5, 0x72, 0xC3, 0x94, 0x3F, 0x40, 0x05,
254/*0680*/0xE4, 0xF5, 0x3D, 0x80, 0x1F, 0xC3, 0x74, 0x3F,
255 0x95, 0x72, 0xF5, 0x3D, 0xF5, 0x3F, 0x80, 0x14,
256/*0690*/0xE4, 0xF5, 0x3C, 0xF5, 0x3E, 0x90, 0x07, 0x19,
257 0xF0, 0x12, 0x08, 0xD2, 0x70, 0x03, 0xF0, 0x80,
258/*06A0*/0x03, 0x74, 0x01, 0xF0, 0x12, 0x08, 0x65, 0x75,
259 0x83, 0xD0, 0xE0, 0x54, 0x0F, 0xFE, 0xAD, 0x3C,
260/*06B0*/0x70, 0x02, 0x7E, 0x07, 0xBE, 0x0F, 0x02, 0x7E,
261 0x80, 0xEE, 0xFB, 0xEF, 0xD3, 0x9B, 0x74, 0x80,
262/*06C0*/0xF8, 0x98, 0x40, 0x1F, 0xE4, 0xF5, 0x3C, 0xF5,
263 0x3E, 0x12, 0x08, 0xD2, 0x70, 0x03, 0xF0, 0x80,
264/*06D0*/0x12, 0x74, 0x01, 0xF0, 0xE5, 0x08, 0xFB, 0xEB,
265 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xD2, 0xE0,
266/*06E0*/0x44, 0x10, 0xF0, 0xE5, 0x08, 0xFB, 0xEB, 0x44,
267 0x09, 0xF5, 0x82, 0x75, 0x83, 0x9E, 0xED, 0xF0,
268/*06F0*/0xEB, 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xCA,
269 0xED, 0xF0, 0x12, 0x08, 0x65, 0x75, 0x83, 0xCC,
270/*0700*/0xEF, 0xF0, 0x22, 0xE5, 0x08, 0x44, 0x07, 0xF5,
271 0x82, 0x75, 0x83, 0xBC, 0xE0, 0x54, 0xF0, 0xF0,
272/*0710*/0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82, 0x75, 0x83,
273 0xBE, 0xE0, 0x54, 0xF0, 0xF0, 0xE5, 0x08, 0x44,
274/*0720*/0x07, 0xF5, 0x82, 0x75, 0x83, 0xC0, 0xE0, 0x54,
275 0xF0, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82,
276/*0730*/0x22, 0xF0, 0x90, 0x07, 0x28, 0xE0, 0xFE, 0xA3,
277 0xE0, 0xF5, 0x82, 0x8E, 0x83, 0x22, 0x85, 0x42,
278/*0740*/0x42, 0x85, 0x41, 0x41, 0x85, 0x40, 0x40, 0x74,
279 0xC0, 0x2F, 0xF5, 0x82, 0x74, 0x02, 0x3E, 0xF5,
280/*0750*/0x83, 0xE5, 0x42, 0xF0, 0x74, 0xE0, 0x2F, 0xF5,
281 0x82, 0x74, 0x02, 0x3E, 0xF5, 0x83, 0x22, 0xE5,
282/*0760*/0x42, 0x29, 0xFD, 0xE4, 0x33, 0xFC, 0xE5, 0x3C,
283 0xC3, 0x9D, 0xEC, 0x64, 0x80, 0xF8, 0x74, 0x80,
284/*0770*/0x98, 0x22, 0xF5, 0x83, 0xE0, 0x90, 0x07, 0x22,
285 0x54, 0x1F, 0xFD, 0xE0, 0xFA, 0xA3, 0xE0, 0xF5,
286/*0780*/0x82, 0x8A, 0x83, 0xED, 0xF0, 0x22, 0x90, 0x07,
287 0x22, 0xE0, 0xFC, 0xA3, 0xE0, 0xF5, 0x82, 0x8C,
288/*0790*/0x83, 0x22, 0x90, 0x07, 0x24, 0xFF, 0xED, 0x44,
289 0x07, 0xCF, 0xF0, 0xA3, 0xEF, 0xF0, 0x22, 0x85,
290/*07A0*/0x38, 0x38, 0x85, 0x39, 0x39, 0x85, 0x3A, 0x3A,
291 0x74, 0xC0, 0x2F, 0xF5, 0x82, 0x74, 0x02, 0x3E,
292/*07B0*/0xF5, 0x83, 0x22, 0x90, 0x07, 0x26, 0xFF, 0xED,
293 0x44, 0x07, 0xCF, 0xF0, 0xA3, 0xEF, 0xF0, 0x22,
294/*07C0*/0xF0, 0x74, 0xA0, 0x2F, 0xF5, 0x82, 0x74, 0x02,
295 0x3E, 0xF5, 0x83, 0x22, 0x74, 0xC0, 0x25, 0x11,
296/*07D0*/0xF5, 0x82, 0xE4, 0x34, 0x01, 0xF5, 0x83, 0x22,
297 0x74, 0x00, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
298/*07E0*/0x02, 0xF5, 0x83, 0x22, 0x74, 0x60, 0x25, 0x11,
299 0xF5, 0x82, 0xE4, 0x34, 0x03, 0xF5, 0x83, 0x22,
300/*07F0*/0x74, 0x80, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
301 0x03, 0xF5, 0x83, 0x22, 0x74, 0xE0, 0x25, 0x11,
302/*0800*/0xF5, 0x82, 0xE4, 0x34, 0x03, 0xF5, 0x83, 0x22,
303 0x74, 0x40, 0x25, 0x11, 0xF5, 0x82, 0xE4, 0x34,
304/*0810*/0x06, 0xF5, 0x83, 0x22, 0x74, 0x80, 0x2F, 0xF5,
305 0x82, 0x74, 0x02, 0x3E, 0xF5, 0x83, 0x22, 0xAF,
306/*0820*/0x08, 0x7E, 0x00, 0xEF, 0x44, 0x07, 0xF5, 0x82,
307 0x22, 0xF5, 0x83, 0xE5, 0x82, 0x44, 0x07, 0xF5,
308/*0830*/0x82, 0xE5, 0x40, 0xF0, 0x22, 0x74, 0x40, 0x25,
309 0x11, 0xF5, 0x82, 0xE4, 0x34, 0x02, 0xF5, 0x83,
310/*0840*/0x22, 0x74, 0xC0, 0x25, 0x11, 0xF5, 0x82, 0xE4,
311 0x34, 0x03, 0xF5, 0x83, 0x22, 0x74, 0x00, 0x25,
312/*0850*/0x11, 0xF5, 0x82, 0xE4, 0x34, 0x06, 0xF5, 0x83,
313 0x22, 0x74, 0x20, 0x25, 0x11, 0xF5, 0x82, 0xE4,
314/*0860*/0x34, 0x06, 0xF5, 0x83, 0x22, 0xE5, 0x08, 0xFD,
315 0xED, 0x44, 0x07, 0xF5, 0x82, 0x22, 0xE5, 0x41,
316/*0870*/0xF0, 0xE5, 0x65, 0x64, 0x01, 0x45, 0x64, 0x22,
317 0x7E, 0x00, 0xFB, 0x7A, 0x00, 0xFD, 0x7C, 0x00,
318/*0880*/0x22, 0x74, 0x20, 0x25, 0x11, 0xF5, 0x82, 0xE4,
319 0x34, 0x02, 0x22, 0x74, 0xA0, 0x25, 0x11, 0xF5,
320/*0890*/0x82, 0xE4, 0x34, 0x03, 0x22, 0x85, 0x3E, 0x42,
321 0x85, 0x3F, 0x41, 0x8F, 0x40, 0x22, 0x85, 0x3C,
322/*08A0*/0x42, 0x85, 0x3D, 0x41, 0x8F, 0x40, 0x22, 0x75,
323 0x45, 0x3F, 0x90, 0x07, 0x20, 0xE4, 0xF0, 0xA3,
324/*08B0*/0x22, 0xF5, 0x83, 0xE5, 0x32, 0xF0, 0x05, 0x6E,
325 0xE5, 0x6E, 0xC3, 0x94, 0x40, 0x22, 0xF0, 0xE5,
326/*08C0*/0x08, 0x44, 0x06, 0xF5, 0x82, 0x22, 0x74, 0x00,
327 0x25, 0x6E, 0xF5, 0x82, 0xE4, 0x34, 0x00, 0xF5,
328/*08D0*/0x83, 0x22, 0xE5, 0x6D, 0x45, 0x6C, 0x90, 0x07,
329 0x2F, 0x22, 0xE4, 0xF9, 0xE5, 0x3C, 0xD3, 0x95,
330/*08E0*/0x3E, 0x22, 0x74, 0x80, 0x2E, 0xF5, 0x82, 0xE4,
331 0x34, 0x02, 0xF5, 0x83, 0xE0, 0x22, 0x74, 0xA0,
332/*08F0*/0x2E, 0xF5, 0x82, 0xE4, 0x34, 0x02, 0xF5, 0x83,
333 0xE0, 0x22, 0x74, 0x80, 0x25, 0x6E, 0xF5, 0x82,
334/*0900*/0xE4, 0x34, 0x00, 0x22, 0x25, 0x42, 0xFD, 0xE4,
335 0x33, 0xFC, 0x22, 0x85, 0x42, 0x42, 0x85, 0x41,
336/*0910*/0x41, 0x85, 0x40, 0x40, 0x22, 0xED, 0x4C, 0x60,
337 0x03, 0x02, 0x09, 0xE5, 0xEF, 0x4E, 0x70, 0x37,
338/*0920*/0x90, 0x07, 0x26, 0x12, 0x07, 0x89, 0xE0, 0xFD,
339 0x12, 0x07, 0xCC, 0xED, 0xF0, 0x90, 0x07, 0x28,
340/*0930*/0x12, 0x07, 0x89, 0xE0, 0xFD, 0x12, 0x07, 0xD8,
341 0xED, 0xF0, 0x12, 0x07, 0x86, 0xE0, 0x54, 0x1F,
342/*0940*/0xFD, 0x12, 0x08, 0x81, 0xF5, 0x83, 0xED, 0xF0,
343 0x90, 0x07, 0x24, 0x12, 0x07, 0x89, 0xE0, 0x54,
344/*0950*/0x1F, 0xFD, 0x12, 0x08, 0x35, 0xED, 0xF0, 0xEF,
345 0x64, 0x04, 0x4E, 0x70, 0x37, 0x90, 0x07, 0x26,
346/*0960*/0x12, 0x07, 0x89, 0xE0, 0xFD, 0x12, 0x07, 0xE4,
347 0xED, 0xF0, 0x90, 0x07, 0x28, 0x12, 0x07, 0x89,
348/*0970*/0xE0, 0xFD, 0x12, 0x07, 0xF0, 0xED, 0xF0, 0x12,
349 0x07, 0x86, 0xE0, 0x54, 0x1F, 0xFD, 0x12, 0x08,
350/*0980*/0x8B, 0xF5, 0x83, 0xED, 0xF0, 0x90, 0x07, 0x24,
351 0x12, 0x07, 0x89, 0xE0, 0x54, 0x1F, 0xFD, 0x12,
352/*0990*/0x08, 0x41, 0xED, 0xF0, 0xEF, 0x64, 0x01, 0x4E,
353 0x70, 0x04, 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00,
354/*09A0*/0xEF, 0x64, 0x02, 0x4E, 0x70, 0x04, 0x7F, 0x01,
355 0x80, 0x02, 0x7F, 0x00, 0xEF, 0x4D, 0x60, 0x78,
356/*09B0*/0x90, 0x07, 0x26, 0x12, 0x07, 0x35, 0xE0, 0xFF,
357 0x12, 0x07, 0xFC, 0xEF, 0x12, 0x07, 0x31, 0xE0,
358/*09C0*/0xFF, 0x12, 0x08, 0x08, 0xEF, 0xF0, 0x90, 0x07,
359 0x22, 0x12, 0x07, 0x35, 0xE0, 0x54, 0x1F, 0xFF,
360/*09D0*/0x12, 0x08, 0x4D, 0xEF, 0xF0, 0x90, 0x07, 0x24,
361 0x12, 0x07, 0x35, 0xE0, 0x54, 0x1F, 0xFF, 0x12,
362/*09E0*/0x08, 0x59, 0xEF, 0xF0, 0x22, 0x12, 0x07, 0xCC,
363 0xE4, 0xF0, 0x12, 0x07, 0xD8, 0xE4, 0xF0, 0x12,
364/*09F0*/0x08, 0x81, 0xF5, 0x83, 0xE4, 0xF0, 0x12, 0x08,
365 0x35, 0x74, 0x14, 0xF0, 0x12, 0x07, 0xE4, 0xE4,
366/*0A00*/0xF0, 0x12, 0x07, 0xF0, 0xE4, 0xF0, 0x12, 0x08,
367 0x8B, 0xF5, 0x83, 0xE4, 0xF0, 0x12, 0x08, 0x41,
368/*0A10*/0x74, 0x14, 0xF0, 0x12, 0x07, 0xFC, 0xE4, 0xF0,
369 0x12, 0x08, 0x08, 0xE4, 0xF0, 0x12, 0x08, 0x4D,
370/*0A20*/0xE4, 0xF0, 0x12, 0x08, 0x59, 0x74, 0x14, 0xF0,
371 0x22, 0x53, 0xF9, 0xF7, 0x75, 0xFC, 0x10, 0xE4,
372/*0A30*/0xF5, 0xFD, 0x75, 0xFE, 0x30, 0xF5, 0xFF, 0xE5,
373 0xE7, 0x20, 0xE7, 0x03, 0x43, 0xF9, 0x08, 0xE5,
374/*0A40*/0xE6, 0x20, 0xE7, 0x0B, 0x78, 0xFF, 0xE4, 0xF6,
375 0xD8, 0xFD, 0x53, 0xE6, 0xFE, 0x80, 0x09, 0x78,
376/*0A50*/0x08, 0xE4, 0xF6, 0xD8, 0xFD, 0x53, 0xE6, 0xFE,
377 0x75, 0x81, 0x80, 0xE4, 0xF5, 0xA8, 0xD2, 0xA8,
378/*0A60*/0xC2, 0xA9, 0xD2, 0xAF, 0xE5, 0xE2, 0x20, 0xE5,
379 0x05, 0x20, 0xE6, 0x02, 0x80, 0x03, 0x43, 0xE1,
380/*0A70*/0x02, 0xE5, 0xE2, 0x20, 0xE0, 0x0E, 0x90, 0x00,
381 0x00, 0x7F, 0x00, 0x7E, 0x08, 0xE4, 0xF0, 0xA3,
382/*0A80*/0xDF, 0xFC, 0xDE, 0xFA, 0x02, 0x0A, 0xDB, 0x43,
383 0xFA, 0x01, 0xC0, 0xE0, 0xC0, 0xF0, 0xC0, 0x83,
384/*0A90*/0xC0, 0x82, 0xC0, 0xD0, 0x12, 0x1C, 0xE7, 0xD0,
385 0xD0, 0xD0, 0x82, 0xD0, 0x83, 0xD0, 0xF0, 0xD0,
386/*0AA0*/0xE0, 0x53, 0xFA, 0xFE, 0x32, 0x02, 0x1B, 0x55,
387 0xE4, 0x93, 0xA3, 0xF8, 0xE4, 0x93, 0xA3, 0xF6,
388/*0AB0*/0x08, 0xDF, 0xF9, 0x80, 0x29, 0xE4, 0x93, 0xA3,
389 0xF8, 0x54, 0x07, 0x24, 0x0C, 0xC8, 0xC3, 0x33,
390/*0AC0*/0xC4, 0x54, 0x0F, 0x44, 0x20, 0xC8, 0x83, 0x40,
391 0x04, 0xF4, 0x56, 0x80, 0x01, 0x46, 0xF6, 0xDF,
392/*0AD0*/0xE4, 0x80, 0x0B, 0x01, 0x02, 0x04, 0x08, 0x10,
393 0x20, 0x40, 0x80, 0x90, 0x00, 0x3F, 0xE4, 0x7E,
394/*0AE0*/0x01, 0x93, 0x60, 0xC1, 0xA3, 0xFF, 0x54, 0x3F,
395 0x30, 0xE5, 0x09, 0x54, 0x1F, 0xFE, 0xE4, 0x93,
396/*0AF0*/0xA3, 0x60, 0x01, 0x0E, 0xCF, 0x54, 0xC0, 0x25,
397 0xE0, 0x60, 0xAD, 0x40, 0xB8, 0x80, 0xFE, 0x8C,
398/*0B00*/0x64, 0x8D, 0x65, 0x8A, 0x66, 0x8B, 0x67, 0xE4,
399 0xF5, 0x69, 0xEF, 0x4E, 0x70, 0x03, 0x02, 0x1D,
400/*0B10*/0x55, 0xE4, 0xF5, 0x68, 0xE5, 0x67, 0x45, 0x66,
401 0x70, 0x32, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x90,
402/*0B20*/0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE4,
403 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0xE4, 0x12,
404/*0B30*/0x08, 0x70, 0x70, 0x29, 0x12, 0x07, 0x2A, 0x75,
405 0x83, 0x92, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83,
406/*0B40*/0xC6, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC8,
407 0xE4, 0xF0, 0x80, 0x11, 0x90, 0x07, 0x26, 0x12,
408/*0B50*/0x07, 0x35, 0xE4, 0x12, 0x08, 0x70, 0x70, 0x05,
409 0x12, 0x07, 0x32, 0xE4, 0xF0, 0x12, 0x1D, 0x55,
410/*0B60*/0x12, 0x1E, 0xBF, 0xE5, 0x67, 0x45, 0x66, 0x70,
411 0x33, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x90, 0xE5,
412/*0B70*/0x41, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE5,
413 0x41, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0x12,
414/*0B80*/0x08, 0x6E, 0x70, 0x29, 0x12, 0x07, 0x2A, 0x75,
415 0x83, 0x92, 0xE5, 0x40, 0x12, 0x07, 0x29, 0x75,
416/*0B90*/0x83, 0xC6, 0xE5, 0x40, 0x12, 0x07, 0x29, 0x75,
417 0x83, 0xC8, 0x80, 0x0E, 0x90, 0x07, 0x26, 0x12,
418/*0BA0*/0x07, 0x35, 0x12, 0x08, 0x6E, 0x70, 0x06, 0x12,
419 0x07, 0x32, 0xE5, 0x40, 0xF0, 0xAF, 0x69, 0x7E,
420/*0BB0*/0x00, 0xAD, 0x67, 0xAC, 0x66, 0x12, 0x04, 0x44,
421 0x12, 0x07, 0x2A, 0x75, 0x83, 0xCA, 0xE0, 0xD3,
422/*0BC0*/0x94, 0x00, 0x50, 0x0C, 0x05, 0x68, 0xE5, 0x68,
423 0xC3, 0x94, 0x05, 0x50, 0x03, 0x02, 0x0B, 0x14,
424/*0BD0*/0x22, 0x8C, 0x60, 0x8D, 0x61, 0x12, 0x08, 0xDA,
425 0x74, 0x20, 0x40, 0x0D, 0x2F, 0xF5, 0x82, 0x74,
426/*0BE0*/0x03, 0x3E, 0xF5, 0x83, 0xE5, 0x3E, 0xF0, 0x80,
427 0x0B, 0x2F, 0xF5, 0x82, 0x74, 0x03, 0x3E, 0xF5,
428/*0BF0*/0x83, 0xE5, 0x3C, 0xF0, 0xE5, 0x3C, 0xD3, 0x95,
429 0x3E, 0x40, 0x3C, 0xE5, 0x61, 0x45, 0x60, 0x70,
430/*0C00*/0x10, 0xE9, 0x12, 0x09, 0x04, 0xE5, 0x3E, 0x12,
431 0x07, 0x68, 0x40, 0x3B, 0x12, 0x08, 0x95, 0x80,
432/*0C10*/0x18, 0xE5, 0x3E, 0xC3, 0x95, 0x38, 0x40, 0x1D,
433 0x85, 0x3E, 0x38, 0xE5, 0x3E, 0x60, 0x05, 0x85,
434/*0C20*/0x3F, 0x39, 0x80, 0x03, 0x85, 0x39, 0x39, 0x8F,
435 0x3A, 0x12, 0x08, 0x14, 0xE5, 0x3E, 0x12, 0x07,
436/*0C30*/0xC0, 0xE5, 0x3F, 0xF0, 0x22, 0x80, 0x43, 0xE5,
437 0x61, 0x45, 0x60, 0x70, 0x19, 0x12, 0x07, 0x5F,
438/*0C40*/0x40, 0x05, 0x12, 0x08, 0x9E, 0x80, 0x27, 0x12,
439 0x09, 0x0B, 0x12, 0x08, 0x14, 0xE5, 0x42, 0x12,
440/*0C50*/0x07, 0xC0, 0xE5, 0x41, 0xF0, 0x22, 0xE5, 0x3C,
441 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3C, 0x38,
442/*0C60*/0xE5, 0x3C, 0x60, 0x05, 0x85, 0x3D, 0x39, 0x80,
443 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12, 0x08,
444/*0C70*/0x14, 0xE5, 0x3C, 0x12, 0x07, 0xC0, 0xE5, 0x3D,
445 0xF0, 0x22, 0x85, 0x38, 0x38, 0x85, 0x39, 0x39,
446/*0C80*/0x85, 0x3A, 0x3A, 0x12, 0x08, 0x14, 0xE5, 0x38,
447 0x12, 0x07, 0xC0, 0xE5, 0x39, 0xF0, 0x22, 0x7F,
448/*0C90*/0x06, 0x12, 0x17, 0x31, 0x12, 0x1D, 0x23, 0x12,
449 0x0E, 0x04, 0x12, 0x0E, 0x33, 0xE0, 0x44, 0x0A,
450/*0CA0*/0xF0, 0x74, 0x8E, 0xFE, 0x12, 0x0E, 0x04, 0x12,
451 0x0E, 0x0B, 0xEF, 0xF0, 0xE5, 0x28, 0x30, 0xE5,
452/*0CB0*/0x03, 0xD3, 0x80, 0x01, 0xC3, 0x40, 0x05, 0x75,
453 0x14, 0x20, 0x80, 0x03, 0x75, 0x14, 0x08, 0x12,
454/*0CC0*/0x0E, 0x04, 0x75, 0x83, 0x8A, 0xE5, 0x14, 0xF0,
455 0xB4, 0xFF, 0x05, 0x75, 0x12, 0x80, 0x80, 0x06,
456/*0CD0*/0xE5, 0x14, 0xC3, 0x13, 0xF5, 0x12, 0xE4, 0xF5,
457 0x16, 0xF5, 0x7F, 0x12, 0x19, 0x36, 0x12, 0x13,
458/*0CE0*/0xA3, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x50, 0x09,
459 0x05, 0x16, 0xE5, 0x16, 0xC3, 0x94, 0x14, 0x40,
460/*0CF0*/0xEA, 0xE5, 0xE4, 0x20, 0xE7, 0x28, 0x12, 0x0E,
461 0x04, 0x75, 0x83, 0xD2, 0xE0, 0x54, 0x08, 0xD3,
462/*0D00*/0x94, 0x00, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
463 0x7F, 0x00, 0xE5, 0x0A, 0xC3, 0x94, 0x01, 0x40,
464/*0D10*/0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEF,
465 0x5E, 0x60, 0x03, 0x12, 0x1D, 0xD7, 0xE5, 0x7F,
466/*0D20*/0xC3, 0x94, 0x11, 0x40, 0x14, 0x12, 0x0E, 0x04,
467 0x75, 0x83, 0xD2, 0xE0, 0x44, 0x80, 0xF0, 0xE5,
468/*0D30*/0xE4, 0x20, 0xE7, 0x0F, 0x12, 0x1D, 0xD7, 0x80,
469 0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0xD2, 0xE0,
470/*0D40*/0x54, 0x7F, 0xF0, 0x12, 0x1D, 0x23, 0x22, 0x74,
471 0x8A, 0x85, 0x08, 0x82, 0xF5, 0x83, 0xE5, 0x17,
472/*0D50*/0xF0, 0x12, 0x0E, 0x3A, 0xE4, 0xF0, 0x90, 0x07,
473 0x02, 0xE0, 0x12, 0x0E, 0x17, 0x75, 0x83, 0x90,
474/*0D60*/0xEF, 0xF0, 0x74, 0x92, 0xFE, 0xE5, 0x08, 0x44,
475 0x07, 0xFF, 0xF5, 0x82, 0x8E, 0x83, 0xE0, 0x54,
476/*0D70*/0xC0, 0xFD, 0x90, 0x07, 0x03, 0xE0, 0x54, 0x3F,
477 0x4D, 0x8F, 0x82, 0x8E, 0x83, 0xF0, 0x90, 0x07,
478/*0D80*/0x04, 0xE0, 0x12, 0x0E, 0x17, 0x75, 0x83, 0x82,
479 0xEF, 0xF0, 0x90, 0x07, 0x05, 0xE0, 0xFF, 0xED,
480/*0D90*/0x44, 0x07, 0xF5, 0x82, 0x75, 0x83, 0xB4, 0xEF,
481 0x12, 0x0E, 0x03, 0x75, 0x83, 0x80, 0xE0, 0x54,
482/*0DA0*/0xBF, 0xF0, 0x30, 0x37, 0x0A, 0x12, 0x0E, 0x91,
483 0x75, 0x83, 0x94, 0xE0, 0x44, 0x80, 0xF0, 0x30,
484/*0DB0*/0x38, 0x0A, 0x12, 0x0E, 0x91, 0x75, 0x83, 0x92,
485 0xE0, 0x44, 0x80, 0xF0, 0xE5, 0x28, 0x30, 0xE4,
486/*0DC0*/0x1A, 0x20, 0x39, 0x0A, 0x12, 0x0E, 0x04, 0x75,
487 0x83, 0x88, 0xE0, 0x54, 0x7F, 0xF0, 0x20, 0x3A,
488/*0DD0*/0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0x88, 0xE0,
489 0x54, 0xBF, 0xF0, 0x74, 0x8C, 0xFE, 0x12, 0x0E,
490/*0DE0*/0x04, 0x8E, 0x83, 0xE0, 0x54, 0x0F, 0x12, 0x0E,
491 0x03, 0x75, 0x83, 0x86, 0xE0, 0x54, 0xBF, 0xF0,
492/*0DF0*/0xE5, 0x08, 0x44, 0x06, 0x12, 0x0D, 0xFD, 0x75,
493 0x83, 0x8A, 0xE4, 0xF0, 0x22, 0xF5, 0x82, 0x75,
494/*0E00*/0x83, 0x82, 0xE4, 0xF0, 0xE5, 0x08, 0x44, 0x07,
495 0xF5, 0x82, 0x22, 0x8E, 0x83, 0xE0, 0xF5, 0x10,
496/*0E10*/0x54, 0xFE, 0xF0, 0xE5, 0x10, 0x44, 0x01, 0xFF,
497 0xE5, 0x08, 0xFD, 0xED, 0x44, 0x07, 0xF5, 0x82,
498/*0E20*/0x22, 0xE5, 0x15, 0xC4, 0x54, 0x07, 0xFF, 0xE5,
499 0x08, 0xFD, 0xED, 0x44, 0x08, 0xF5, 0x82, 0x75,
500/*0E30*/0x83, 0x82, 0x22, 0x75, 0x83, 0x80, 0xE0, 0x44,
501 0x40, 0xF0, 0xE5, 0x08, 0x44, 0x08, 0xF5, 0x82,
502/*0E40*/0x75, 0x83, 0x8A, 0x22, 0xE5, 0x16, 0x25, 0xE0,
503 0x25, 0xE0, 0x24, 0xAF, 0xF5, 0x82, 0xE4, 0x34,
504/*0E50*/0x1A, 0xF5, 0x83, 0xE4, 0x93, 0xF5, 0x0D, 0x22,
505 0x43, 0xE1, 0x10, 0x43, 0xE1, 0x80, 0x53, 0xE1,
506/*0E60*/0xFD, 0x85, 0xE1, 0x10, 0x22, 0xE5, 0x16, 0x25,
507 0xE0, 0x25, 0xE0, 0x24, 0xB2, 0xF5, 0x82, 0xE4,
508/*0E70*/0x34, 0x1A, 0xF5, 0x83, 0xE4, 0x93, 0x22, 0x85,
509 0x55, 0x82, 0x85, 0x54, 0x83, 0xE5, 0x15, 0xF0,
510/*0E80*/0x22, 0xE5, 0xE2, 0x54, 0x20, 0xD3, 0x94, 0x00,
511 0x22, 0xE5, 0xE2, 0x54, 0x40, 0xD3, 0x94, 0x00,
512/*0E90*/0x22, 0xE5, 0x08, 0x44, 0x06, 0xF5, 0x82, 0x22,
513 0xFD, 0xE5, 0x08, 0xFB, 0xEB, 0x44, 0x07, 0xF5,
514/*0EA0*/0x82, 0x22, 0x53, 0xF9, 0xF7, 0x75, 0xFE, 0x30,
515 0x22, 0xEF, 0x4E, 0x70, 0x26, 0x12, 0x07, 0xCC,
516/*0EB0*/0xE0, 0xFD, 0x90, 0x07, 0x26, 0x12, 0x07, 0x7B,
517 0x12, 0x07, 0xD8, 0xE0, 0xFD, 0x90, 0x07, 0x28,
518/*0EC0*/0x12, 0x07, 0x7B, 0x12, 0x08, 0x81, 0x12, 0x07,
519 0x72, 0x12, 0x08, 0x35, 0xE0, 0x90, 0x07, 0x24,
520/*0ED0*/0x12, 0x07, 0x78, 0xEF, 0x64, 0x04, 0x4E, 0x70,
521 0x29, 0x12, 0x07, 0xE4, 0xE0, 0xFD, 0x90, 0x07,
522/*0EE0*/0x26, 0x12, 0x07, 0x7B, 0x12, 0x07, 0xF0, 0xE0,
523 0xFD, 0x90, 0x07, 0x28, 0x12, 0x07, 0x7B, 0x12,
524/*0EF0*/0x08, 0x8B, 0x12, 0x07, 0x72, 0x12, 0x08, 0x41,
525 0xE0, 0x54, 0x1F, 0xFD, 0x90, 0x07, 0x24, 0x12,
526/*0F00*/0x07, 0x7B, 0xEF, 0x64, 0x01, 0x4E, 0x70, 0x04,
527 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00, 0xEF, 0x64,
528/*0F10*/0x02, 0x4E, 0x70, 0x04, 0x7F, 0x01, 0x80, 0x02,
529 0x7F, 0x00, 0xEF, 0x4D, 0x60, 0x35, 0x12, 0x07,
530/*0F20*/0xFC, 0xE0, 0xFF, 0x90, 0x07, 0x26, 0x12, 0x07,
531 0x89, 0xEF, 0xF0, 0x12, 0x08, 0x08, 0xE0, 0xFF,
532/*0F30*/0x90, 0x07, 0x28, 0x12, 0x07, 0x89, 0xEF, 0xF0,
533 0x12, 0x08, 0x4D, 0xE0, 0x54, 0x1F, 0xFF, 0x12,
534/*0F40*/0x07, 0x86, 0xEF, 0xF0, 0x12, 0x08, 0x59, 0xE0,
535 0x54, 0x1F, 0xFF, 0x90, 0x07, 0x24, 0x12, 0x07,
536/*0F50*/0x89, 0xEF, 0xF0, 0x22, 0xE4, 0xF5, 0x53, 0x12,
537 0x0E, 0x81, 0x40, 0x04, 0x7F, 0x01, 0x80, 0x02,
538/*0F60*/0x7F, 0x00, 0x12, 0x0E, 0x89, 0x40, 0x04, 0x7E,
539 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x70,
540/*0F70*/0x03, 0x02, 0x0F, 0xF6, 0x85, 0xE1, 0x10, 0x43,
541 0xE1, 0x02, 0x53, 0xE1, 0x0F, 0x85, 0xE1, 0x10,
542/*0F80*/0xE4, 0xF5, 0x51, 0xE5, 0xE3, 0x54, 0x3F, 0xF5,
543 0x52, 0x12, 0x0E, 0x89, 0x40, 0x1D, 0xAD, 0x52,
544/*0F90*/0xAF, 0x51, 0x12, 0x11, 0x18, 0xEF, 0x60, 0x08,
545 0x85, 0xE1, 0x10, 0x43, 0xE1, 0x40, 0x80, 0x0B,
546/*0FA0*/0x53, 0xE1, 0xBF, 0x12, 0x0E, 0x58, 0x12, 0x00,
547 0x06, 0x80, 0xFB, 0xE5, 0xE3, 0x54, 0x3F, 0xF5,
548/*0FB0*/0x51, 0xE5, 0xE4, 0x54, 0x3F, 0xF5, 0x52, 0x12,
549 0x0E, 0x81, 0x40, 0x1D, 0xAD, 0x52, 0xAF, 0x51,
550/*0FC0*/0x12, 0x11, 0x18, 0xEF, 0x60, 0x08, 0x85, 0xE1,
551 0x10, 0x43, 0xE1, 0x20, 0x80, 0x0B, 0x53, 0xE1,
552/*0FD0*/0xDF, 0x12, 0x0E, 0x58, 0x12, 0x00, 0x06, 0x80,
553 0xFB, 0x12, 0x0E, 0x81, 0x40, 0x04, 0x7F, 0x01,
554/*0FE0*/0x80, 0x02, 0x7F, 0x00, 0x12, 0x0E, 0x89, 0x40,
555 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE,
556/*0FF0*/0x4F, 0x60, 0x03, 0x12, 0x0E, 0x5B, 0x22, 0x12,
557 0x0E, 0x21, 0xEF, 0xF0, 0x12, 0x10, 0x91, 0x22,
558/*1000*/0x02, 0x11, 0x00, 0x02, 0x10, 0x40, 0x02, 0x10,
559 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
560/*1010*/0x01, 0x20, 0x01, 0x20, 0xE4, 0xF5, 0x57, 0x12,
561 0x16, 0xBD, 0x12, 0x16, 0x44, 0xE4, 0x12, 0x10,
562/*1020*/0x56, 0x12, 0x14, 0xB7, 0x90, 0x07, 0x26, 0x12,
563 0x07, 0x35, 0xE4, 0x12, 0x07, 0x31, 0xE4, 0xF0,
564/*1030*/0x12, 0x10, 0x56, 0x12, 0x14, 0xB7, 0x90, 0x07,
565 0x26, 0x12, 0x07, 0x35, 0xE5, 0x41, 0x12, 0x07,
566/*1040*/0x31, 0xE5, 0x40, 0xF0, 0xAF, 0x57, 0x7E, 0x00,
567 0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44, 0xAF,
568/*1050*/0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0xFF, 0x90,
569 0x07, 0x20, 0xA3, 0xE0, 0xFD, 0xE4, 0xF5, 0x56,
570/*1060*/0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA, 0x12,
571 0x11, 0x51, 0x7F, 0x0F, 0x7D, 0x18, 0xE4, 0xF5,
572/*1070*/0x56, 0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA,
573 0x12, 0x15, 0x41, 0xAF, 0x56, 0x7E, 0x00, 0x12,
574/*1080*/0x1A, 0xFF, 0xE4, 0xFF, 0xF5, 0x56, 0x7D, 0x1F,
575 0xF5, 0x40, 0xFE, 0xFC, 0xAB, 0x56, 0xFA, 0x22,
576/*1090*/0x22, 0xE4, 0xF5, 0x55, 0xE5, 0x08, 0xFD, 0x74,
577 0xA0, 0xF5, 0x56, 0xED, 0x44, 0x07, 0xF5, 0x57,
578/*10A0*/0xE5, 0x28, 0x30, 0xE5, 0x03, 0xD3, 0x80, 0x01,
579 0xC3, 0x40, 0x05, 0x7F, 0x28, 0xEF, 0x80, 0x04,
580/*10B0*/0x7F, 0x14, 0xEF, 0xC3, 0x13, 0xF5, 0x54, 0xE4,
581 0xF9, 0x12, 0x0E, 0x18, 0x75, 0x83, 0x8E, 0xE0,
582/*10C0*/0xF5, 0x10, 0xCE, 0xEF, 0xCE, 0xEE, 0xD3, 0x94,
583 0x00, 0x40, 0x26, 0xE5, 0x10, 0x54, 0xFE, 0x12,
584/*10D0*/0x0E, 0x98, 0x75, 0x83, 0x8E, 0xED, 0xF0, 0xE5,
585 0x10, 0x44, 0x01, 0xFD, 0xEB, 0x44, 0x07, 0xF5,
586/*10E0*/0x82, 0xED, 0xF0, 0x85, 0x57, 0x82, 0x85, 0x56,
587 0x83, 0xE0, 0x30, 0xE3, 0x01, 0x09, 0x1E, 0x80,
588/*10F0*/0xD4, 0xC2, 0x34, 0xE9, 0xC3, 0x95, 0x54, 0x40,
589 0x02, 0xD2, 0x34, 0x22, 0x02, 0x00, 0x06, 0x22,
590/*1100*/0x30, 0x30, 0x11, 0x90, 0x10, 0x00, 0xE4, 0x93,
591 0xF5, 0x10, 0x90, 0x10, 0x10, 0xE4, 0x93, 0xF5,
592/*1110*/0x10, 0x12, 0x10, 0x90, 0x12, 0x11, 0x50, 0x22,
593 0xE4, 0xFC, 0xC3, 0xED, 0x9F, 0xFA, 0xEF, 0xF5,
594/*1120*/0x83, 0x75, 0x82, 0x00, 0x79, 0xFF, 0xE4, 0x93,
595 0xCC, 0x6C, 0xCC, 0xA3, 0xD9, 0xF8, 0xDA, 0xF6,
596/*1130*/0xE5, 0xE2, 0x30, 0xE4, 0x02, 0x8C, 0xE5, 0xED,
597 0x24, 0xFF, 0xFF, 0xEF, 0x75, 0x82, 0xFF, 0xF5,
598/*1140*/0x83, 0xE4, 0x93, 0x6C, 0x70, 0x03, 0x7F, 0x01,
599 0x22, 0x7F, 0x00, 0x22, 0x22, 0x11, 0x00, 0x00,
600/*1150*/0x22, 0x8E, 0x58, 0x8F, 0x59, 0x8C, 0x5A, 0x8D,
601 0x5B, 0x8A, 0x5C, 0x8B, 0x5D, 0x75, 0x5E, 0x01,
602/*1160*/0xE4, 0xF5, 0x5F, 0xF5, 0x60, 0xF5, 0x62, 0x12,
603 0x07, 0x2A, 0x75, 0x83, 0xD0, 0xE0, 0xFF, 0xC4,
604/*1170*/0x54, 0x0F, 0xF5, 0x61, 0x12, 0x1E, 0xA5, 0x85,
605 0x59, 0x5E, 0xD3, 0xE5, 0x5E, 0x95, 0x5B, 0xE5,
606/*1180*/0x5A, 0x12, 0x07, 0x6B, 0x50, 0x4B, 0x12, 0x07,
607 0x03, 0x75, 0x83, 0xBC, 0xE0, 0x45, 0x5E, 0x12,
608/*1190*/0x07, 0x29, 0x75, 0x83, 0xBE, 0xE0, 0x45, 0x5E,
609 0x12, 0x07, 0x29, 0x75, 0x83, 0xC0, 0xE0, 0x45,
610/*11A0*/0x5E, 0xF0, 0xAF, 0x5F, 0xE5, 0x60, 0x12, 0x08,
611 0x78, 0x12, 0x0A, 0xFF, 0xAF, 0x62, 0x7E, 0x00,
612/*11B0*/0xAD, 0x5D, 0xAC, 0x5C, 0x12, 0x04, 0x44, 0xE5,
613 0x61, 0xAF, 0x5E, 0x7E, 0x00, 0xB4, 0x03, 0x05,
614/*11C0*/0x12, 0x1E, 0x21, 0x80, 0x07, 0xAD, 0x5D, 0xAC,
615 0x5C, 0x12, 0x13, 0x17, 0x05, 0x5E, 0x02, 0x11,
616/*11D0*/0x7A, 0x12, 0x07, 0x03, 0x75, 0x83, 0xBC, 0xE0,
617 0x45, 0x40, 0x12, 0x07, 0x29, 0x75, 0x83, 0xBE,
618/*11E0*/0xE0, 0x45, 0x40, 0x12, 0x07, 0x29, 0x75, 0x83,
619 0xC0, 0xE0, 0x45, 0x40, 0xF0, 0x22, 0x8E, 0x58,
620/*11F0*/0x8F, 0x59, 0x75, 0x5A, 0x01, 0x79, 0x01, 0x75,
621 0x5B, 0x01, 0xE4, 0xFB, 0x12, 0x07, 0x2A, 0x75,
622/*1200*/0x83, 0xAE, 0xE0, 0x54, 0x1A, 0xFF, 0x12, 0x08,
623 0x65, 0xE0, 0xC4, 0x13, 0x54, 0x07, 0xFE, 0xEF,
624/*1210*/0x70, 0x0C, 0xEE, 0x65, 0x35, 0x70, 0x07, 0x90,
625 0x07, 0x2F, 0xE0, 0xB4, 0x01, 0x0D, 0xAF, 0x35,
626/*1220*/0x7E, 0x00, 0x12, 0x0E, 0xA9, 0xCF, 0xEB, 0xCF,
627 0x02, 0x1E, 0x60, 0xE5, 0x59, 0x64, 0x02, 0x45,
628/*1230*/0x58, 0x70, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
629 0x00, 0xE5, 0x59, 0x45, 0x58, 0x70, 0x04, 0x7E,
630/*1240*/0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x60,
631 0x23, 0x85, 0x41, 0x49, 0x85, 0x40, 0x4B, 0xE5,
632/*1250*/0x59, 0x45, 0x58, 0x70, 0x2C, 0xAF, 0x5A, 0xFE,
633 0xCD, 0xE9, 0xCD, 0xFC, 0xAB, 0x59, 0xAA, 0x58,
634/*1260*/0x12, 0x0A, 0xFF, 0xAF, 0x5B, 0x7E, 0x00, 0x12,
635 0x1E, 0x60, 0x80, 0x15, 0xAF, 0x5B, 0x7E, 0x00,
636/*1270*/0x12, 0x1E, 0x60, 0x90, 0x07, 0x26, 0x12, 0x07,
637 0x35, 0xE5, 0x49, 0x12, 0x07, 0x31, 0xE5, 0x4B,
638/*1280*/0xF0, 0xE4, 0xFD, 0xAF, 0x35, 0xFE, 0xFC, 0x12,
639 0x09, 0x15, 0x22, 0x8C, 0x64, 0x8D, 0x65, 0x12,
640/*1290*/0x08, 0xDA, 0x40, 0x3C, 0xE5, 0x65, 0x45, 0x64,
641 0x70, 0x10, 0x12, 0x09, 0x04, 0xC3, 0xE5, 0x3E,
642/*12A0*/0x12, 0x07, 0x69, 0x40, 0x3B, 0x12, 0x08, 0x95,
643 0x80, 0x18, 0xE5, 0x3E, 0xC3, 0x95, 0x38, 0x40,
644/*12B0*/0x1D, 0x85, 0x3E, 0x38, 0xE5, 0x3E, 0x60, 0x05,
645 0x85, 0x3F, 0x39, 0x80, 0x03, 0x85, 0x39, 0x39,
646/*12C0*/0x8F, 0x3A, 0x12, 0x07, 0xA8, 0xE5, 0x3E, 0x12,
647 0x07, 0x53, 0xE5, 0x3F, 0xF0, 0x22, 0x80, 0x3B,
648/*12D0*/0xE5, 0x65, 0x45, 0x64, 0x70, 0x11, 0x12, 0x07,
649 0x5F, 0x40, 0x05, 0x12, 0x08, 0x9E, 0x80, 0x1F,
650/*12E0*/0x12, 0x07, 0x3E, 0xE5, 0x41, 0xF0, 0x22, 0xE5,
651 0x3C, 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3C,
652/*12F0*/0x38, 0xE5, 0x3C, 0x60, 0x05, 0x85, 0x3D, 0x39,
653 0x80, 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12,
654/*1300*/0x07, 0xA8, 0xE5, 0x3C, 0x12, 0x07, 0x53, 0xE5,
655 0x3D, 0xF0, 0x22, 0x12, 0x07, 0x9F, 0xE5, 0x38,
656/*1310*/0x12, 0x07, 0x53, 0xE5, 0x39, 0xF0, 0x22, 0x8C,
657 0x63, 0x8D, 0x64, 0x12, 0x08, 0xDA, 0x40, 0x3C,
658/*1320*/0xE5, 0x64, 0x45, 0x63, 0x70, 0x10, 0x12, 0x09,
659 0x04, 0xC3, 0xE5, 0x3E, 0x12, 0x07, 0x69, 0x40,
660/*1330*/0x3B, 0x12, 0x08, 0x95, 0x80, 0x18, 0xE5, 0x3E,
661 0xC3, 0x95, 0x38, 0x40, 0x1D, 0x85, 0x3E, 0x38,
662/*1340*/0xE5, 0x3E, 0x60, 0x05, 0x85, 0x3F, 0x39, 0x80,
663 0x03, 0x85, 0x39, 0x39, 0x8F, 0x3A, 0x12, 0x07,
664/*1350*/0xA8, 0xE5, 0x3E, 0x12, 0x07, 0x53, 0xE5, 0x3F,
665 0xF0, 0x22, 0x80, 0x3B, 0xE5, 0x64, 0x45, 0x63,
666/*1360*/0x70, 0x11, 0x12, 0x07, 0x5F, 0x40, 0x05, 0x12,
667 0x08, 0x9E, 0x80, 0x1F, 0x12, 0x07, 0x3E, 0xE5,
668/*1370*/0x41, 0xF0, 0x22, 0xE5, 0x3C, 0xC3, 0x95, 0x38,
669 0x40, 0x1D, 0x85, 0x3C, 0x38, 0xE5, 0x3C, 0x60,
670/*1380*/0x05, 0x85, 0x3D, 0x39, 0x80, 0x03, 0x85, 0x39,
671 0x39, 0x8F, 0x3A, 0x12, 0x07, 0xA8, 0xE5, 0x3C,
672/*1390*/0x12, 0x07, 0x53, 0xE5, 0x3D, 0xF0, 0x22, 0x12,
673 0x07, 0x9F, 0xE5, 0x38, 0x12, 0x07, 0x53, 0xE5,
674/*13A0*/0x39, 0xF0, 0x22, 0xE5, 0x0D, 0xFE, 0xE5, 0x08,
675 0x8E, 0x54, 0x44, 0x05, 0xF5, 0x55, 0x75, 0x15,
676/*13B0*/0x0F, 0xF5, 0x82, 0x12, 0x0E, 0x7A, 0x12, 0x17,
677 0xA3, 0x20, 0x31, 0x05, 0x75, 0x15, 0x03, 0x80,
678/*13C0*/0x03, 0x75, 0x15, 0x0B, 0xE5, 0x0A, 0xC3, 0x94,
679 0x01, 0x50, 0x38, 0x12, 0x14, 0x20, 0x20, 0x31,
680/*13D0*/0x06, 0x05, 0x15, 0x05, 0x15, 0x80, 0x04, 0x15,
681 0x15, 0x15, 0x15, 0xE5, 0x0A, 0xC3, 0x94, 0x01,
682/*13E0*/0x50, 0x21, 0x12, 0x14, 0x20, 0x20, 0x31, 0x04,
683 0x05, 0x15, 0x80, 0x02, 0x15, 0x15, 0xE5, 0x0A,
684/*13F0*/0xC3, 0x94, 0x01, 0x50, 0x0E, 0x12, 0x0E, 0x77,
685 0x12, 0x17, 0xA3, 0x20, 0x31, 0x05, 0x05, 0x15,
686/*1400*/0x12, 0x0E, 0x77, 0xE5, 0x15, 0xB4, 0x08, 0x04,
687 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x15,
688/*1410*/0xB4, 0x07, 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E,
689 0x00, 0xEE, 0x4F, 0x60, 0x02, 0x05, 0x7F, 0x22,
690/*1420*/0x85, 0x55, 0x82, 0x85, 0x54, 0x83, 0xE5, 0x15,
691 0xF0, 0x12, 0x17, 0xA3, 0x22, 0x12, 0x07, 0x2A,
692/*1430*/0x75, 0x83, 0xAE, 0x74, 0xFF, 0x12, 0x07, 0x29,
693 0xE0, 0x54, 0x1A, 0xF5, 0x34, 0xE0, 0xC4, 0x13,
694/*1440*/0x54, 0x07, 0xF5, 0x35, 0x24, 0xFE, 0x60, 0x24,
695 0x24, 0xFE, 0x60, 0x3C, 0x24, 0x04, 0x70, 0x63,
696/*1450*/0x75, 0x31, 0x2D, 0xE5, 0x08, 0xFD, 0x74, 0xB6,
697 0x12, 0x07, 0x92, 0x74, 0xBC, 0x90, 0x07, 0x22,
698/*1460*/0x12, 0x07, 0x95, 0x74, 0x90, 0x12, 0x07, 0xB3,
699 0x74, 0x92, 0x80, 0x3C, 0x75, 0x31, 0x3A, 0xE5,
700/*1470*/0x08, 0xFD, 0x74, 0xBA, 0x12, 0x07, 0x92, 0x74,
701 0xC0, 0x90, 0x07, 0x22, 0x12, 0x07, 0xB6, 0x74,
702/*1480*/0xC4, 0x12, 0x07, 0xB3, 0x74, 0xC8, 0x80, 0x20,
703 0x75, 0x31, 0x35, 0xE5, 0x08, 0xFD, 0x74, 0xB8,
704/*1490*/0x12, 0x07, 0x92, 0x74, 0xBE, 0xFF, 0xED, 0x44,
705 0x07, 0x90, 0x07, 0x22, 0xCF, 0xF0, 0xA3, 0xEF,
706/*14A0*/0xF0, 0x74, 0xC2, 0x12, 0x07, 0xB3, 0x74, 0xC6,
707 0xFF, 0xED, 0x44, 0x07, 0xA3, 0xCF, 0xF0, 0xA3,
708/*14B0*/0xEF, 0xF0, 0x22, 0x75, 0x34, 0x01, 0x22, 0x8E,
709 0x58, 0x8F, 0x59, 0x8C, 0x5A, 0x8D, 0x5B, 0x8A,
710/*14C0*/0x5C, 0x8B, 0x5D, 0x75, 0x5E, 0x01, 0xE4, 0xF5,
711 0x5F, 0x12, 0x1E, 0xA5, 0x85, 0x59, 0x5E, 0xD3,
712/*14D0*/0xE5, 0x5E, 0x95, 0x5B, 0xE5, 0x5A, 0x12, 0x07,
713 0x6B, 0x50, 0x57, 0xE5, 0x5D, 0x45, 0x5C, 0x70,
714/*14E0*/0x30, 0x12, 0x07, 0x2A, 0x75, 0x83, 0x92, 0xE5,
715 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC6, 0xE5,
716/*14F0*/0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC8, 0xE5,
717 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0x90, 0xE5,
718/*1500*/0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2, 0xE5,
719 0x5E, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0x80,
720/*1510*/0x03, 0x12, 0x07, 0x32, 0xE5, 0x5E, 0xF0, 0xAF,
721 0x5F, 0x7E, 0x00, 0xAD, 0x5D, 0xAC, 0x5C, 0x12,
722/*1520*/0x04, 0x44, 0xAF, 0x5E, 0x7E, 0x00, 0xAD, 0x5D,
723 0xAC, 0x5C, 0x12, 0x0B, 0xD1, 0x05, 0x5E, 0x02,
724/*1530*/0x14, 0xCF, 0xAB, 0x5D, 0xAA, 0x5C, 0xAD, 0x5B,
725 0xAC, 0x5A, 0xAF, 0x59, 0xAE, 0x58, 0x02, 0x1B,
726/*1540*/0xFB, 0x8C, 0x5C, 0x8D, 0x5D, 0x8A, 0x5E, 0x8B,
727 0x5F, 0x75, 0x60, 0x01, 0xE4, 0xF5, 0x61, 0xF5,
728/*1550*/0x62, 0xF5, 0x63, 0x12, 0x1E, 0xA5, 0x8F, 0x60,
729 0xD3, 0xE5, 0x60, 0x95, 0x5D, 0xE5, 0x5C, 0x12,
730/*1560*/0x07, 0x6B, 0x50, 0x61, 0xE5, 0x5F, 0x45, 0x5E,
731 0x70, 0x27, 0x12, 0x07, 0x2A, 0x75, 0x83, 0xB6,
732/*1570*/0xE5, 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0xB8,
733 0xE5, 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0xBA,
734/*1580*/0xE5, 0x60, 0xF0, 0xAF, 0x61, 0x7E, 0x00, 0xE5,
735 0x62, 0x12, 0x08, 0x7A, 0x12, 0x0A, 0xFF, 0x80,
736/*1590*/0x19, 0x90, 0x07, 0x24, 0x12, 0x07, 0x35, 0xE5,
737 0x60, 0x12, 0x07, 0x29, 0x75, 0x83, 0x8E, 0xE4,
738/*15A0*/0x12, 0x07, 0x29, 0x74, 0x01, 0x12, 0x07, 0x29,
739 0xE4, 0xF0, 0xAF, 0x63, 0x7E, 0x00, 0xAD, 0x5F,
740/*15B0*/0xAC, 0x5E, 0x12, 0x04, 0x44, 0xAF, 0x60, 0x7E,
741 0x00, 0xAD, 0x5F, 0xAC, 0x5E, 0x12, 0x12, 0x8B,
742/*15C0*/0x05, 0x60, 0x02, 0x15, 0x58, 0x22, 0x90, 0x11,
743 0x4D, 0xE4, 0x93, 0x90, 0x07, 0x2E, 0xF0, 0x12,
744/*15D0*/0x08, 0x1F, 0x75, 0x83, 0xAE, 0xE0, 0x54, 0x1A,
745 0xF5, 0x34, 0x70, 0x67, 0xEF, 0x44, 0x07, 0xF5,
746/*15E0*/0x82, 0x75, 0x83, 0xCE, 0xE0, 0xFF, 0x13, 0x13,
747 0x13, 0x54, 0x07, 0xF5, 0x36, 0x54, 0x0F, 0xD3,
748/*15F0*/0x94, 0x00, 0x40, 0x06, 0x12, 0x14, 0x2D, 0x12,
749 0x1B, 0xA9, 0xE5, 0x36, 0x54, 0x0F, 0x24, 0xFE,
750/*1600*/0x60, 0x0C, 0x14, 0x60, 0x0C, 0x14, 0x60, 0x19,
751 0x24, 0x03, 0x70, 0x37, 0x80, 0x10, 0x02, 0x1E,
752/*1610*/0x91, 0x12, 0x1E, 0x91, 0x12, 0x07, 0x2A, 0x75,
753 0x83, 0xCE, 0xE0, 0x54, 0xEF, 0xF0, 0x02, 0x1D,
754/*1620*/0xAE, 0x12, 0x10, 0x14, 0xE4, 0xF5, 0x55, 0x12,
755 0x1D, 0x85, 0x05, 0x55, 0xE5, 0x55, 0xC3, 0x94,
756/*1630*/0x05, 0x40, 0xF4, 0x12, 0x07, 0x2A, 0x75, 0x83,
757 0xCE, 0xE0, 0x54, 0xC7, 0x12, 0x07, 0x29, 0xE0,
758/*1640*/0x44, 0x08, 0xF0, 0x22, 0xE4, 0xF5, 0x58, 0xF5,
759 0x59, 0xAF, 0x08, 0xEF, 0x44, 0x07, 0xF5, 0x82,
760/*1650*/0x75, 0x83, 0xD0, 0xE0, 0xFD, 0xC4, 0x54, 0x0F,
761 0xF5, 0x5A, 0xEF, 0x44, 0x07, 0xF5, 0x82, 0x75,
762/*1660*/0x83, 0x80, 0x74, 0x01, 0xF0, 0x12, 0x08, 0x21,
763 0x75, 0x83, 0x82, 0xE5, 0x45, 0xF0, 0xEF, 0x44,
764/*1670*/0x07, 0xF5, 0x82, 0x75, 0x83, 0x8A, 0x74, 0xFF,
765 0xF0, 0x12, 0x1A, 0x4D, 0x12, 0x07, 0x2A, 0x75,
766/*1680*/0x83, 0xBC, 0xE0, 0x54, 0xEF, 0x12, 0x07, 0x29,
767 0x75, 0x83, 0xBE, 0xE0, 0x54, 0xEF, 0x12, 0x07,
768/*1690*/0x29, 0x75, 0x83, 0xC0, 0xE0, 0x54, 0xEF, 0x12,
769 0x07, 0x29, 0x75, 0x83, 0xBC, 0xE0, 0x44, 0x10,
770/*16A0*/0x12, 0x07, 0x29, 0x75, 0x83, 0xBE, 0xE0, 0x44,
771 0x10, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC0, 0xE0,
772/*16B0*/0x44, 0x10, 0xF0, 0xAF, 0x58, 0xE5, 0x59, 0x12,
773 0x08, 0x78, 0x02, 0x0A, 0xFF, 0xE4, 0xF5, 0x58,
774/*16C0*/0x7D, 0x01, 0xF5, 0x59, 0xAF, 0x35, 0xFE, 0xFC,
775 0x12, 0x09, 0x15, 0x12, 0x07, 0x2A, 0x75, 0x83,
776/*16D0*/0xB6, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
777 0xB8, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
778/*16E0*/0xBA, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
779 0xBC, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
780/*16F0*/0xBE, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
781 0xC0, 0x74, 0x10, 0x12, 0x07, 0x29, 0x75, 0x83,
782/*1700*/0x90, 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC2,
783 0xE4, 0x12, 0x07, 0x29, 0x75, 0x83, 0xC4, 0xE4,
784/*1710*/0x12, 0x07, 0x29, 0x75, 0x83, 0x92, 0xE4, 0x12,
785 0x07, 0x29, 0x75, 0x83, 0xC6, 0xE4, 0x12, 0x07,
786/*1720*/0x29, 0x75, 0x83, 0xC8, 0xE4, 0xF0, 0xAF, 0x58,
787 0xFE, 0xE5, 0x59, 0x12, 0x08, 0x7A, 0x02, 0x0A,
788/*1730*/0xFF, 0xE5, 0xE2, 0x30, 0xE4, 0x6C, 0xE5, 0xE7,
789 0x54, 0xC0, 0x64, 0x40, 0x70, 0x64, 0xE5, 0x09,
790/*1740*/0xC4, 0x54, 0x30, 0xFE, 0xE5, 0x08, 0x25, 0xE0,
791 0x25, 0xE0, 0x54, 0xC0, 0x4E, 0xFE, 0xEF, 0x54,
792/*1750*/0x3F, 0x4E, 0xFD, 0xE5, 0x2B, 0xAE, 0x2A, 0x78,
793 0x02, 0xC3, 0x33, 0xCE, 0x33, 0xCE, 0xD8, 0xF9,
794/*1760*/0xF5, 0x82, 0x8E, 0x83, 0xED, 0xF0, 0xE5, 0x2B,
795 0xAE, 0x2A, 0x78, 0x02, 0xC3, 0x33, 0xCE, 0x33,
796/*1770*/0xCE, 0xD8, 0xF9, 0xFF, 0xF5, 0x82, 0x8E, 0x83,
797 0xA3, 0xE5, 0xFE, 0xF0, 0x8F, 0x82, 0x8E, 0x83,
798/*1780*/0xA3, 0xA3, 0xE5, 0xFD, 0xF0, 0x8F, 0x82, 0x8E,
799 0x83, 0xA3, 0xA3, 0xA3, 0xE5, 0xFC, 0xF0, 0xC3,
800/*1790*/0xE5, 0x2B, 0x94, 0xFA, 0xE5, 0x2A, 0x94, 0x00,
801 0x50, 0x08, 0x05, 0x2B, 0xE5, 0x2B, 0x70, 0x02,
802/*17A0*/0x05, 0x2A, 0x22, 0xE4, 0xFF, 0xE4, 0xF5, 0x58,
803 0xF5, 0x56, 0xF5, 0x57, 0x74, 0x82, 0xFC, 0x12,
804/*17B0*/0x0E, 0x04, 0x8C, 0x83, 0xE0, 0xF5, 0x10, 0x54,
805 0x7F, 0xF0, 0xE5, 0x10, 0x44, 0x80, 0x12, 0x0E,
806/*17C0*/0x98, 0xED, 0xF0, 0x7E, 0x0A, 0x12, 0x0E, 0x04,
807 0x75, 0x83, 0xA0, 0xE0, 0x20, 0xE0, 0x26, 0xDE,
808/*17D0*/0xF4, 0x05, 0x57, 0xE5, 0x57, 0x70, 0x02, 0x05,
809 0x56, 0xE5, 0x14, 0x24, 0x01, 0xFD, 0xE4, 0x33,
810/*17E0*/0xFC, 0xD3, 0xE5, 0x57, 0x9D, 0xE5, 0x56, 0x9C,
811 0x40, 0xD9, 0xE5, 0x0A, 0x94, 0x20, 0x50, 0x02,
812/*17F0*/0x05, 0x0A, 0x43, 0xE1, 0x08, 0xC2, 0x31, 0x12,
813 0x0E, 0x04, 0x75, 0x83, 0xA6, 0xE0, 0x55, 0x12,
814/*1800*/0x65, 0x12, 0x70, 0x03, 0xD2, 0x31, 0x22, 0xC2,
815 0x31, 0x22, 0x90, 0x07, 0x26, 0xE0, 0xFA, 0xA3,
816/*1810*/0xE0, 0xF5, 0x82, 0x8A, 0x83, 0xE0, 0xF5, 0x41,
817 0xE5, 0x39, 0xC3, 0x95, 0x41, 0x40, 0x26, 0xE5,
818/*1820*/0x39, 0x95, 0x41, 0xC3, 0x9F, 0xEE, 0x12, 0x07,
819 0x6B, 0x40, 0x04, 0x7C, 0x01, 0x80, 0x02, 0x7C,
820/*1830*/0x00, 0xE5, 0x41, 0x64, 0x3F, 0x60, 0x04, 0x7B,
821 0x01, 0x80, 0x02, 0x7B, 0x00, 0xEC, 0x5B, 0x60,
822/*1840*/0x29, 0x05, 0x41, 0x80, 0x28, 0xC3, 0xE5, 0x41,
823 0x95, 0x39, 0xC3, 0x9F, 0xEE, 0x12, 0x07, 0x6B,
824/*1850*/0x40, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00,
825 0xE5, 0x41, 0x60, 0x04, 0x7E, 0x01, 0x80, 0x02,
826/*1860*/0x7E, 0x00, 0xEF, 0x5E, 0x60, 0x04, 0x15, 0x41,
827 0x80, 0x03, 0x85, 0x39, 0x41, 0x85, 0x3A, 0x40,
828/*1870*/0x22, 0xE5, 0xE2, 0x30, 0xE4, 0x60, 0xE5, 0xE1,
829 0x30, 0xE2, 0x5B, 0xE5, 0x09, 0x70, 0x04, 0x7F,
830/*1880*/0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5, 0x08, 0x70,
831 0x04, 0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE,
832/*1890*/0x5F, 0x60, 0x43, 0x53, 0xF9, 0xF8, 0xE5, 0xE2,
833 0x30, 0xE4, 0x3B, 0xE5, 0xE1, 0x30, 0xE2, 0x2E,
834/*18A0*/0x43, 0xFA, 0x02, 0x53, 0xFA, 0xFB, 0xE4, 0xF5,
835 0x10, 0x90, 0x94, 0x70, 0xE5, 0x10, 0xF0, 0xE5,
836/*18B0*/0xE1, 0x30, 0xE2, 0xE7, 0x90, 0x94, 0x70, 0xE0,
837 0x65, 0x10, 0x60, 0x03, 0x43, 0xFA, 0x04, 0x05,
838/*18C0*/0x10, 0x90, 0x94, 0x70, 0xE5, 0x10, 0xF0, 0x70,
839 0xE6, 0x12, 0x00, 0x06, 0x80, 0xE1, 0x53, 0xFA,
840/*18D0*/0xFD, 0x53, 0xFA, 0xFB, 0x80, 0xC0, 0x22, 0x8F,
841 0x54, 0x12, 0x00, 0x06, 0xE5, 0xE1, 0x30, 0xE0,
842/*18E0*/0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F, 0x00, 0xE5,
843 0x7E, 0xD3, 0x94, 0x05, 0x40, 0x04, 0x7E, 0x01,
844/*18F0*/0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F, 0x60, 0x3D,
845 0x85, 0x54, 0x11, 0xE5, 0xE2, 0x20, 0xE1, 0x32,
846/*1900*/0x74, 0xCE, 0x12, 0x1A, 0x05, 0x30, 0xE7, 0x04,
847 0x7D, 0x01, 0x80, 0x02, 0x7D, 0x00, 0x8F, 0x82,
848/*1910*/0x8E, 0x83, 0xE0, 0x30, 0xE6, 0x04, 0x7F, 0x01,
849 0x80, 0x02, 0x7F, 0x00, 0xEF, 0x5D, 0x70, 0x15,
850/*1920*/0x12, 0x15, 0xC6, 0x74, 0xCE, 0x12, 0x1A, 0x05,
851 0x30, 0xE6, 0x07, 0xE0, 0x44, 0x80, 0xF0, 0x43,
852/*1930*/0xF9, 0x80, 0x12, 0x18, 0x71, 0x22, 0x12, 0x0E,
853 0x44, 0xE5, 0x16, 0x25, 0xE0, 0x25, 0xE0, 0x24,
854/*1940*/0xB0, 0xF5, 0x82, 0xE4, 0x34, 0x1A, 0xF5, 0x83,
855 0xE4, 0x93, 0xF5, 0x0F, 0xE5, 0x16, 0x25, 0xE0,
856/*1950*/0x25, 0xE0, 0x24, 0xB1, 0xF5, 0x82, 0xE4, 0x34,
857 0x1A, 0xF5, 0x83, 0xE4, 0x93, 0xF5, 0x0E, 0x12,
858/*1960*/0x0E, 0x65, 0xF5, 0x10, 0xE5, 0x0F, 0x54, 0xF0,
859 0x12, 0x0E, 0x17, 0x75, 0x83, 0x8C, 0xEF, 0xF0,
860/*1970*/0xE5, 0x0F, 0x30, 0xE0, 0x0C, 0x12, 0x0E, 0x04,
861 0x75, 0x83, 0x86, 0xE0, 0x44, 0x40, 0xF0, 0x80,
862/*1980*/0x0A, 0x12, 0x0E, 0x04, 0x75, 0x83, 0x86, 0xE0,
863 0x54, 0xBF, 0xF0, 0x12, 0x0E, 0x91, 0x75, 0x83,
864/*1990*/0x82, 0xE5, 0x0E, 0xF0, 0x22, 0x7F, 0x05, 0x12,
865 0x17, 0x31, 0x12, 0x0E, 0x04, 0x12, 0x0E, 0x33,
866/*19A0*/0x74, 0x02, 0xF0, 0x74, 0x8E, 0xFE, 0x12, 0x0E,
867 0x04, 0x12, 0x0E, 0x0B, 0xEF, 0xF0, 0x75, 0x15,
868/*19B0*/0x70, 0x12, 0x0F, 0xF7, 0x20, 0x34, 0x05, 0x75,
869 0x15, 0x10, 0x80, 0x03, 0x75, 0x15, 0x50, 0x12,
870/*19C0*/0x0F, 0xF7, 0x20, 0x34, 0x04, 0x74, 0x10, 0x80,
871 0x02, 0x74, 0xF0, 0x25, 0x15, 0xF5, 0x15, 0x12,
872/*19D0*/0x0E, 0x21, 0xEF, 0xF0, 0x12, 0x10, 0x91, 0x20,
873 0x34, 0x17, 0xE5, 0x15, 0x64, 0x30, 0x60, 0x0C,
874/*19E0*/0x74, 0x10, 0x25, 0x15, 0xF5, 0x15, 0xB4, 0x80,
875 0x03, 0xE4, 0xF5, 0x15, 0x12, 0x0E, 0x21, 0xEF,
876/*19F0*/0xF0, 0x22, 0xF0, 0xE5, 0x0B, 0x25, 0xE0, 0x25,
877 0xE0, 0x24, 0x82, 0xF5, 0x82, 0xE4, 0x34, 0x07,
878/*1A00*/0xF5, 0x83, 0x22, 0x74, 0x88, 0xFE, 0xE5, 0x08,
879 0x44, 0x07, 0xFF, 0xF5, 0x82, 0x8E, 0x83, 0xE0,
880/*1A10*/0x22, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0xF5, 0x82,
881 0x22, 0xF0, 0xE0, 0x54, 0xC0, 0x8F, 0x82, 0x8E,
882/*1A20*/0x83, 0xF0, 0x22, 0xEF, 0x44, 0x07, 0xF5, 0x82,
883 0x75, 0x83, 0x86, 0xE0, 0x54, 0x10, 0xD3, 0x94,
884/*1A30*/0x00, 0x22, 0xF0, 0x90, 0x07, 0x15, 0xE0, 0x04,
885 0xF0, 0x22, 0x44, 0x06, 0xF5, 0x82, 0x75, 0x83,
886/*1A40*/0x9E, 0xE0, 0x22, 0xFE, 0xEF, 0x44, 0x07, 0xF5,
887 0x82, 0x8E, 0x83, 0xE0, 0x22, 0xE4, 0x90, 0x07,
888/*1A50*/0x2A, 0xF0, 0xA3, 0xF0, 0x12, 0x07, 0x2A, 0x75,
889 0x83, 0x82, 0xE0, 0x54, 0x7F, 0x12, 0x07, 0x29,
890/*1A60*/0xE0, 0x44, 0x80, 0xF0, 0x12, 0x10, 0xFC, 0x12,
891 0x08, 0x1F, 0x75, 0x83, 0xA0, 0xE0, 0x20, 0xE0,
892/*1A70*/0x1A, 0x90, 0x07, 0x2B, 0xE0, 0x04, 0xF0, 0x70,
893 0x06, 0x90, 0x07, 0x2A, 0xE0, 0x04, 0xF0, 0x90,
894/*1A80*/0x07, 0x2A, 0xE0, 0xB4, 0x10, 0xE1, 0xA3, 0xE0,
895 0xB4, 0x00, 0xDC, 0xEE, 0x44, 0xA6, 0xFC, 0xEF,
896/*1A90*/0x44, 0x07, 0xF5, 0x82, 0x8C, 0x83, 0xE0, 0xF5,
897 0x32, 0xEE, 0x44, 0xA8, 0xFE, 0xEF, 0x44, 0x07,
898/*1AA0*/0xF5, 0x82, 0x8E, 0x83, 0xE0, 0xF5, 0x33, 0x22,
899 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x90,
900/*1AB0*/0x00, 0x20, 0x0F, 0x92, 0x00, 0x21, 0x0F, 0x94,
901 0x00, 0x22, 0x0F, 0x96, 0x00, 0x23, 0x0F, 0x98,
902/*1AC0*/0x00, 0x24, 0x0F, 0x9A, 0x00, 0x25, 0x0F, 0x9C,
903 0x00, 0x26, 0x0F, 0x9E, 0x00, 0x27, 0x0F, 0xA0,
904/*1AD0*/0x01, 0x20, 0x01, 0xA2, 0x01, 0x21, 0x01, 0xA4,
905 0x01, 0x22, 0x01, 0xA6, 0x01, 0x23, 0x01, 0xA8,
906/*1AE0*/0x01, 0x24, 0x01, 0xAA, 0x01, 0x25, 0x01, 0xAC,
907 0x01, 0x26, 0x01, 0xAE, 0x01, 0x27, 0x01, 0xB0,
908/*1AF0*/0x01, 0x28, 0x01, 0xB4, 0x00, 0x28, 0x0F, 0xB6,
909 0x40, 0x28, 0x0F, 0xB8, 0x61, 0x28, 0x01, 0xCB,
910/*1B00*/0xEF, 0xCB, 0xCA, 0xEE, 0xCA, 0x7F, 0x01, 0xE4,
911 0xFD, 0xEB, 0x4A, 0x70, 0x24, 0xE5, 0x08, 0xF5,
912/*1B10*/0x82, 0x74, 0xB6, 0x12, 0x08, 0x29, 0xE5, 0x08,
913 0xF5, 0x82, 0x74, 0xB8, 0x12, 0x08, 0x29, 0xE5,
914/*1B20*/0x08, 0xF5, 0x82, 0x74, 0xBA, 0x12, 0x08, 0x29,
915 0x7E, 0x00, 0x7C, 0x00, 0x12, 0x0A, 0xFF, 0x80,
916/*1B30*/0x12, 0x90, 0x07, 0x26, 0x12, 0x07, 0x35, 0xE5,
917 0x41, 0xF0, 0x90, 0x07, 0x24, 0x12, 0x07, 0x35,
918/*1B40*/0xE5, 0x40, 0xF0, 0x12, 0x07, 0x2A, 0x75, 0x83,
919 0x8E, 0xE4, 0x12, 0x07, 0x29, 0x74, 0x01, 0x12,
920/*1B50*/0x07, 0x29, 0xE4, 0xF0, 0x22, 0xE4, 0xF5, 0x26,
921 0xF5, 0x27, 0x53, 0xE1, 0xFE, 0xF5, 0x2A, 0x75,
922/*1B60*/0x2B, 0x01, 0xF5, 0x08, 0x7F, 0x01, 0x12, 0x17,
923 0x31, 0x30, 0x30, 0x1C, 0x90, 0x1A, 0xA9, 0xE4,
924/*1B70*/0x93, 0xF5, 0x10, 0x90, 0x1F, 0xF9, 0xE4, 0x93,
925 0xF5, 0x10, 0x90, 0x00, 0x41, 0xE4, 0x93, 0xF5,
926/*1B80*/0x10, 0x90, 0x1E, 0xCA, 0xE4, 0x93, 0xF5, 0x10,
927 0x7F, 0x02, 0x12, 0x17, 0x31, 0x12, 0x0F, 0x54,
928/*1B90*/0x7F, 0x03, 0x12, 0x17, 0x31, 0x12, 0x00, 0x06,
929 0xE5, 0xE2, 0x30, 0xE7, 0x09, 0x12, 0x10, 0x00,
930/*1BA0*/0x30, 0x30, 0x03, 0x12, 0x11, 0x00, 0x02, 0x00,
931 0x47, 0x12, 0x08, 0x1F, 0x75, 0x83, 0xD0, 0xE0,
932/*1BB0*/0xC4, 0x54, 0x0F, 0xFD, 0x75, 0x43, 0x01, 0x75,
933 0x44, 0xFF, 0x12, 0x08, 0xAA, 0x74, 0x04, 0xF0,
934/*1BC0*/0x75, 0x3B, 0x01, 0xED, 0x14, 0x60, 0x0C, 0x14,
935 0x60, 0x0B, 0x14, 0x60, 0x0F, 0x24, 0x03, 0x70,
936/*1BD0*/0x0B, 0x80, 0x09, 0x80, 0x00, 0x12, 0x08, 0xA7,
937 0x04, 0xF0, 0x80, 0x06, 0x12, 0x08, 0xA7, 0x74,
938/*1BE0*/0x04, 0xF0, 0xEE, 0x44, 0x82, 0xFE, 0xEF, 0x44,
939 0x07, 0xF5, 0x82, 0x8E, 0x83, 0xE5, 0x45, 0x12,
940/*1BF0*/0x08, 0xBE, 0x75, 0x83, 0x82, 0xE5, 0x31, 0xF0,
941 0x02, 0x11, 0x4C, 0x8E, 0x60, 0x8F, 0x61, 0x12,
942/*1C00*/0x1E, 0xA5, 0xE4, 0xFF, 0xCE, 0xED, 0xCE, 0xEE,
943 0xD3, 0x95, 0x61, 0xE5, 0x60, 0x12, 0x07, 0x6B,
944/*1C10*/0x40, 0x39, 0x74, 0x20, 0x2E, 0xF5, 0x82, 0xE4,
945 0x34, 0x03, 0xF5, 0x83, 0xE0, 0x70, 0x03, 0xFF,
946/*1C20*/0x80, 0x26, 0x12, 0x08, 0xE2, 0xFD, 0xC3, 0x9F,
947 0x40, 0x1E, 0xCF, 0xED, 0xCF, 0xEB, 0x4A, 0x70,
948/*1C30*/0x0B, 0x8D, 0x42, 0x12, 0x08, 0xEE, 0xF5, 0x41,
949 0x8E, 0x40, 0x80, 0x0C, 0x12, 0x08, 0xE2, 0xF5,
950/*1C40*/0x38, 0x12, 0x08, 0xEE, 0xF5, 0x39, 0x8E, 0x3A,
951 0x1E, 0x80, 0xBC, 0x22, 0x75, 0x58, 0x01, 0xE5,
952/*1C50*/0x35, 0x70, 0x0C, 0x12, 0x07, 0xCC, 0xE0, 0xF5,
953 0x4A, 0x12, 0x07, 0xD8, 0xE0, 0xF5, 0x4C, 0xE5,
954/*1C60*/0x35, 0xB4, 0x04, 0x0C, 0x12, 0x07, 0xE4, 0xE0,
955 0xF5, 0x4A, 0x12, 0x07, 0xF0, 0xE0, 0xF5, 0x4C,
956/*1C70*/0xE5, 0x35, 0xB4, 0x01, 0x04, 0x7F, 0x01, 0x80,
957 0x02, 0x7F, 0x00, 0xE5, 0x35, 0xB4, 0x02, 0x04,
958/*1C80*/0x7E, 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x4F,
959 0x60, 0x0C, 0x12, 0x07, 0xFC, 0xE0, 0xF5, 0x4A,
960/*1C90*/0x12, 0x08, 0x08, 0xE0, 0xF5, 0x4C, 0x85, 0x41,
961 0x49, 0x85, 0x40, 0x4B, 0x22, 0x75, 0x5B, 0x01,
962/*1CA0*/0x90, 0x07, 0x24, 0x12, 0x07, 0x35, 0xE0, 0x54,
963 0x1F, 0xFF, 0xD3, 0x94, 0x02, 0x50, 0x04, 0x8F,
964/*1CB0*/0x58, 0x80, 0x05, 0xEF, 0x24, 0xFE, 0xF5, 0x58,
965 0xEF, 0xC3, 0x94, 0x18, 0x40, 0x05, 0x75, 0x59,
966/*1CC0*/0x18, 0x80, 0x04, 0xEF, 0x04, 0xF5, 0x59, 0x85,
967 0x43, 0x5A, 0xAF, 0x58, 0x7E, 0x00, 0xAD, 0x59,
968/*1CD0*/0x7C, 0x00, 0xAB, 0x5B, 0x7A, 0x00, 0x12, 0x15,
969 0x41, 0xAF, 0x5A, 0x7E, 0x00, 0x12, 0x18, 0x0A,
970/*1CE0*/0xAF, 0x5B, 0x7E, 0x00, 0x02, 0x1A, 0xFF, 0xE5,
971 0xE2, 0x30, 0xE7, 0x0E, 0x12, 0x10, 0x03, 0xC2,
972/*1CF0*/0x30, 0x30, 0x30, 0x03, 0x12, 0x10, 0xFF, 0x20,
973 0x33, 0x28, 0xE5, 0xE7, 0x30, 0xE7, 0x05, 0x12,
974/*1D00*/0x0E, 0xA2, 0x80, 0x0D, 0xE5, 0xFE, 0xC3, 0x94,
975 0x20, 0x50, 0x06, 0x12, 0x0E, 0xA2, 0x43, 0xF9,
976/*1D10*/0x08, 0xE5, 0xF2, 0x30, 0xE7, 0x03, 0x53, 0xF9,
977 0x7F, 0xE5, 0xF1, 0x54, 0x70, 0xD3, 0x94, 0x00,
978/*1D20*/0x50, 0xD8, 0x22, 0x12, 0x0E, 0x04, 0x75, 0x83,
979 0x80, 0xE4, 0xF0, 0xE5, 0x08, 0x44, 0x07, 0x12,
980/*1D30*/0x0D, 0xFD, 0x75, 0x83, 0x84, 0x12, 0x0E, 0x02,
981 0x75, 0x83, 0x86, 0x12, 0x0E, 0x02, 0x75, 0x83,
982/*1D40*/0x8C, 0xE0, 0x54, 0xF3, 0x12, 0x0E, 0x03, 0x75,
983 0x83, 0x8E, 0x12, 0x0E, 0x02, 0x75, 0x83, 0x94,
984/*1D50*/0xE0, 0x54, 0xFB, 0xF0, 0x22, 0x12, 0x07, 0x2A,
985 0x75, 0x83, 0x8E, 0xE4, 0x12, 0x07, 0x29, 0x74,
986/*1D60*/0x01, 0x12, 0x07, 0x29, 0xE4, 0x12, 0x08, 0xBE,
987 0x75, 0x83, 0x8C, 0xE0, 0x44, 0x20, 0x12, 0x08,
988/*1D70*/0xBE, 0xE0, 0x54, 0xDF, 0xF0, 0x74, 0x84, 0x85,
989 0x08, 0x82, 0xF5, 0x83, 0xE0, 0x54, 0x7F, 0xF0,
990/*1D80*/0xE0, 0x44, 0x80, 0xF0, 0x22, 0x75, 0x56, 0x01,
991 0xE4, 0xFD, 0xF5, 0x57, 0xAF, 0x35, 0xFE, 0xFC,
992/*1D90*/0x12, 0x09, 0x15, 0x12, 0x1C, 0x9D, 0x12, 0x1E,
993 0x7A, 0x12, 0x1C, 0x4C, 0xAF, 0x57, 0x7E, 0x00,
994/*1DA0*/0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44, 0xAF,
995 0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0x75, 0x56,
996/*1DB0*/0x01, 0xE4, 0xFD, 0xF5, 0x57, 0xAF, 0x35, 0xFE,
997 0xFC, 0x12, 0x09, 0x15, 0x12, 0x1C, 0x9D, 0x12,
998/*1DC0*/0x1E, 0x7A, 0x12, 0x1C, 0x4C, 0xAF, 0x57, 0x7E,
999 0x00, 0xAD, 0x56, 0x7C, 0x00, 0x12, 0x04, 0x44,
1000/*1DD0*/0xAF, 0x56, 0x7E, 0x00, 0x02, 0x11, 0xEE, 0xE4,
1001 0xF5, 0x16, 0x12, 0x0E, 0x44, 0xFE, 0xE5, 0x08,
1002/*1DE0*/0x44, 0x05, 0xFF, 0x12, 0x0E, 0x65, 0x8F, 0x82,
1003 0x8E, 0x83, 0xF0, 0x05, 0x16, 0xE5, 0x16, 0xC3,
1004/*1DF0*/0x94, 0x14, 0x40, 0xE6, 0xE5, 0x08, 0x12, 0x0E,
1005 0x2B, 0xE4, 0xF0, 0x22, 0xE4, 0xF5, 0x58, 0xF5,
1006/*1E00*/0x59, 0xF5, 0x5A, 0xFF, 0xFE, 0xAD, 0x58, 0xFC,
1007 0x12, 0x09, 0x15, 0x7F, 0x04, 0x7E, 0x00, 0xAD,
1008/*1E10*/0x58, 0x7C, 0x00, 0x12, 0x09, 0x15, 0x7F, 0x02,
1009 0x7E, 0x00, 0xAD, 0x58, 0x7C, 0x00, 0x02, 0x09,
1010/*1E20*/0x15, 0xE5, 0x3C, 0x25, 0x3E, 0xFC, 0xE5, 0x42,
1011 0x24, 0x00, 0xFB, 0xE4, 0x33, 0xFA, 0xEC, 0xC3,
1012/*1E30*/0x9B, 0xEA, 0x12, 0x07, 0x6B, 0x40, 0x0B, 0x8C,
1013 0x42, 0xE5, 0x3D, 0x25, 0x3F, 0xF5, 0x41, 0x8F,
1014/*1E40*/0x40, 0x22, 0x12, 0x09, 0x0B, 0x22, 0x74, 0x84,
1015 0xF5, 0x18, 0x85, 0x08, 0x19, 0x85, 0x19, 0x82,
1016/*1E50*/0x85, 0x18, 0x83, 0xE0, 0x54, 0x7F, 0xF0, 0xE0,
1017 0x44, 0x80, 0xF0, 0xE0, 0x44, 0x80, 0xF0, 0x22,
1018/*1E60*/0xEF, 0x4E, 0x70, 0x0B, 0x12, 0x07, 0x2A, 0x75,
1019 0x83, 0xD2, 0xE0, 0x54, 0xDF, 0xF0, 0x22, 0x12,
1020/*1E70*/0x07, 0x2A, 0x75, 0x83, 0xD2, 0xE0, 0x44, 0x20,
1021 0xF0, 0x22, 0x75, 0x58, 0x01, 0x90, 0x07, 0x26,
1022/*1E80*/0x12, 0x07, 0x35, 0xE0, 0x54, 0x3F, 0xF5, 0x41,
1023 0x12, 0x07, 0x32, 0xE0, 0x54, 0x3F, 0xF5, 0x40,
1024/*1E90*/0x22, 0x75, 0x56, 0x02, 0xE4, 0xF5, 0x57, 0x12,
1025 0x1D, 0xFC, 0xAF, 0x57, 0x7E, 0x00, 0xAD, 0x56,
1026/*1EA0*/0x7C, 0x00, 0x02, 0x04, 0x44, 0xE4, 0xF5, 0x42,
1027 0xF5, 0x41, 0xF5, 0x40, 0xF5, 0x38, 0xF5, 0x39,
1028/*1EB0*/0xF5, 0x3A, 0x22, 0xEF, 0x54, 0x07, 0xFF, 0xE5,
1029 0xF9, 0x54, 0xF8, 0x4F, 0xF5, 0xF9, 0x22, 0x7F,
1030/*1EC0*/0x01, 0xE4, 0xFE, 0x0F, 0x0E, 0xBE, 0xFF, 0xFB,
1031 0x22, 0x01, 0x20, 0x00, 0x01, 0x04, 0x20, 0x00,
1032/*1ED0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1033 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1034/*1EE0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1035 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1036/*1EF0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1037 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1038/*1F00*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1039 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1040/*1F10*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1041 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1042/*1F20*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1043 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1044/*1F30*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1045 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1046/*1F40*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1047 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1048/*1F50*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1049 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1050/*1F60*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1052/*1F70*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1054/*1F80*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1055 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1056/*1F90*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1057 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1058/*1FA0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1060/*1FB0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1061 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1062/*1FC0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1063 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1064/*1FD0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1065 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1066/*1FE0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1067 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1068/*1FF0*/0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1069 0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
1070};
1071
1072int ipath_sd7220_ib_load(struct ipath_devdata *dd)
1073{
1074 return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
1075 sizeof(ipath_sd7220_ib_img), 0);
1076}
1077
1078int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
1079{
1080 return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
1081 sizeof(ipath_sd7220_ib_img), 0);
1082}
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
new file mode 100644
index 000000000000..1974df7a9f78
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -0,0 +1,790 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34
35#include "ipath_kernel.h"
36#include "ipath_verbs.h"
37#include "ipath_common.h"
38
39#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
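/*
 * Sizing sketch: assuming struct ipath_sdma_desc is a pair of __le64
 * quadwords (as the qw[] accesses in unmap_desc() suggest), a 4KB
 * page holds 4096 / 16 = 256 descriptors; alloc_sdma() derives the
 * same count as SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc).
 */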
40
41static void vl15_watchdog_enq(struct ipath_devdata *dd)
42{
43 /* ipath_sdma_lock must already be held */
44 if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
45 unsigned long interval = (HZ + 19) / 20;
46 dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
47 add_timer(&dd->ipath_sdma_vl15_timer);
48 }
49}
50
51static void vl15_watchdog_deq(struct ipath_devdata *dd)
52{
53 /* ipath_sdma_lock must already be held */
54 if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
55 unsigned long interval = (HZ + 19) / 20;
56 mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
57 } else {
58 del_timer(&dd->ipath_sdma_vl15_timer);
59 }
60}
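/*
 * In both watchdog paths above, (HZ + 19) / 20 is ceil(HZ / 20)
 * jiffies, so the timer fires roughly every 50ms for as long as
 * VL15 packets remain queued.
 */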
61
62static void vl15_watchdog_timeout(unsigned long opaque)
63{
64 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
65
66 if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
67 ipath_dbg("vl15 watchdog timeout - clearing\n");
68 ipath_cancel_sends(dd, 1);
69 ipath_hol_down(dd);
70 } else {
71 ipath_dbg("vl15 watchdog timeout - "
72 "condition already cleared\n");
73 }
74}
75
76static void unmap_desc(struct ipath_devdata *dd, unsigned head)
77{
78 __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
79 u64 desc[2];
80 dma_addr_t addr;
81 size_t len;
82
83 desc[0] = le64_to_cpu(descqp[0]);
84 desc[1] = le64_to_cpu(descqp[1]);
85
86 addr = (desc[1] << 32) | (desc[0] >> 32);
87 len = (desc[0] >> 14) & (0x7ffULL << 2);
88 dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
89}
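/*
 * The decode above mirrors the layout written by make_sdma_desc()
 * below: desc[1] bits [15:0] hold SDmaPhyAddr[47:32], desc[0] bits
 * [63:32] hold SDmaPhyAddr[31:0], and desc[0] bits [26:16] hold the
 * dword count, so (desc[0] >> 14) & (0x7ff << 2) yields the mapped
 * length in bytes (dwords * 4) with a single shift and mask.
 */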
90
91/*
92 * ipath_sdma_lock should be locked before calling this.
93 */
94int ipath_sdma_make_progress(struct ipath_devdata *dd)
95{
96 struct list_head *lp = NULL;
97 struct ipath_sdma_txreq *txp = NULL;
98 u16 dmahead;
99 u16 start_idx = 0;
100 int progress = 0;
101
102 if (!list_empty(&dd->ipath_sdma_activelist)) {
103 lp = dd->ipath_sdma_activelist.next;
104 txp = list_entry(lp, struct ipath_sdma_txreq, list);
105 start_idx = txp->start_idx;
106 }
107
108 /*
109 * Read the SDMA head register in order to know that the
110 * interrupt clear has been written to the chip.
111 * Otherwise, we may not get an interrupt for the last
112 * descriptor in the queue.
113 */
114 dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
115 /* sanity check return value for error handling (chip reset, etc.) */
116 if (dmahead >= dd->ipath_sdma_descq_cnt)
117 goto done;
118
119 while (dd->ipath_sdma_descq_head != dmahead) {
120 if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
121 dd->ipath_sdma_descq_head == start_idx) {
122 unmap_desc(dd, dd->ipath_sdma_descq_head);
123 start_idx++;
124 if (start_idx == dd->ipath_sdma_descq_cnt)
125 start_idx = 0;
126 }
127
128 /* increment free count and head */
129 dd->ipath_sdma_descq_removed++;
130 if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
131 dd->ipath_sdma_descq_head = 0;
132
133 if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
134 /* move to notify list */
135 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
136 vl15_watchdog_deq(dd);
137 list_move_tail(lp, &dd->ipath_sdma_notifylist);
138 if (!list_empty(&dd->ipath_sdma_activelist)) {
139 lp = dd->ipath_sdma_activelist.next;
140 txp = list_entry(lp, struct ipath_sdma_txreq,
141 list);
142 start_idx = txp->start_idx;
143 } else {
144 lp = NULL;
145 txp = NULL;
146 }
147 }
148 progress = 1;
149 }
150
151 if (progress)
152 tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
153
154done:
155 return progress;
156}
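/*
 * Bookkeeping note: ipath_sdma_descq_added and _removed are
 * free-running counters, so (added - removed) is always the number
 * of descriptors in flight; ipath_sdma_descq_freecnt(), used by
 * ipath_sdma_verbs_send() below, is presumably derived from these
 * counters rather than from head/tail wraparound arithmetic.
 */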
157
158static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
159{
160 struct ipath_sdma_txreq *txp, *txp_next;
161
162 list_for_each_entry_safe(txp, txp_next, list, list) {
163 list_del_init(&txp->list);
164
165 if (txp->callback)
166 (*txp->callback)(txp->callback_cookie,
167 txp->callback_status);
168 }
169}
170
171static void sdma_notify_taskbody(struct ipath_devdata *dd)
172{
173 unsigned long flags;
174 struct list_head list;
175
176 INIT_LIST_HEAD(&list);
177
178 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
179
180 list_splice_init(&dd->ipath_sdma_notifylist, &list);
181
182 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
183
184 ipath_sdma_notify(dd, &list);
185
186 /*
187 * The IB verbs layer needs to see the callback before getting
188 * the call to ipath_ib_piobufavail() because the callback
189 * handles releasing resources the next send will need.
190 * Otherwise, we could do these calls in
191 * ipath_sdma_make_progress().
192 */
193 ipath_ib_piobufavail(dd->verbs_dev);
194}
195
196static void sdma_notify_task(unsigned long opaque)
197{
198 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
199
200 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
201 sdma_notify_taskbody(dd);
202}
203
204static void dump_sdma_state(struct ipath_devdata *dd)
205{
206 unsigned long reg;
207
208 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
209 ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
210
211 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
212 ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
213
214 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
215 ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
216
217 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
218 ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
219
220 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
221 ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
222
223 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
224 ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
225
226 reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
227 ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
228}
229
230static void sdma_abort_task(unsigned long opaque)
231{
232 struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
233 u64 status;
234 unsigned long flags;
235
236 if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
237 return;
238
239 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
240
241 status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
242
243 /* nothing to do */
244 if (status == IPATH_SDMA_ABORT_NONE)
245 goto unlock;
246
247 /* ipath_sdma_abort() is done, waiting for interrupt */
248 if (status == IPATH_SDMA_ABORT_DISARMED) {
249		if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
250 goto resched_noprint;
251 /* give up, intr got lost somewhere */
252 ipath_dbg("give up waiting for SDMADISABLED intr\n");
253 __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
254 status = IPATH_SDMA_ABORT_ABORTED;
255 }
256
257 /* everything is stopped, time to clean up and restart */
258 if (status == IPATH_SDMA_ABORT_ABORTED) {
259 struct ipath_sdma_txreq *txp, *txpnext;
260 u64 hwstatus;
261 int notify = 0;
262
263 hwstatus = ipath_read_kreg64(dd,
264 dd->ipath_kregs->kr_senddmastatus);
265
266 if (/* ScoreBoardDrainInProg */
267 test_bit(63, &hwstatus) ||
268 /* AbortInProg */
269 test_bit(62, &hwstatus) ||
270 /* InternalSDmaEnable */
271 test_bit(61, &hwstatus) ||
272 /* ScbEmpty */
273 !test_bit(30, &hwstatus)) {
274 if (dd->ipath_sdma_reset_wait > 0) {
275 /* not done shutting down sdma */
276 --dd->ipath_sdma_reset_wait;
277 goto resched;
278 }
279 ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
280 "status after SDMA reset, continuing\n");
281 dump_sdma_state(dd);
282 }
283
284 /* dequeue all "sent" requests */
285 list_for_each_entry_safe(txp, txpnext,
286 &dd->ipath_sdma_activelist, list) {
287 txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
288 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
289 vl15_watchdog_deq(dd);
290 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
291 notify = 1;
292 }
293 if (notify)
294 tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
295
296 /* reset our notion of head and tail */
297 dd->ipath_sdma_descq_tail = 0;
298 dd->ipath_sdma_descq_head = 0;
299 dd->ipath_sdma_head_dma[0] = 0;
300 dd->ipath_sdma_generation = 0;
301 dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
302
303 /* Reset SendDmaLenGen */
304 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
305 (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
306
307 /* done with sdma state for a bit */
308 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
309
310 /*
311 * Don't restart sdma here. Wait until link is up to ACTIVE.
312 * VL15 MADs used to bring the link up use PIO, and multiple
313 * link transitions otherwise cause the sdma engine to be
314 * stopped and started multiple times.
315 * The disable is done here, including the shadow, so the
316 * state is kept consistent.
317 * See ipath_restart_sdma() for the actual starting of sdma.
318 */
319 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
320 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
321 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
322 dd->ipath_sendctrl);
323 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
324 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
325
326		/* make sure the next debug message is printed */
327 dd->ipath_sdma_abort_jiffies = 0;
328
329 goto done;
330 }
331
332resched:
333 /*
334	 * For now, keep spinning.
335	 * JAG - it is bad that the default case simply loops
336	 * without a state change.
337 */
338	if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
339 ipath_dbg("looping with status 0x%016llx\n",
340 dd->ipath_sdma_status);
341 dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
342 }
343resched_noprint:
344 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
345 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
346 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
347 return;
348
349unlock:
350 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
351done:
352 return;
353}
354
355/*
356 * This is called from interrupt context.
357 */
358void ipath_sdma_intr(struct ipath_devdata *dd)
359{
360 unsigned long flags;
361
362 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
363
364 (void) ipath_sdma_make_progress(dd);
365
366 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
367}
368
369static int alloc_sdma(struct ipath_devdata *dd)
370{
371 int ret = 0;
372
373 /* Allocate memory for SendDMA descriptor FIFO */
374 dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
375 SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
376
377 if (!dd->ipath_sdma_descq) {
378 ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
379 "FIFO memory\n");
380 ret = -ENOMEM;
381 goto done;
382 }
383
384 dd->ipath_sdma_descq_cnt =
385 SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
386
387 /* Allocate memory for DMA of head register to memory */
388 dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
389 PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
390 if (!dd->ipath_sdma_head_dma) {
391 ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
392 ret = -ENOMEM;
393 goto cleanup_descq;
394 }
395 dd->ipath_sdma_head_dma[0] = 0;
396
397 init_timer(&dd->ipath_sdma_vl15_timer);
398 dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
399 dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
400 atomic_set(&dd->ipath_sdma_vl15_count, 0);
401
402 goto done;
403
404cleanup_descq:
405 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
406 (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
407 dd->ipath_sdma_descq = NULL;
408 dd->ipath_sdma_descq_phys = 0;
409done:
410 return ret;
411}
412
413int setup_sdma(struct ipath_devdata *dd)
414{
415 int ret = 0;
416 unsigned i, n;
417 u64 tmp64;
418 u64 senddmabufmask[3] = { 0 };
419 unsigned long flags;
420
421 ret = alloc_sdma(dd);
422 if (ret)
423 goto done;
424
425 if (!dd->ipath_sdma_descq) {
426 ipath_dev_err(dd, "SendDMA memory not allocated\n");
427		ret = -ENOMEM;
		goto done;
428 }
429
430 dd->ipath_sdma_status = 0;
431 dd->ipath_sdma_abort_jiffies = 0;
432 dd->ipath_sdma_generation = 0;
433 dd->ipath_sdma_descq_tail = 0;
434 dd->ipath_sdma_descq_head = 0;
435 dd->ipath_sdma_descq_removed = 0;
436 dd->ipath_sdma_descq_added = 0;
437
438 /* Set SendDmaBase */
439 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
440 dd->ipath_sdma_descq_phys);
441 /* Set SendDmaLenGen */
442 tmp64 = dd->ipath_sdma_descq_cnt;
443 tmp64 |= 1<<18; /* enable generation checking */
444 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
445 /* Set SendDmaTail */
446 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
447 dd->ipath_sdma_descq_tail);
448 /* Set SendDmaHeadAddr */
449 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
450 dd->ipath_sdma_head_phys);
451
452 /* Reserve all the former "kernel" piobufs */
453 n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved;
454 for (i = dd->ipath_lastport_piobuf; i < n; ++i) {
455 unsigned word = i / 64;
456 unsigned bit = i & 63;
457 BUG_ON(word >= 3);
458 senddmabufmask[word] |= 1ULL << bit;
459 }
460 ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
461 n - dd->ipath_lastport_piobuf, 0);
462 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
463 senddmabufmask[0]);
464 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
465 senddmabufmask[1]);
466 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
467 senddmabufmask[2]);
468
469 INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
470 INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
471
472 tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
473 (unsigned long) dd);
474 tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
475 (unsigned long) dd);
476
477 /*
478	 * There is no point in turning on SDMA here, since the link is
479	 * probably not yet ACTIVE.  Just mark it RUNNING and enable the
480	 * interrupt, and let ipath_restart_sdma(), called on the link
481	 * transition to ACTIVE, do the actual enable.
482 */
483 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
484 dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
485 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
486 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
487 __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
488 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
489
490done:
491 return ret;
492}
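/*
 * A worked example of the bufmask loop above, with hypothetical
 * values: if ipath_lastport_piobuf == 64 and n == 130, buffers
 * 64..129 are reserved for SDMA; i = 64..127 set word 1, bits 0..63,
 * and i = 128..129 set word 2, bits 0..1, giving senddmabufmask[] =
 * { 0, 0xffffffffffffffff, 0x3 }.
 */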
493
494void teardown_sdma(struct ipath_devdata *dd)
495{
496 struct ipath_sdma_txreq *txp, *txpnext;
497 unsigned long flags;
498 dma_addr_t sdma_head_phys = 0;
499 dma_addr_t sdma_descq_phys = 0;
500 void *sdma_descq = NULL;
501 void *sdma_head_dma = NULL;
502
503 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
504 __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
505 __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
506 __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
507 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
508
509 tasklet_kill(&dd->ipath_sdma_abort_task);
510 tasklet_kill(&dd->ipath_sdma_notify_task);
511
512 /* turn off sdma */
513 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
514 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
515 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
516 dd->ipath_sendctrl);
517 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
518 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
519
520 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
521 /* dequeue all "sent" requests */
522 list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
523 list) {
524 txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
525 if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
526 vl15_watchdog_deq(dd);
527 list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
528 }
529 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
530
531 sdma_notify_taskbody(dd);
532
533 del_timer_sync(&dd->ipath_sdma_vl15_timer);
534
535 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
536
537 dd->ipath_sdma_abort_jiffies = 0;
538
539 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
540 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
541 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
542 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
543 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
544 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
545 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
546
547 if (dd->ipath_sdma_head_dma) {
548 sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
549 sdma_head_phys = dd->ipath_sdma_head_phys;
550 dd->ipath_sdma_head_dma = NULL;
551 dd->ipath_sdma_head_phys = 0;
552 }
553
554 if (dd->ipath_sdma_descq) {
555 sdma_descq = dd->ipath_sdma_descq;
556 sdma_descq_phys = dd->ipath_sdma_descq_phys;
557 dd->ipath_sdma_descq = NULL;
558 dd->ipath_sdma_descq_phys = 0;
559 }
560
561 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
562
563 if (sdma_head_dma)
564 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
565 sdma_head_dma, sdma_head_phys);
566
567 if (sdma_descq)
568 dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
569 sdma_descq, sdma_descq_phys);
570}
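
The dma_free_coherent() calls above deliberately run after the spinlock is dropped: the pointers are snapshotted and cleared under the lock, then freed outside it, since freeing may sleep. A minimal user-space sketch of that pattern, with a pthread mutex standing in for the spinlock and malloc/free for the coherent-DMA allocator:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_buf;

static void teardown(void)
{
	void *tofree;

	pthread_mutex_lock(&lock);
	tofree = shared_buf;	/* snapshot under the lock */
	shared_buf = NULL;	/* concurrent users now see it gone */
	pthread_mutex_unlock(&lock);

	if (tofree)
		free(tofree);	/* possibly-sleeping free, lock dropped */
}

int main(void)
{
	shared_buf = malloc(64);
	teardown();
	printf("buffer freed outside the critical section\n");
	return 0;
}
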
571
572/*
573 * [Re]start SDMA, if it is in use and not already running.
574 * This is called on the transition to link ACTIVE, whether for
575 * the first or a subsequent time.
576 */
577void ipath_restart_sdma(struct ipath_devdata *dd)
578{
579 unsigned long flags;
580 int needed = 1;
581
582 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
583 goto bail;
584
585	/*
586	 * First, make sure a restart is actually warranted, which
587	 * is to say, check that we are "RUNNING" (not in teardown)
588	 * and not "SHUTDOWN".
589	 */
590 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
591 if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
592 || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
593 needed = 0;
594 else {
595 __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
596 __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
597 __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
598 }
599 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
600 if (!needed) {
601 ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
602 dd->ipath_sdma_status);
603 goto bail;
604 }
605 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
606	/*
607	 * First clear, just to be safe, since the chip only acts
608	 * on the enable bit on a 0->1 transition.
609	 */
610 dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
611 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
612 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
613 dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
614 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
615 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
616 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
617
618bail:
619 return;
620}
621
622static inline void make_sdma_desc(struct ipath_devdata *dd,
623 u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
624{
625 WARN_ON(addr & 3);
626 /* SDmaPhyAddr[47:32] */
627 sdmadesc[1] = addr >> 32;
628 /* SDmaPhyAddr[31:0] */
629 sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
630 /* SDmaGeneration[1:0] */
631 sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
632 /* SDmaDwordCount[10:0] */
633 sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
634 /* SDmaBufOffset[12:2] */
635 sdmadesc[0] |= dwoffset & 0x7ffULL;
636}
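
The packing above can be sanity-checked in isolation. Here is a stand-alone sketch that assumes only the field positions given in the comments (physical address in the high word, generation at bits 30-31, dword count at bits 16-26, buffer offset in the low bits); the test values are arbitrary:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_desc0(uint64_t addr, uint64_t gen, uint64_t dwlen,
			   uint64_t dwoffset)
{
	return ((addr & 0xfffffffcULL) << 32) |	/* SDmaPhyAddr[31:0] */
	       ((gen & 3ULL) << 30) |		/* SDmaGeneration[1:0] */
	       ((dwlen & 0x7ffULL) << 16) |	/* SDmaDwordCount[10:0] */
	       (dwoffset & 0x7ffULL);		/* SDmaBufOffset[12:2] */
}

int main(void)
{
	uint64_t d = pack_desc0(0x12345678, 2, 64, 16);

	assert((d >> 32) == 0x12345678);	/* address round-trips */
	assert(((d >> 30) & 3) == 2);		/* generation */
	assert(((d >> 16) & 0x7ff) == 64);	/* dword count */
	assert((d & 0x7ff) == 16);		/* buffer offset */
	printf("desc0 = 0x%016llx\n", (unsigned long long) d);
	return 0;
}
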
637
638/*
639 * This function queues one IB packet onto the send DMA queue per call.
640 * The caller is responsible for checking:
641 * 1) The number of send DMA descriptor entries is less than the size of
642 * the descriptor queue.
643 * 2) The IB SGE addresses and lengths are 32-bit aligned
644 * (except possibly the last SGE's length)
645 * 3) The SGE addresses are suitable for passing to dma_map_single().
646 */
647int ipath_sdma_verbs_send(struct ipath_devdata *dd,
648 struct ipath_sge_state *ss, u32 dwords,
649 struct ipath_verbs_txreq *tx)
650{
651
652 unsigned long flags;
653 struct ipath_sge *sge;
654 int ret = 0;
655 u16 tail;
656 __le64 *descqp;
657 u64 sdmadesc[2];
658 u32 dwoffset;
659 dma_addr_t addr;
660
661 if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
662 ipath_dbg("packet size %X > ibmax %X, fail\n",
663 tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
664 ret = -EMSGSIZE;
665 goto fail;
666 }
667
668 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
669
670retry:
671 if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
672 ret = -EBUSY;
673 goto unlock;
674 }
675
676 if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
677 if (ipath_sdma_make_progress(dd))
678 goto retry;
679 ret = -ENOBUFS;
680 goto unlock;
681 }
682
683 addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
684 tx->map_len, DMA_TO_DEVICE);
685 if (dma_mapping_error(addr)) {
686 ret = -EIO;
687 goto unlock;
688 }
689
690 dwoffset = tx->map_len >> 2;
691 make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
692
693 /* SDmaFirstDesc */
694 sdmadesc[0] |= 1ULL << 12;
695 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
696 sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */
697
698 /* write to the descq */
699 tail = dd->ipath_sdma_descq_tail;
700 descqp = &dd->ipath_sdma_descq[tail].qw[0];
701 *descqp++ = cpu_to_le64(sdmadesc[0]);
702 *descqp++ = cpu_to_le64(sdmadesc[1]);
703
704 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
705 tx->txreq.start_idx = tail;
706
707 /* increment the tail */
708 if (++tail == dd->ipath_sdma_descq_cnt) {
709 tail = 0;
710 descqp = &dd->ipath_sdma_descq[0].qw[0];
711 ++dd->ipath_sdma_generation;
712 }
713
714 sge = &ss->sge;
715 while (dwords) {
716 u32 dw;
717 u32 len;
718
719 len = dwords << 2;
720 if (len > sge->length)
721 len = sge->length;
722 if (len > sge->sge_length)
723 len = sge->sge_length;
724 BUG_ON(len == 0);
725 dw = (len + 3) >> 2;
726 addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
727 DMA_TO_DEVICE);
728 make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
729 /* SDmaUseLargeBuf has to be set in every descriptor */
730 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
731 sdmadesc[0] |= 1ULL << 14;
732 /* write to the descq */
733 *descqp++ = cpu_to_le64(sdmadesc[0]);
734 *descqp++ = cpu_to_le64(sdmadesc[1]);
735
736 /* increment the tail */
737 if (++tail == dd->ipath_sdma_descq_cnt) {
738 tail = 0;
739 descqp = &dd->ipath_sdma_descq[0].qw[0];
740 ++dd->ipath_sdma_generation;
741 }
742 sge->vaddr += len;
743 sge->length -= len;
744 sge->sge_length -= len;
745 if (sge->sge_length == 0) {
746 if (--ss->num_sge)
747 *sge = *ss->sg_list++;
748 } else if (sge->length == 0 && sge->mr != NULL) {
749 if (++sge->n >= IPATH_SEGSZ) {
750 if (++sge->m >= sge->mr->mapsz)
751 break;
752 sge->n = 0;
753 }
754 sge->vaddr =
755 sge->mr->map[sge->m]->segs[sge->n].vaddr;
756 sge->length =
757 sge->mr->map[sge->m]->segs[sge->n].length;
758 }
759
760 dwoffset += dw;
761 dwords -= dw;
762 }
763
764 if (!tail)
765 descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
766 descqp -= 2;
767 /* SDmaLastDesc */
768 descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
769 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
770 /* SDmaIntReq */
771 descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
772 }
773
774 /* Commit writes to memory and advance the tail on the chip */
775 wmb();
776 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
777
778 tx->txreq.next_descq_idx = tail;
779 tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
780 dd->ipath_sdma_descq_tail = tail;
781 dd->ipath_sdma_descq_added += tx->txreq.sg_count;
782 list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
783 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
784 vl15_watchdog_enq(dd);
785
786unlock:
787 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
788fail:
789 return ret;
790}
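
The tail/generation bookkeeping in the function above is what lets the engine tell descriptors written on the current pass over the ring from stale ones left by the previous pass. A minimal user-space model of just that wrap step (the ring size here is a made-up value, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define DESCQ_CNT 256 /* hypothetical ring size */

struct ring {
	uint16_t tail;
	uint8_t generation;
};

static void advance_tail(struct ring *r)
{
	if (++r->tail == DESCQ_CNT) {
		r->tail = 0;
		r->generation++; /* mirrors ++dd->ipath_sdma_generation */
	}
}

int main(void)
{
	struct ring r = { .tail = DESCQ_CNT - 1, .generation = 0 };

	advance_tail(&r);
	printf("tail=%u gen=%u\n", r.tail, r.generation); /* tail=0 gen=1 */
	return 0;
}
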
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index f772102e4713..e3d80ca84c1a 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -245,7 +245,8 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
245 sizeof(offset_addr)); 245 sizeof(offset_addr));
246 if (ret) 246 if (ret)
247 goto bail_free; 247 goto bail_free;
248 udata->outbuf = (void __user *) offset_addr; 248 udata->outbuf =
249 (void __user *) (unsigned long) offset_addr;
249 ret = ib_copy_to_udata(udata, &offset, 250 ret = ib_copy_to_udata(udata, &offset,
250 sizeof(offset)); 251 sizeof(offset));
251 if (ret) 252 if (ret)
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index d2725cd11bdc..c8e3d65f0de8 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -136,6 +136,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
136 struct ipath_portdata *pd = dd->ipath_pd[0]; 136 struct ipath_portdata *pd = dd->ipath_pd[0];
137 size_t blen = 0; 137 size_t blen = 0;
138 char buf[128]; 138 char buf[128];
139 u32 hdrqtail;
139 140
140 *buf = 0; 141 *buf = 0;
141 if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) { 142 if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
@@ -174,17 +175,18 @@ static void ipath_qcheck(struct ipath_devdata *dd)
174 if (blen) 175 if (blen)
175 ipath_dbg("%s\n", buf); 176 ipath_dbg("%s\n", buf);
176 177
177 if (pd->port_head != (u32) 178 hdrqtail = ipath_get_hdrqtail(pd);
178 le64_to_cpu(*dd->ipath_hdrqtailptr)) { 179 if (pd->port_head != hdrqtail) {
179 if (dd->ipath_lastport0rcv_cnt == 180 if (dd->ipath_lastport0rcv_cnt ==
180 ipath_stats.sps_port0pkts) { 181 ipath_stats.sps_port0pkts) {
181 ipath_cdbg(PKT, "missing rcv interrupts? " 182 ipath_cdbg(PKT, "missing rcv interrupts? "
182 "port0 hd=%llx tl=%x; port0pkts %llx\n", 183 "port0 hd=%x tl=%x; port0pkts %llx; write"
183 (unsigned long long) 184 " hd (w/intr)\n",
184 le64_to_cpu(*dd->ipath_hdrqtailptr), 185 pd->port_head, hdrqtail,
185 pd->port_head,
186 (unsigned long long) 186 (unsigned long long)
187 ipath_stats.sps_port0pkts); 187 ipath_stats.sps_port0pkts);
188 ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
189 dd->ipath_rhdrhead_intr_off, pd->port_port);
188 } 190 }
189 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts; 191 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
190 } 192 }
@@ -290,11 +292,11 @@ void ipath_get_faststats(unsigned long opaque)
290 && time_after(jiffies, dd->ipath_unmasktime)) { 292 && time_after(jiffies, dd->ipath_unmasktime)) {
291 char ebuf[256]; 293 char ebuf[256];
292 int iserr; 294 int iserr;
293 iserr = ipath_decode_err(ebuf, sizeof ebuf, 295 iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
294 dd->ipath_maskederrs); 296 dd->ipath_maskederrs);
295 if (dd->ipath_maskederrs & 297 if (dd->ipath_maskederrs &
296 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | 298 ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
297 INFINIPATH_E_PKTERRS )) 299 INFINIPATH_E_PKTERRS))
298 ipath_dev_err(dd, "Re-enabling masked errors " 300 ipath_dev_err(dd, "Re-enabling masked errors "
299 "(%s)\n", ebuf); 301 "(%s)\n", ebuf);
300 else { 302 else {
@@ -306,17 +308,18 @@ void ipath_get_faststats(unsigned long opaque)
306 * level. 308 * level.
307 */ 309 */
308 if (iserr) 310 if (iserr)
309 ipath_dbg("Re-enabling queue full errors (%s)\n", 311 ipath_dbg(
310 ebuf); 312 "Re-enabling queue full errors (%s)\n",
313 ebuf);
311 else 314 else
312 ipath_cdbg(ERRPKT, "Re-enabling packet" 315 ipath_cdbg(ERRPKT, "Re-enabling packet"
313 " problem interrupt (%s)\n", ebuf); 316 " problem interrupt (%s)\n", ebuf);
314 } 317 }
315 318
316 /* re-enable masked errors */ 319 /* re-enable masked errors */
317 dd->ipath_errormask |= dd->ipath_maskederrs; 320 dd->ipath_errormask |= dd->ipath_maskederrs;
318 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 321 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
319 dd->ipath_errormask); 322 dd->ipath_errormask);
320 dd->ipath_maskederrs = 0; 323 dd->ipath_maskederrs = 0;
321 } 324 }
322 325
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 56dfc8a2344c..a6c8efbdc0c9 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -34,6 +34,7 @@
34#include <linux/ctype.h> 34#include <linux/ctype.h>
35 35
36#include "ipath_kernel.h" 36#include "ipath_kernel.h"
37#include "ipath_verbs.h"
37#include "ipath_common.h" 38#include "ipath_common.h"
38 39
39/** 40/**
@@ -163,6 +164,15 @@ static ssize_t show_boardversion(struct device *dev,
163 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion); 164 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
164} 165}
165 166
167static ssize_t show_localbus_info(struct device *dev,
168 struct device_attribute *attr,
169 char *buf)
170{
171 struct ipath_devdata *dd = dev_get_drvdata(dev);
172 /* The string printed here is already newline-terminated. */
173 return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
174}
175
166static ssize_t show_lmc(struct device *dev, 176static ssize_t show_lmc(struct device *dev,
167 struct device_attribute *attr, 177 struct device_attribute *attr,
168 char *buf) 178 char *buf)
@@ -311,6 +321,8 @@ static ssize_t store_guid(struct device *dev,
311 321
312 dd->ipath_guid = new_guid; 322 dd->ipath_guid = new_guid;
313 dd->ipath_nguid = 1; 323 dd->ipath_nguid = 1;
324 if (dd->verbs_dev)
325 dd->verbs_dev->ibdev.node_guid = new_guid;
314 326
315 ret = strlen(buf); 327 ret = strlen(buf);
316 goto bail; 328 goto bail;
@@ -919,21 +931,21 @@ static ssize_t store_rx_polinv_enb(struct device *dev,
919 u16 val; 931 u16 val;
920 932
921 ret = ipath_parse_ushort(buf, &val); 933 ret = ipath_parse_ushort(buf, &val);
922 if (ret < 0 || val > 1) 934 if (ret >= 0 && val > 1) {
923 goto invalid; 935 ipath_dev_err(dd,
936 "attempt to set invalid Rx Polarity (enable)\n");
937 ret = -EINVAL;
938 goto bail;
939 }
924 940
925 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val); 941 r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
926 if (r < 0) { 942 if (r < 0)
927 ret = r; 943 ret = r;
928 goto bail;
929 }
930 944
931 goto bail;
932invalid:
933 ipath_dev_err(dd, "attempt to set invalid Rx Polarity (enable)\n");
934bail: 945bail:
935 return ret; 946 return ret;
936} 947}
948
937/* 949/*
938 * Get/Set RX lane-reversal enable. 0=no, 1=yes. 950 * Get/Set RX lane-reversal enable. 0=no, 1=yes.
939 */ 951 */
@@ -988,6 +1000,75 @@ static struct attribute_group driver_attr_group = {
988 .attrs = driver_attributes 1000 .attrs = driver_attributes
989}; 1001};
990 1002
1003static ssize_t store_tempsense(struct device *dev,
1004 struct device_attribute *attr,
1005 const char *buf,
1006 size_t count)
1007{
1008 struct ipath_devdata *dd = dev_get_drvdata(dev);
1009 int ret, stat;
1010 u16 val;
1011
1012 ret = ipath_parse_ushort(buf, &val);
1013 if (ret <= 0) {
1014 ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
1015 goto bail;
1016 }
1017 /* If anything but the highest limit, enable T_CRIT_A "interrupt" */
1018 stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
1019 if (stat) {
1020 ipath_dev_err(dd, "Unable to set tempsense config\n");
1021 ret = -1;
1022 goto bail;
1023 }
1024 stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
1025 if (stat) {
1026 ipath_dev_err(dd, "Unable to set local Tcrit\n");
1027 ret = -1;
1028 goto bail;
1029 }
1030 stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
1031 if (stat) {
1032 ipath_dev_err(dd, "Unable to set remote Tcrit\n");
1033 ret = -1;
1034 goto bail;
1035 }
1036
1037bail:
1038 return ret;
1039}
1040
1041/*
1042 * dump tempsense regs. in decimal, to ease shell-scripts.
1043 */
1044static ssize_t show_tempsense(struct device *dev,
1045 struct device_attribute *attr,
1046 char *buf)
1047{
1048 struct ipath_devdata *dd = dev_get_drvdata(dev);
1049 int ret;
1050 int idx;
1051 u8 regvals[8];
1052
1053 ret = -ENXIO;
1054 for (idx = 0; idx < 8; ++idx) {
1055 if (idx == 6)
1056 continue;
1057 ret = ipath_tempsense_read(dd, idx);
1058 if (ret < 0)
1059 break;
1060 regvals[idx] = ret;
1061 }
1062 if (idx == 8)
1063 ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
1064 *(signed char *)(regvals),
1065 *(signed char *)(regvals + 1),
1066 regvals[2], regvals[3],
1067 *(signed char *)(regvals + 5),
1068 *(signed char *)(regvals + 7));
1069 return ret;
1070}
1071
991struct attribute_group *ipath_driver_attr_groups[] = { 1072struct attribute_group *ipath_driver_attr_groups[] = {
992 &driver_attr_group, 1073 &driver_attr_group,
993 NULL, 1074 NULL,
@@ -1011,10 +1092,13 @@ static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
1011static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv); 1092static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
1012static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override); 1093static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
1013static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); 1094static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
1095static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
1014static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO, 1096static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
1015 show_jint_max_packets, store_jint_max_packets); 1097 show_jint_max_packets, store_jint_max_packets);
1016static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO, 1098static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
1017 show_jint_idle_ticks, store_jint_idle_ticks); 1099 show_jint_idle_ticks, store_jint_idle_ticks);
1100static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
1101 show_tempsense, store_tempsense);
1018 1102
1019static struct attribute *dev_attributes[] = { 1103static struct attribute *dev_attributes[] = {
1020 &dev_attr_guid.attr, 1104 &dev_attr_guid.attr,
@@ -1034,6 +1118,8 @@ static struct attribute *dev_attributes[] = {
1034 &dev_attr_rx_pol_inv.attr, 1118 &dev_attr_rx_pol_inv.attr,
1035 &dev_attr_led_override.attr, 1119 &dev_attr_led_override.attr,
1036 &dev_attr_logged_errors.attr, 1120 &dev_attr_logged_errors.attr,
1121 &dev_attr_tempsense.attr,
1122 &dev_attr_localbus_info.attr,
1037 NULL 1123 NULL
1038}; 1124};
1039 1125
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 2dd8de20d221..bfe8926b5514 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -94,7 +94,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
94 qp->s_state = 94 qp->s_state =
95 OP(SEND_ONLY_WITH_IMMEDIATE); 95 OP(SEND_ONLY_WITH_IMMEDIATE);
96 /* Immediate data comes after the BTH */ 96 /* Immediate data comes after the BTH */
97 ohdr->u.imm_data = wqe->wr.imm_data; 97 ohdr->u.imm_data = wqe->wr.ex.imm_data;
98 hwords += 1; 98 hwords += 1;
99 } 99 }
100 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 100 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -123,7 +123,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
123 qp->s_state = 123 qp->s_state =
124 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 124 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
125 /* Immediate data comes after the RETH */ 125 /* Immediate data comes after the RETH */
126 ohdr->u.rc.imm_data = wqe->wr.imm_data; 126 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
127 hwords += 1; 127 hwords += 1;
128 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 128 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
129 bth0 |= 1 << 23; 129 bth0 |= 1 << 23;
@@ -152,7 +152,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
152 else { 152 else {
153 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 153 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
154 /* Immediate data comes after the BTH */ 154 /* Immediate data comes after the BTH */
155 ohdr->u.imm_data = wqe->wr.imm_data; 155 ohdr->u.imm_data = wqe->wr.ex.imm_data;
156 hwords += 1; 156 hwords += 1;
157 } 157 }
158 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 158 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -177,7 +177,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
177 qp->s_state = 177 qp->s_state =
178 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 178 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
179 /* Immediate data comes after the BTH */ 179 /* Immediate data comes after the BTH */
180 ohdr->u.imm_data = wqe->wr.imm_data; 180 ohdr->u.imm_data = wqe->wr.ex.imm_data;
181 hwords += 1; 181 hwords += 1;
182 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 182 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
183 bth0 |= 1 << 23; 183 bth0 |= 1 << 23;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index de67eed08ed0..8b6a261c89e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -95,7 +95,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
95 95
96 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 96 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
97 wc.wc_flags = IB_WC_WITH_IMM; 97 wc.wc_flags = IB_WC_WITH_IMM;
98 wc.imm_data = swqe->wr.imm_data; 98 wc.imm_data = swqe->wr.ex.imm_data;
99 } else { 99 } else {
100 wc.wc_flags = 0; 100 wc.wc_flags = 0;
101 wc.imm_data = 0; 101 wc.imm_data = 0;
@@ -303,6 +303,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
303 qp->s_hdrwords = 7; 303 qp->s_hdrwords = 7;
304 qp->s_cur_size = wqe->length; 304 qp->s_cur_size = wqe->length;
305 qp->s_cur_sge = &qp->s_sge; 305 qp->s_cur_sge = &qp->s_sge;
306 qp->s_dmult = ah_attr->static_rate;
306 qp->s_wqe = wqe; 307 qp->s_wqe = wqe;
307 qp->s_sge.sge = wqe->sg_list[0]; 308 qp->s_sge.sge = wqe->sg_list[0];
308 qp->s_sge.sg_list = wqe->sg_list + 1; 309 qp->s_sge.sg_list = wqe->sg_list + 1;
@@ -326,7 +327,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
326 } 327 }
327 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 328 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
328 qp->s_hdrwords++; 329 qp->s_hdrwords++;
329 ohdr->u.ud.imm_data = wqe->wr.imm_data; 330 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
330 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; 331 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
331 } else 332 } else
332 bth0 = IB_OPCODE_UD_SEND_ONLY << 24; 333 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
new file mode 100644
index 000000000000..86e016916cd1
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -0,0 +1,879 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/mm.h>
33#include <linux/types.h>
34#include <linux/device.h>
35#include <linux/dmapool.h>
36#include <linux/slab.h>
37#include <linux/list.h>
38#include <linux/highmem.h>
39#include <linux/io.h>
40#include <linux/uio.h>
41#include <linux/rbtree.h>
42#include <linux/spinlock.h>
43#include <linux/delay.h>
44
45#include "ipath_kernel.h"
46#include "ipath_user_sdma.h"
47
48/* minimum size of header */
49#define IPATH_USER_SDMA_MIN_HEADER_LENGTH 64
50/* expected size of headers (for dma_pool) */
51#define IPATH_USER_SDMA_EXP_HEADER_LENGTH 64
52/* length mask in PBC (lower 11 bits) */
53#define IPATH_PBC_LENGTH_MASK ((1 << 11) - 1)
54
55struct ipath_user_sdma_pkt {
56 u8 naddr; /* dimension of addr (1..3) ... */
57 u32 counter; /* sdma pkts queued counter for this entry */
58 u64 added; /* global descq number of entries */
59
60 struct {
61 u32 offset; /* offset for kvaddr, addr */
62 u32 length; /* length in page */
63 u8 put_page; /* should we put_page? */
64 u8 dma_mapped; /* is page dma_mapped? */
65 struct page *page; /* may be NULL (coherent mem) */
66 void *kvaddr; /* FIXME: only for pio hack */
67 dma_addr_t addr;
68 } addr[4]; /* max pages, any more and we coalesce */
69 struct list_head list; /* list element */
70};
71
72struct ipath_user_sdma_queue {
73	/*
74	 * pkts sent to the dma engine are queued on this
75	 * list head.  the elements of this list are of
76	 * type struct ipath_user_sdma_pkt...
77	 */
78 struct list_head sent;
79
80 /* headers with expected length are allocated from here... */
81 char header_cache_name[64];
82 struct dma_pool *header_cache;
83
84 /* packets are allocated from the slab cache... */
85 char pkt_slab_name[64];
86 struct kmem_cache *pkt_slab;
87
88 /* as packets go on the queued queue, they are counted... */
89 u32 counter;
90 u32 sent_counter;
91
92 /* dma page table */
93 struct rb_root dma_pages_root;
94
95 /* protect everything above... */
96 struct mutex lock;
97};
98
99struct ipath_user_sdma_queue *
100ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
101{
102 struct ipath_user_sdma_queue *pq =
103 kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
104
105 if (!pq)
106 goto done;
107
108 pq->counter = 0;
109 pq->sent_counter = 0;
110 INIT_LIST_HEAD(&pq->sent);
111
112 mutex_init(&pq->lock);
113
114 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
115 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
116 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
117 sizeof(struct ipath_user_sdma_pkt),
118 0, 0, NULL);
119
120 if (!pq->pkt_slab)
121 goto err_kfree;
122
123 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
124 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
125 pq->header_cache = dma_pool_create(pq->header_cache_name,
126 dev,
127 IPATH_USER_SDMA_EXP_HEADER_LENGTH,
128 4, 0);
129 if (!pq->header_cache)
130 goto err_slab;
131
132 pq->dma_pages_root = RB_ROOT;
133
134 goto done;
135
136err_slab:
137 kmem_cache_destroy(pq->pkt_slab);
138err_kfree:
139 kfree(pq);
140 pq = NULL;
141
142done:
143 return pq;
144}
145
146static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
147 int i, size_t offset, size_t len,
148 int put_page, int dma_mapped,
149 struct page *page,
150 void *kvaddr, dma_addr_t dma_addr)
151{
152 pkt->addr[i].offset = offset;
153 pkt->addr[i].length = len;
154 pkt->addr[i].put_page = put_page;
155 pkt->addr[i].dma_mapped = dma_mapped;
156 pkt->addr[i].page = page;
157 pkt->addr[i].kvaddr = kvaddr;
158 pkt->addr[i].addr = dma_addr;
159}
160
161static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
162 u32 counter, size_t offset,
163 size_t len, int dma_mapped,
164 struct page *page,
165 void *kvaddr, dma_addr_t dma_addr)
166{
167 pkt->naddr = 1;
168 pkt->counter = counter;
169 ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
170 kvaddr, dma_addr);
171}
172
173/* we have too many pages in the iovec, coalesce into a single page */
174static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
175 struct ipath_user_sdma_pkt *pkt,
176 const struct iovec *iov,
177 unsigned long niov) {
178 int ret = 0;
179 struct page *page = alloc_page(GFP_KERNEL);
180 void *mpage_save;
181 char *mpage;
182 int i;
183 int len = 0;
184 dma_addr_t dma_addr;
185
186 if (!page) {
187 ret = -ENOMEM;
188 goto done;
189 }
190
191 mpage = kmap(page);
192 mpage_save = mpage;
193 for (i = 0; i < niov; i++) {
194 int cfur;
195
196 cfur = copy_from_user(mpage,
197 iov[i].iov_base, iov[i].iov_len);
198 if (cfur) {
199 ret = -EFAULT;
200 goto free_unmap;
201 }
202
203 mpage += iov[i].iov_len;
204 len += iov[i].iov_len;
205 }
206
207 dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
208 DMA_TO_DEVICE);
209 if (dma_mapping_error(dma_addr)) {
210 ret = -ENOMEM;
211 goto free_unmap;
212 }
213
214 ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
215 dma_addr);
216 pkt->naddr = 2;
217
218 goto done;
219
220free_unmap:
221 kunmap(page);
222 __free_page(page);
223done:
224 return ret;
225}
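
The coalescing path above copies every iovec payload back-to-back into one freshly allocated page. A stand-alone model of the copy loop, with memcpy standing in for copy_from_user and a plain array for the kmapped page:

#include <stdio.h>
#include <string.h>

struct iovec_s { const void *base; size_t len; };

int main(void)
{
	char page[4096];
	struct iovec_s iov[2] = { { "abc", 3 }, { "defg", 4 } };
	size_t off = 0, i;

	for (i = 0; i < 2; i++) {
		memcpy(page + off, iov[i].base, iov[i].len);
		off += iov[i].len;
	}
	printf("%zu bytes: %.7s\n", off, page); /* 7 bytes: abcdefg */
	return 0;
}
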
226
227/* how many pages in this iovec element? */
228static int ipath_user_sdma_num_pages(const struct iovec *iov)
229{
230 const unsigned long addr = (unsigned long) iov->iov_base;
231 const unsigned long len = iov->iov_len;
232 const unsigned long spage = addr & PAGE_MASK;
233 const unsigned long epage = (addr + len - 1) & PAGE_MASK;
234
235 return 1 + ((epage - spage) >> PAGE_SHIFT);
236}
237
238/* truncate length to page boundary */
239static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
240{
241 const unsigned long offset = addr & ~PAGE_MASK;
242
243 return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
244}
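
A quick stand-alone check of the two page-math helpers above, assuming a 4 KiB page purely for illustration:

#include <stdio.h>

#define PG_SIZE  4096UL
#define PG_MASK  (~(PG_SIZE - 1))
#define PG_SHIFT 12

static int num_pages(unsigned long addr, unsigned long len)
{
	unsigned long spage = addr & PG_MASK;
	unsigned long epage = (addr + len - 1) & PG_MASK;

	return 1 + ((epage - spage) >> PG_SHIFT);
}

static unsigned long page_length(unsigned long addr, unsigned long len)
{
	unsigned long offset = addr & ~PG_MASK;

	return ((offset + len) > PG_SIZE) ? (PG_SIZE - offset) : len;
}

int main(void)
{
	/* 32 bytes starting 16 bytes before a page boundary: */
	printf("%d pages, first fragment %lu bytes\n",
	       num_pages(0x1ff0, 0x20), page_length(0x1ff0, 0x20));
	/* -> 2 pages, first fragment 16 bytes */
	return 0;
}
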
245
246static void ipath_user_sdma_free_pkt_frag(struct device *dev,
247 struct ipath_user_sdma_queue *pq,
248 struct ipath_user_sdma_pkt *pkt,
249 int frag)
250{
251 const int i = frag;
252
253 if (pkt->addr[i].page) {
254 if (pkt->addr[i].dma_mapped)
255 dma_unmap_page(dev,
256 pkt->addr[i].addr,
257 pkt->addr[i].length,
258 DMA_TO_DEVICE);
259
260 if (pkt->addr[i].kvaddr)
261 kunmap(pkt->addr[i].page);
262
263 if (pkt->addr[i].put_page)
264 put_page(pkt->addr[i].page);
265 else
266 __free_page(pkt->addr[i].page);
267 } else if (pkt->addr[i].kvaddr)
268 /* free coherent mem from cache... */
269 dma_pool_free(pq->header_cache,
270 pkt->addr[i].kvaddr, pkt->addr[i].addr);
271}
272
273/* return number of pages pinned... */
274static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
275 struct ipath_user_sdma_pkt *pkt,
276 unsigned long addr, int tlen, int npages)
277{
278 struct page *pages[2];
279 int j;
280 int ret;
281
282 ret = get_user_pages(current, current->mm, addr,
283 npages, 0, 1, pages, NULL);
284
285 if (ret != npages) {
286 int i;
287
288 for (i = 0; i < ret; i++)
289 put_page(pages[i]);
290
291 ret = -ENOMEM;
292 goto done;
293 }
294
295 for (j = 0; j < npages; j++) {
296 /* map the pages... */
297 const int flen =
298 ipath_user_sdma_page_length(addr, tlen);
299 dma_addr_t dma_addr =
300 dma_map_page(&dd->pcidev->dev,
301 pages[j], 0, flen, DMA_TO_DEVICE);
302 unsigned long fofs = addr & ~PAGE_MASK;
303
304 if (dma_mapping_error(dma_addr)) {
305 ret = -ENOMEM;
306 goto done;
307 }
308
309 ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
310 pages[j], kmap(pages[j]),
311 dma_addr);
312
313 pkt->naddr++;
314 addr += flen;
315 tlen -= flen;
316 }
317
318done:
319 return ret;
320}
321
322static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
323 struct ipath_user_sdma_queue *pq,
324 struct ipath_user_sdma_pkt *pkt,
325 const struct iovec *iov,
326 unsigned long niov)
327{
328 int ret = 0;
329 unsigned long idx;
330
331 for (idx = 0; idx < niov; idx++) {
332 const int npages = ipath_user_sdma_num_pages(iov + idx);
333 const unsigned long addr = (unsigned long) iov[idx].iov_base;
334
335 ret = ipath_user_sdma_pin_pages(dd, pkt,
336 addr, iov[idx].iov_len,
337 npages);
338 if (ret < 0)
339 goto free_pkt;
340 }
341
342 goto done;
343
344free_pkt:
345 for (idx = 0; idx < pkt->naddr; idx++)
346 ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
347
348done:
349 return ret;
350}
351
352static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
353 struct ipath_user_sdma_queue *pq,
354 struct ipath_user_sdma_pkt *pkt,
355 const struct iovec *iov,
356 unsigned long niov, int npages)
357{
358 int ret = 0;
359
360 if (npages >= ARRAY_SIZE(pkt->addr))
361 ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
362 else
363 ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
364
365 return ret;
366}
367
368/* free a packet list */
369static void ipath_user_sdma_free_pkt_list(struct device *dev,
370 struct ipath_user_sdma_queue *pq,
371 struct list_head *list)
372{
373 struct ipath_user_sdma_pkt *pkt, *pkt_next;
374
375 list_for_each_entry_safe(pkt, pkt_next, list, list) {
376 int i;
377
378 for (i = 0; i < pkt->naddr; i++)
379 ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
380
381 kmem_cache_free(pq->pkt_slab, pkt);
382 }
383}
384
385/*
386 * copy headers, coalesce etc -- pq->lock must be held
387 *
388 * we queue all the packets to list, returning the number
389 * of iovec entries consumed.  list must be empty initially,
390 * since we clean it up if there is an error...
391 */
392static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
393 struct ipath_user_sdma_queue *pq,
394 struct list_head *list,
395 const struct iovec *iov,
396 unsigned long niov,
397 int maxpkts)
398{
399 unsigned long idx = 0;
400 int ret = 0;
401 int npkts = 0;
402 struct page *page = NULL;
403 __le32 *pbc;
404 dma_addr_t dma_addr;
405 struct ipath_user_sdma_pkt *pkt = NULL;
406 size_t len;
407 size_t nw;
408 u32 counter = pq->counter;
409 int dma_mapped = 0;
410
411 while (idx < niov && npkts < maxpkts) {
412 const unsigned long addr = (unsigned long) iov[idx].iov_base;
413 const unsigned long idx_save = idx;
414 unsigned pktnw;
415 unsigned pktnwc;
416 int nfrags = 0;
417 int npages = 0;
418 int cfur;
419
420 dma_mapped = 0;
421 len = iov[idx].iov_len;
422 nw = len >> 2;
423 page = NULL;
424
425 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
426 if (!pkt) {
427 ret = -ENOMEM;
428 goto free_list;
429 }
430
431 if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
432 len > PAGE_SIZE || len & 3 || addr & 3) {
433 ret = -EINVAL;
434 goto free_pkt;
435 }
436
437 if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
438 pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
439 &dma_addr);
440 else
441 pbc = NULL;
442
443 if (!pbc) {
444 page = alloc_page(GFP_KERNEL);
445 if (!page) {
446 ret = -ENOMEM;
447 goto free_pkt;
448 }
449 pbc = kmap(page);
450 }
451
452 cfur = copy_from_user(pbc, iov[idx].iov_base, len);
453 if (cfur) {
454 ret = -EFAULT;
455 goto free_pbc;
456 }
457
458		/*
459		 * this assignment is a bit strange.  it's because
460		 * the pbc counts the number of 32 bit words in the full
461		 * packet _except_ the first word of the pbc itself...
462		 */
463 pktnwc = nw - 1;
464
465		/*
466		 * pktnw computation yields the number of 32 bit words
467		 * that the caller has indicated in the PBC.  note that
468		 * this is one less than the total number of words that
469		 * go to the send DMA engine, as the first 32 bit word
470		 * of the PBC itself is not counted.  Armed with this count,
471		 * we can verify that the packet is consistent with the
472		 * iovec lengths.
473		 */
474 pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
475 if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
476 ret = -EINVAL;
477 goto free_pbc;
478 }
479
480
481 idx++;
482 while (pktnwc < pktnw && idx < niov) {
483 const size_t slen = iov[idx].iov_len;
484 const unsigned long faddr =
485 (unsigned long) iov[idx].iov_base;
486
487 if (slen & 3 || faddr & 3 || !slen ||
488 slen > PAGE_SIZE) {
489 ret = -EINVAL;
490 goto free_pbc;
491 }
492
493 npages++;
494 if ((faddr & PAGE_MASK) !=
495 ((faddr + slen - 1) & PAGE_MASK))
496 npages++;
497
498 pktnwc += slen >> 2;
499 idx++;
500 nfrags++;
501 }
502
503 if (pktnwc != pktnw) {
504 ret = -EINVAL;
505 goto free_pbc;
506 }
507
508 if (page) {
509 dma_addr = dma_map_page(&dd->pcidev->dev,
510 page, 0, len, DMA_TO_DEVICE);
511 if (dma_mapping_error(dma_addr)) {
512 ret = -ENOMEM;
513 goto free_pbc;
514 }
515
516 dma_mapped = 1;
517 }
518
519 ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
520 page, pbc, dma_addr);
521
522 if (nfrags) {
523 ret = ipath_user_sdma_init_payload(dd, pq, pkt,
524 iov + idx_save + 1,
525 nfrags, npages);
526 if (ret < 0)
527 goto free_pbc_dma;
528 }
529
530 counter++;
531 npkts++;
532
533 list_add_tail(&pkt->list, list);
534 }
535
536 ret = idx;
537 goto done;
538
539free_pbc_dma:
540 if (dma_mapped)
541 dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
542free_pbc:
543 if (page) {
544 kunmap(page);
545 __free_page(page);
546 } else
547 dma_pool_free(pq->header_cache, pbc, dma_addr);
548free_pkt:
549 kmem_cache_free(pq->pkt_slab, pkt);
550free_list:
551 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
552done:
553 return ret;
554}
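
The pktnw/pktnwc bookkeeping above boils down to one consistency rule: the low 11 bits of the first PBC word must equal the 32-bit word count of header plus payload, minus the PBC word itself. A stand-alone sketch with made-up sizes:

#include <stdint.h>
#include <stdio.h>

#define PBC_LENGTH_MASK ((1u << 11) - 1)

static int pbc_len_ok(uint32_t pbc0, unsigned hdr_bytes, unsigned payload_bytes)
{
	uint32_t pktnw = pbc0 & PBC_LENGTH_MASK;
	uint32_t pktnwc = (hdr_bytes >> 2) - 1 + (payload_bytes >> 2);

	return pktnw == pktnwc;
}

int main(void)
{
	/* 64-byte header + 256-byte payload: 16 - 1 + 64 = 79 words */
	printf("%s\n", pbc_len_ok(79, 64, 256) ? "consistent" : "bad");
	return 0;
}
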
555
556static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
557 u32 c)
558{
559 pq->sent_counter = c;
560}
561
562/* try to clean out queue -- needs pq->lock */
563static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
564 struct ipath_user_sdma_queue *pq)
565{
566 struct list_head free_list;
567 struct ipath_user_sdma_pkt *pkt;
568 struct ipath_user_sdma_pkt *pkt_prev;
569 int ret = 0;
570
571 INIT_LIST_HEAD(&free_list);
572
573 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
574 s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
575
576 if (descd < 0)
577 break;
578
579 list_move_tail(&pkt->list, &free_list);
580
581 /* one more packet cleaned */
582 ret++;
583 }
584
585 if (!list_empty(&free_list)) {
586 u32 counter;
587
588 pkt = list_entry(free_list.prev,
589 struct ipath_user_sdma_pkt, list);
590 counter = pkt->counter;
591
592 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
593 ipath_user_sdma_set_complete_counter(pq, counter);
594 }
595
596 return ret;
597}
598
599void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
600{
601 if (!pq)
602 return;
603
604 kmem_cache_destroy(pq->pkt_slab);
605 dma_pool_destroy(pq->header_cache);
606 kfree(pq);
607}
608
609/* clean descriptor queue, returns > 0 if some elements cleaned */
610static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
611{
612 int ret;
613 unsigned long flags;
614
615 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
616 ret = ipath_sdma_make_progress(dd);
617 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
618
619 return ret;
620}
621
622/* we're in close; drain packets so that we can clean up successfully... */
623void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
624 struct ipath_user_sdma_queue *pq)
625{
626 int i;
627
628 if (!pq)
629 return;
630
631 for (i = 0; i < 100; i++) {
632 mutex_lock(&pq->lock);
633 if (list_empty(&pq->sent)) {
634 mutex_unlock(&pq->lock);
635 break;
636 }
637 ipath_user_sdma_hwqueue_clean(dd);
638 ipath_user_sdma_queue_clean(dd, pq);
639 mutex_unlock(&pq->lock);
640 msleep(10);
641 }
642
643 if (!list_empty(&pq->sent)) {
644 struct list_head free_list;
645
646 printk(KERN_INFO "drain: lists not empty: forcing!\n");
647 INIT_LIST_HEAD(&free_list);
648 mutex_lock(&pq->lock);
649 list_splice_init(&pq->sent, &free_list);
650 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
651 mutex_unlock(&pq->lock);
652 }
653}
654
655static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
656 u64 addr, u64 dwlen, u64 dwoffset)
657{
658 return cpu_to_le64(/* SDmaPhyAddr[31:0] */
659 ((addr & 0xfffffffcULL) << 32) |
660 /* SDmaGeneration[1:0] */
661 ((dd->ipath_sdma_generation & 3ULL) << 30) |
662 /* SDmaDwordCount[10:0] */
663 ((dwlen & 0x7ffULL) << 16) |
664 /* SDmaBufOffset[12:2] */
665 (dwoffset & 0x7ffULL));
666}
667
668static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
669{
670 return descq | __constant_cpu_to_le64(1ULL << 12);
671}
672
673static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
674{
675 /* last */ /* dma head */
676 return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
677}
678
679static inline __le64 ipath_sdma_make_desc1(u64 addr)
680{
681 /* SDmaPhyAddr[47:32] */
682 return cpu_to_le64(addr >> 32);
683}
684
685static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
686 struct ipath_user_sdma_pkt *pkt, int idx,
687 unsigned ofs, u16 tail)
688{
689 const u64 addr = (u64) pkt->addr[idx].addr +
690 (u64) pkt->addr[idx].offset;
691 const u64 dwlen = (u64) pkt->addr[idx].length / 4;
692 __le64 *descqp;
693 __le64 descq0;
694
695 descqp = &dd->ipath_sdma_descq[tail].qw[0];
696
697 descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
698 if (idx == 0)
699 descq0 = ipath_sdma_make_first_desc0(descq0);
700 if (idx == pkt->naddr - 1)
701 descq0 = ipath_sdma_make_last_desc0(descq0);
702
703 descqp[0] = descq0;
704 descqp[1] = ipath_sdma_make_desc1(addr);
705}
706
707/* pq->lock must be held, get packets on the wire... */
708static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
709 struct ipath_user_sdma_queue *pq,
710 struct list_head *pktlist)
711{
712 int ret = 0;
713 unsigned long flags;
714 u16 tail;
715
716 if (list_empty(pktlist))
717 return 0;
718
719 if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
720 return -ECOMM;
721
722 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
723
724 if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
725 ret = -ECOMM;
726 goto unlock;
727 }
728
729 tail = dd->ipath_sdma_descq_tail;
730 while (!list_empty(pktlist)) {
731 struct ipath_user_sdma_pkt *pkt =
732 list_entry(pktlist->next, struct ipath_user_sdma_pkt,
733 list);
734 int i;
735 unsigned ofs = 0;
736 u16 dtail = tail;
737
738 if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
739 goto unlock_check_tail;
740
741 for (i = 0; i < pkt->naddr; i++) {
742 ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
743 ofs += pkt->addr[i].length >> 2;
744
745 if (++tail == dd->ipath_sdma_descq_cnt) {
746 tail = 0;
747 ++dd->ipath_sdma_generation;
748 }
749 }
750
751 if ((ofs<<2) > dd->ipath_ibmaxlen) {
752 ipath_dbg("packet size %X > ibmax %X, fail\n",
753 ofs<<2, dd->ipath_ibmaxlen);
754 ret = -EMSGSIZE;
755 goto unlock;
756 }
757
758 /*
759 * if the packet is >= 2KB mtu equivalent, we have to use
760 * the large buffers, and have to mark each descriptor as
761 * part of a large buffer packet.
762 */
763 if (ofs >= IPATH_SMALLBUF_DWORDS) {
764 for (i = 0; i < pkt->naddr; i++) {
765 dd->ipath_sdma_descq[dtail].qw[0] |=
766 __constant_cpu_to_le64(1ULL << 14);
767 if (++dtail == dd->ipath_sdma_descq_cnt)
768 dtail = 0;
769 }
770 }
771
772 dd->ipath_sdma_descq_added += pkt->naddr;
773 pkt->added = dd->ipath_sdma_descq_added;
774 list_move_tail(&pkt->list, &pq->sent);
775 ret++;
776 }
777
778unlock_check_tail:
779 /* advance the tail on the chip if necessary */
780 if (dd->ipath_sdma_descq_tail != tail) {
781 wmb();
782 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
783 dd->ipath_sdma_descq_tail = tail;
784 }
785
786unlock:
787 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
788
789 return ret;
790}
791
792int ipath_user_sdma_writev(struct ipath_devdata *dd,
793 struct ipath_user_sdma_queue *pq,
794 const struct iovec *iov,
795 unsigned long dim)
796{
797 int ret = 0;
798 struct list_head list;
799 int npkts = 0;
800
801 INIT_LIST_HEAD(&list);
802
803 mutex_lock(&pq->lock);
804
805 if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
806 ipath_user_sdma_hwqueue_clean(dd);
807 ipath_user_sdma_queue_clean(dd, pq);
808 }
809
810 while (dim) {
811 const int mxp = 8;
812
813 down_write(&current->mm->mmap_sem);
814 ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
815 up_write(&current->mm->mmap_sem);
816
817 if (ret <= 0)
818 goto done_unlock;
819 else {
820 dim -= ret;
821 iov += ret;
822 }
823
824 /* force packets onto the sdma hw queue... */
825 if (!list_empty(&list)) {
826 /*
827 * lazily clean hw queue. the 4 is a guess of about
828 * how many sdma descriptors a packet will take (it
829 * doesn't have to be perfect).
830 */
831 if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
832 ipath_user_sdma_hwqueue_clean(dd);
833 ipath_user_sdma_queue_clean(dd, pq);
834 }
835
836 ret = ipath_user_sdma_push_pkts(dd, pq, &list);
837 if (ret < 0)
838 goto done_unlock;
839 else {
840 npkts += ret;
841 pq->counter += ret;
842
843 if (!list_empty(&list))
844 goto done_unlock;
845 }
846 }
847 }
848
849done_unlock:
850 if (!list_empty(&list))
851 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
852 mutex_unlock(&pq->lock);
853
854 return (ret < 0) ? ret : npkts;
855}
856
857int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
858 struct ipath_user_sdma_queue *pq)
859{
860 int ret = 0;
861
862 mutex_lock(&pq->lock);
863 ipath_user_sdma_hwqueue_clean(dd);
864 ret = ipath_user_sdma_queue_clean(dd, pq);
865 mutex_unlock(&pq->lock);
866
867 return ret;
868}
869
870u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
871{
872 return pq->sent_counter;
873}
874
875u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
876{
877 return pq->counter;
878}
879
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.h b/drivers/infiniband/hw/ipath/ipath_user_sdma.h
new file mode 100644
index 000000000000..e70946c1428c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/device.h>
33
34struct ipath_user_sdma_queue;
35
36struct ipath_user_sdma_queue *
37ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
38void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
39
40int ipath_user_sdma_writev(struct ipath_devdata *dd,
41 struct ipath_user_sdma_queue *pq,
42 const struct iovec *iov,
43 unsigned long dim);
44
45int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
46 struct ipath_user_sdma_queue *pq);
47
48int ipath_user_sdma_pkt_sent(const struct ipath_user_sdma_queue *pq,
49 u32 counter);
50void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
51 struct ipath_user_sdma_queue *pq);
52
53u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
54u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 32d8f882e56c..320a6d018de7 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -242,6 +242,93 @@ static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
242 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); 242 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
243} 243}
244 244
245/*
246 * Count the number of DMA descriptors needed to send length bytes of data.
247 * Don't modify the ipath_sge_state to get the count.
248 * Return zero if any of the segments is not aligned.
249 */
250static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
251{
252 struct ipath_sge *sg_list = ss->sg_list;
253 struct ipath_sge sge = ss->sge;
254 u8 num_sge = ss->num_sge;
255 u32 ndesc = 1; /* count the header */
256
257 while (length) {
258 u32 len = sge.length;
259
260 if (len > length)
261 len = length;
262 if (len > sge.sge_length)
263 len = sge.sge_length;
264 BUG_ON(len == 0);
265 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
266 (len != length && (len & (sizeof(u32) - 1)))) {
267 ndesc = 0;
268 break;
269 }
270 ndesc++;
271 sge.vaddr += len;
272 sge.length -= len;
273 sge.sge_length -= len;
274 if (sge.sge_length == 0) {
275 if (--num_sge)
276 sge = *sg_list++;
277 } else if (sge.length == 0 && sge.mr != NULL) {
278 if (++sge.n >= IPATH_SEGSZ) {
279 if (++sge.m >= sge.mr->mapsz)
280 break;
281 sge.n = 0;
282 }
283 sge.vaddr =
284 sge.mr->map[sge.m]->segs[sge.n].vaddr;
285 sge.length =
286 sge.mr->map[sge.m]->segs[sge.n].length;
287 }
288 length -= len;
289 }
290 return ndesc;
291}
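
A simplified stand-alone model of the count above: one descriptor for the header plus one per fragment, returning zero when a fragment breaks the 32-bit alignment rules. The SGE chaining details are deliberately omitted:

#include <stdint.h>
#include <stdio.h>

static unsigned count_desc(const uint32_t *frag_len, const uintptr_t *vaddr,
			   unsigned nfrags)
{
	unsigned ndesc = 1; /* one descriptor for the header */
	unsigned i;

	for (i = 0; i < nfrags; i++) {
		/* addresses must be 4-byte aligned; lengths too, except
		 * possibly the very last fragment */
		if ((vaddr[i] & 3) || (i != nfrags - 1 && (frag_len[i] & 3)))
			return 0; /* caller must fall back to copying */
		ndesc++;
	}
	return ndesc;
}

int main(void)
{
	uint32_t len[2] = { 4096, 100 };
	uintptr_t va[2] = { 0x1000, 0x3000 };

	printf("%u descriptors\n", count_desc(len, va, 2)); /* 3 */
	return 0;
}
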
292
293/*
294 * Copy from the SGEs to the data buffer.
295 */
296static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
297 u32 length)
298{
299 struct ipath_sge *sge = &ss->sge;
300
301 while (length) {
302 u32 len = sge->length;
303
304 if (len > length)
305 len = length;
306 if (len > sge->sge_length)
307 len = sge->sge_length;
308 BUG_ON(len == 0);
309 memcpy(data, sge->vaddr, len);
310 sge->vaddr += len;
311 sge->length -= len;
312 sge->sge_length -= len;
313 if (sge->sge_length == 0) {
314 if (--ss->num_sge)
315 *sge = *ss->sg_list++;
316 } else if (sge->length == 0 && sge->mr != NULL) {
317 if (++sge->n >= IPATH_SEGSZ) {
318 if (++sge->m >= sge->mr->mapsz)
319 break;
320 sge->n = 0;
321 }
322 sge->vaddr =
323 sge->mr->map[sge->m]->segs[sge->n].vaddr;
324 sge->length =
325 sge->mr->map[sge->m]->segs[sge->n].length;
326 }
327 data += len;
328 length -= len;
329 }
330}
331
245/** 332/**
246 * ipath_post_one_send - post one RC, UC, or UD send work request 333 * ipath_post_one_send - post one RC, UC, or UD send work request
247 * @qp: the QP to post on 334 * @qp: the QP to post on
@@ -866,27 +953,257 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
866 __raw_writel(last, piobuf); 953 __raw_writel(last, piobuf);
867} 954}
868 955
869static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords, 956/*
957 * Convert IB rate to delay multiplier.
958 */
959unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
960{
961 switch (rate) {
962 case IB_RATE_2_5_GBPS: return 8;
963 case IB_RATE_5_GBPS: return 4;
964 case IB_RATE_10_GBPS: return 2;
965 case IB_RATE_20_GBPS: return 1;
966 default: return 0;
967 }
968}
969
970/*
971 * Convert delay multiplier to IB rate.
972 */
973static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
974{
975 switch (mult) {
976 case 8: return IB_RATE_2_5_GBPS;
977 case 4: return IB_RATE_5_GBPS;
978 case 2: return IB_RATE_10_GBPS;
979 case 1: return IB_RATE_20_GBPS;
980 default: return IB_RATE_PORT_CURRENT;
981 }
982}
983
984static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
985{
986 struct ipath_verbs_txreq *tx = NULL;
987 unsigned long flags;
988
989 spin_lock_irqsave(&dev->pending_lock, flags);
990 if (!list_empty(&dev->txreq_free)) {
991 struct list_head *l = dev->txreq_free.next;
992
993 list_del(l);
994 tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
995 }
996 spin_unlock_irqrestore(&dev->pending_lock, flags);
997 return tx;
998}
999
1000static inline void put_txreq(struct ipath_ibdev *dev,
1001 struct ipath_verbs_txreq *tx)
1002{
1003 unsigned long flags;
1004
1005 spin_lock_irqsave(&dev->pending_lock, flags);
1006 list_add(&tx->txreq.list, &dev->txreq_free);
1007 spin_unlock_irqrestore(&dev->pending_lock, flags);
1008}
1009
1010static void sdma_complete(void *cookie, int status)
1011{
1012 struct ipath_verbs_txreq *tx = cookie;
1013 struct ipath_qp *qp = tx->qp;
1014 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
1015
1016 /* Generate a completion queue entry if needed */
1017 if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
1018 enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
1019 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
1020
1021 ipath_send_complete(qp, tx->wqe, ibs);
1022 }
1023
1024 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
1025 kfree(tx->txreq.map_addr);
1026 put_txreq(dev, tx);
1027
1028 if (atomic_dec_and_test(&qp->refcount))
1029 wake_up(&qp->wait);
1030}
1031
1032/*
1033 * Compute the number of clock cycles of delay before sending the next packet.
1034 * The multipliers reflect the number of clocks for the fastest rate so
1035 * one tick at 4xDDR is 8 ticks at 1xSDR.
1036 * If the destination port will take longer to receive a packet than
1037 * the outgoing link can send it, we need to delay sending the next packet
1038 * by the difference in time it takes the receiver to receive and the sender
1039 * to send this packet.
1040 * Note that this delay is always correct for UC and RC but not always
1041 * optimal for UD. For UD, the destination HCA can be different for each
1042 * packet, in which case, we could send packets to a different destination
1043 * while "waiting" for the delay. The overhead for doing this without
1044 * HW support is more than just paying the cost of delaying some packets
1045 * unnecessarily.
1046 */
1047static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
1048{
1049 return (rcv_mult > snd_mult) ?
1050 (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
1051}
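
A worked example of the delay formula, using the multipliers from ipath_ib_rate_to_mult() above; plen is a packet length in 32-bit words and the value 100 is arbitrary:

#include <stdio.h>

static unsigned pkt_delay(unsigned plen, unsigned snd_mult, unsigned rcv_mult)
{
	return rcv_mult > snd_mult ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}

int main(void)
{
	/* 4xDDR sender (mult 1) to 1xSDR receiver (mult 8): */
	printf("%u\n", pkt_delay(100, 1, 8)); /* (100*7+1)>>1 = 350 */
	/* receiver at least as fast as sender: no delay */
	printf("%u\n", pkt_delay(100, 2, 1)); /* 0 */
	return 0;
}
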
1052
+static int ipath_verbs_send_dma(struct ipath_qp *qp,
+				struct ipath_ib_header *hdr, u32 hdrwords,
+				struct ipath_sge_state *ss, u32 len,
+				u32 plen, u32 dwords)
+{
+	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ipath_devdata *dd = dev->dd;
+	struct ipath_verbs_txreq *tx;
+	u32 *piobuf;
+	u32 control;
+	u32 ndesc;
+	int ret;
+
+	tx = qp->s_tx;
+	if (tx) {
+		qp->s_tx = NULL;
+		/* resend previously constructed packet */
+		ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
+		if (ret)
+			qp->s_tx = tx;
+		goto bail;
+	}
+
+	tx = get_txreq(dev);
+	if (!tx) {
+		ret = -EBUSY;
+		goto bail;
+	}
+
+	/*
+	 * Get the saved delay count we computed for the previous packet
+	 * and save the delay count for this packet to be used next time
+	 * we get here.
+	 */
+	control = qp->s_pkt_delay;
+	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
+
+	tx->qp = qp;
+	atomic_inc(&qp->refcount);
+	tx->wqe = qp->s_wqe;
+	tx->txreq.callback = sdma_complete;
+	tx->txreq.callback_cookie = tx;
+	tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
+		IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
+	if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
+		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
+
+	/* VL15 packets bypass credit check */
+	if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
+		control |= 1ULL << 31;
+		tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
+	}
+
+	if (len) {
+		/*
+		 * Don't try to DMA if it takes more descriptors than
+		 * the queue holds.
+		 */
+		ndesc = ipath_count_sge(ss, len);
+		if (ndesc >= dd->ipath_sdma_descq_cnt)
+			ndesc = 0;
+	} else
+		ndesc = 1;
+	if (ndesc) {
+		tx->hdr.pbc[0] = cpu_to_le32(plen);
+		tx->hdr.pbc[1] = cpu_to_le32(control);
+		memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
+		tx->txreq.sg_count = ndesc;
+		tx->map_len = (hdrwords + 2) << 2;
+		tx->txreq.map_addr = &tx->hdr;
+		ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
+		if (ret) {
+			/* save ss and length in dwords */
+			tx->ss = ss;
+			tx->len = dwords;
+			qp->s_tx = tx;
+		}
+		goto bail;
+	}
+
+	/* Allocate a buffer and copy the header and payload to it. */
+	tx->map_len = (plen + 1) << 2;
+	piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
+	if (unlikely(piobuf == NULL)) {
+		ret = -EBUSY;
+		goto err_tx;
+	}
+	tx->txreq.map_addr = piobuf;
+	tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
+	tx->txreq.sg_count = 1;
+
+	*piobuf++ = (__force u32) cpu_to_le32(plen);
+	*piobuf++ = (__force u32) cpu_to_le32(control);
+	memcpy(piobuf, hdr, hdrwords << 2);
+	ipath_copy_from_sge(piobuf + hdrwords, ss, len);
+
+	ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
+	/*
+	 * If we couldn't queue the DMA request, save the info
+	 * and try again later rather than destroying the
+	 * buffer and undoing the side effects of the copy.
+	 */
+	if (ret) {
+		tx->ss = NULL;
+		tx->len = 0;
+		qp->s_tx = tx;
+	}
+	dev->n_unaligned++;
+	goto bail;
+
+err_tx:
+	if (atomic_dec_and_test(&qp->refcount))
+		wake_up(&qp->wait);
+	put_txreq(dev, tx);
+bail:
+	return ret;
+}
+
+static int ipath_verbs_send_pio(struct ipath_qp *qp,
+				struct ipath_ib_header *ibhdr, u32 hdrwords,
 				struct ipath_sge_state *ss, u32 len,
 				u32 plen, u32 dwords)
 {
 	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
+	u32 *hdr = (u32 *) ibhdr;
 	u32 __iomem *piobuf;
 	unsigned flush_wc;
+	u32 control;
 	int ret;
 
-	piobuf = ipath_getpiobuf(dd, NULL);
+	piobuf = ipath_getpiobuf(dd, plen, NULL);
 	if (unlikely(piobuf == NULL)) {
 		ret = -EBUSY;
 		goto bail;
 	}
 
 	/*
-	 * Write len to control qword, no flags.
+	 * Get the saved delay count we computed for the previous packet
+	 * and save the delay count for this packet to be used next time
+	 * we get here.
+	 */
+	control = qp->s_pkt_delay;
+	qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
+
+	/* VL15 packets bypass credit check */
+	if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
+		control |= 1ULL << 31;
+
+	/*
+	 * Write the length to the control qword plus any needed flags.
 	 * We have to flush after the PBC for correctness on some cpus
 	 * or WC buffer can be written out of order.
 	 */
-	writeq(plen, piobuf);
+	writeq(((u64) control << 32) | plen, piobuf);
 	piobuf += 2;
 
 	flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
@@ -961,15 +1278,25 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
 	 */
 	plen = hdrwords + dwords + 1;
 
-	/* Drop non-VL15 packets if we are not in the active state */
-	if (!(dd->ipath_flags & IPATH_LINKACTIVE) &&
-	    qp->ibqp.qp_type != IB_QPT_SMI) {
+	/*
+	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
+	 * can defer SDMA restart until link goes ACTIVE without
+	 * worrying about just how we got there.
+	 */
+	if (qp->ibqp.qp_type == IB_QPT_SMI)
+		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+					   plen, dwords);
+	/* All non-VL15 packets are dropped if link is not ACTIVE */
+	else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
 		if (qp->s_wqe)
 			ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
 		ret = 0;
-	} else
-		ret = ipath_verbs_send_pio(qp, (u32 *) hdr, hdrwords,
-					   ss, len, plen, dwords);
+	} else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+		ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
+					   plen, dwords);
+	else
+		ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+					   plen, dwords);
 
 	return ret;
 }
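The rewritten dispatch in ipath_verbs_send() above boils down to a small routing decision. The following compact restatement is only a sketch of that policy; the enum and helper names are invented for illustration and do not exist in the driver:

    /* Illustrative restatement of the ipath_verbs_send() dispatch. */
    enum send_path { SEND_PIO, SEND_DMA, SEND_DROP };

    static enum send_path pick_send_path(int is_smi, int link_active,
                                         int has_send_dma)
    {
            if (is_smi)
                    return SEND_PIO;  /* VL15/SMI always goes out via PIO */
            if (!link_active)
                    return SEND_DROP; /* completed as success, then dropped */
            return has_send_dma ? SEND_DMA : SEND_PIO;
    }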
@@ -1038,6 +1365,12 @@ int ipath_get_counters(struct ipath_devdata *dd,
 		ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
 		ipath_snap_cntr(dd, crp->cr_badformatcnt) +
 		dd->ipath_rxfc_unsupvl_errs;
+	if (crp->cr_rxotherlocalphyerrcnt)
+		cntrs->port_rcv_errors +=
+			ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
+	if (crp->cr_rxvlerrcnt)
+		cntrs->port_rcv_errors +=
+			ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
 	cntrs->port_rcv_remphys_errors =
 		ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
 	cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
@@ -1046,9 +1379,16 @@ int ipath_get_counters(struct ipath_devdata *dd,
 	cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
 	cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
 	cntrs->local_link_integrity_errors =
-		(dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-		dd->ipath_lli_errs : dd->ipath_lli_errors;
-	cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs;
+		crp->cr_locallinkintegrityerrcnt ?
+		ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
+		((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
+		 dd->ipath_lli_errs : dd->ipath_lli_errors);
+	cntrs->excessive_buffer_overrun_errors =
+		crp->cr_excessbufferovflcnt ?
+		ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
+		dd->ipath_overrun_thresh_errs;
+	cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
+		ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
 
 	ret = 0;
 
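Both of the reworked counter updates above follow one pattern: use a dedicated hardware counter when the chip variant provides one (non-zero register offset), otherwise fall back to the software-maintained count. A generic sketch of the idiom, with hypothetical names:

    /* Hypothetical helper: prefer the HW counter when the chip exposes
     * one (offset != 0), else use the software-accumulated value. */
    static unsigned long long read_counter(unsigned hw_off,
                                           unsigned long long sw_count,
                                           unsigned long long (*snap)(unsigned))
    {
            return hw_off ? snap(hw_off) : sw_count;
    }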
@@ -1183,7 +1523,9 @@ static int ipath_query_port(struct ib_device *ibdev,
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;
 	ibcstat = dd->ipath_lastibcstat;
-	props->state = ((ibcstat >> 4) & 0x3) + 1;
+	/* map LinkState to IB portinfo values. */
+	props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
+
 	/* See phys_state_show() */
 	props->phys_state = /* MEA: assumes shift == 0 */
 		ipath_cvt_physportstate[dd->ipath_lastibcstat &
@@ -1195,18 +1537,13 @@ static int ipath_query_port(struct ib_device *ibdev,
 	props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
 		dev->z_pkey_violations;
 	props->qkey_viol_cntr = dev->qkey_violations;
-	props->active_width = IB_WIDTH_4X;
+	props->active_width = dd->ipath_link_width_active;
 	/* See rate_show() */
-	props->active_speed = 1; /* Regular 10Mbs speed. */
+	props->active_speed = dd->ipath_link_speed_active;
 	props->max_vl_num = 1; /* VLCap = VL0 */
 	props->init_type_reply = 0;
 
-	/*
-	 * Note: the chip supports a maximum MTU of 4096, but the driver
-	 * hasn't implemented this feature yet, so set the maximum value
-	 * to 2048.
-	 */
-	props->max_mtu = IB_MTU_2048;
+	props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
 	switch (dd->ipath_ibmtu) {
 	case 4096:
 		mtu = IB_MTU_4096;
@@ -1399,6 +1736,7 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
 
 	/* ib_create_ah() will initialize ah->ibah. */
 	ah->attr = *ah_attr;
+	ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
 
 	ret = &ah->ibah;
 
@@ -1432,6 +1770,7 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
 	struct ipath_ah *ah = to_iah(ibah);
 
 	*ah_attr = ah->attr;
+	ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
 
 	return 0;
 }
@@ -1581,6 +1920,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	struct ipath_verbs_counters cntrs;
 	struct ipath_ibdev *idev;
 	struct ib_device *dev;
+	struct ipath_verbs_txreq *tx;
+	unsigned i;
 	int ret;
 
 	idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
@@ -1591,6 +1932,17 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 
 	dev = &idev->ibdev;
 
+	if (dd->ipath_sdma_descq_cnt) {
+		tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
+			     GFP_KERNEL);
+		if (tx == NULL) {
+			ret = -ENOMEM;
+			goto err_tx;
+		}
+	} else
+		tx = NULL;
+	idev->txreq_bufs = tx;
+
 	/* Only need to initialize non-zero fields. */
 	spin_lock_init(&idev->n_pds_lock);
 	spin_lock_init(&idev->n_ahs_lock);
@@ -1631,15 +1983,17 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	INIT_LIST_HEAD(&idev->pending[2]);
 	INIT_LIST_HEAD(&idev->piowait);
 	INIT_LIST_HEAD(&idev->rnrwait);
+	INIT_LIST_HEAD(&idev->txreq_free);
 	idev->pending_index = 0;
 	idev->port_cap_flags =
 		IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
+	if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
+		idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
 	idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
 	idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
 	idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
 	idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
 	idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-	idev->link_width_enabled = 3; /* 1x or 4x */
 
 	/* Snapshot current HW counters to "clear" them. */
 	ipath_get_counters(dd, &cntrs);
@@ -1661,6 +2015,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 		cntrs.excessive_buffer_overrun_errors;
 	idev->z_vl15_dropped = cntrs.vl15_dropped;
 
+	for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
+		list_add(&tx->txreq.list, &idev->txreq_free);
+
 	/*
 	 * The system image GUID is supposed to be the same for all
 	 * IB HCAs in a single system but since there can be other
@@ -1710,6 +2067,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	dev->phys_port_cnt = 1;
 	dev->num_comp_vectors = 1;
 	dev->dma_device = &dd->pcidev->dev;
+	dev->class_dev.dev = dev->dma_device;
 	dev->query_device = ipath_query_device;
 	dev->modify_device = ipath_modify_device;
 	dev->query_port = ipath_query_port;
@@ -1774,6 +2132,8 @@ err_reg:
 err_lk:
 	kfree(idev->qp_table.table);
 err_qp:
+	kfree(idev->txreq_bufs);
+err_tx:
 	ib_dealloc_device(dev);
 	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
 	idev = NULL;
@@ -1808,6 +2168,7 @@ void ipath_unregister_ib_device(struct ipath_ibdev *dev)
 	ipath_free_all_qps(&dev->qp_table);
 	kfree(dev->qp_table.table);
 	kfree(dev->lk_table.table);
+	kfree(dev->txreq_bufs);
 	ib_dealloc_device(ibdev);
 }
 
@@ -1855,13 +2216,15 @@ static ssize_t show_stats(struct class_device *cdev, char *buf)
1855 "RC stalls %d\n" 2216 "RC stalls %d\n"
1856 "piobuf wait %d\n" 2217 "piobuf wait %d\n"
1857 "no piobuf %d\n" 2218 "no piobuf %d\n"
2219 "unaligned %d\n"
1858 "PKT drops %d\n" 2220 "PKT drops %d\n"
1859 "WQE errs %d\n", 2221 "WQE errs %d\n",
1860 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, 2222 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
1861 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, 2223 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
1862 dev->n_other_naks, dev->n_timeouts, 2224 dev->n_other_naks, dev->n_timeouts,
1863 dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait, 2225 dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
1864 dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs); 2226 dev->n_no_piobuf, dev->n_unaligned,
2227 dev->n_pkt_drops, dev->n_wqe_errs);
1865 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { 2228 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
1866 const struct ipath_opcode_stats *si = &dev->opstats[i]; 2229 const struct ipath_opcode_stats *si = &dev->opstats[i];
1867 2230
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 3d59736b49b2..6514aa8306cd 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -138,6 +138,11 @@ struct ipath_ib_header {
 	} u;
 } __attribute__ ((packed));
 
+struct ipath_pio_header {
+	__le32 pbc[2];
+	struct ipath_ib_header hdr;
+} __attribute__ ((packed));
+
 /*
  * There is one struct ipath_mcast for each multicast GID.
  * All attached QPs are then stored as a list of
@@ -319,6 +324,7 @@ struct ipath_sge_state {
 	struct ipath_sge *sg_list;	/* next SGE to be used if any */
 	struct ipath_sge sge;	/* progress state for the current SGE */
 	u8 num_sge;
+	u8 static_rate;
 };
 
 /*
@@ -356,6 +362,7 @@ struct ipath_qp {
 	struct tasklet_struct s_task;
 	struct ipath_mmap_info *ip;
 	struct ipath_sge_state *s_cur_sge;
+	struct ipath_verbs_txreq *s_tx;
 	struct ipath_sge_state s_sge;	/* current send request data */
 	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
 	struct ipath_sge_state s_ack_rdma_sge;
@@ -363,7 +370,8 @@ struct ipath_qp {
 	struct ipath_sge_state r_sge;	/* current receive data */
 	spinlock_t s_lock;
 	unsigned long s_busy;
-	u32 s_hdrwords;		/* size of s_hdr in 32 bit words */
+	u16 s_pkt_delay;
+	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
 	u32 s_cur_size;		/* size of send packet in bytes */
 	u32 s_len;		/* total length of s_sge */
 	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
@@ -387,7 +395,6 @@ struct ipath_qp {
 	u8 r_nak_state;		/* non-zero if NAK is pending */
 	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
 	u8 r_reuse_sge;		/* for UC receive errors */
-	u8 r_sge_inx;		/* current index into sg_list */
 	u8 r_wrid_valid;	/* r_wrid set but CQ entry not yet made */
 	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
 	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
@@ -403,6 +410,7 @@ struct ipath_qp {
 	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
 	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
 	u8 s_flags;
+	u8 s_dmult;
 	u8 timeout;		/* Timeout for this QP */
 	enum ib_mtu path_mtu;
 	u32 remote_qpn;
@@ -510,6 +518,8 @@ struct ipath_ibdev {
 	struct ipath_lkey_table lk_table;
 	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
 	struct list_head piowait;	/* list for wait PIO buf */
+	struct list_head txreq_free;
+	void *txreq_bufs;
 	/* list of QPs waiting for RNR timer */
 	struct list_head rnrwait;
 	spinlock_t pending_lock;
@@ -570,6 +580,7 @@ struct ipath_ibdev {
 	u32 n_rdma_dup_busy;
 	u32 n_piowait;
 	u32 n_no_piobuf;
+	u32 n_unaligned;
 	u32 port_cap_flags;
 	u32 pma_sample_start;
 	u32 pma_sample_interval;
@@ -581,7 +592,6 @@ struct ipath_ibdev {
 	u16 pending_index;	/* which pending queue is active */
 	u8 pma_sample_status;
 	u8 subnet_timeout;
-	u8 link_width_enabled;
 	u8 vl_high_limit;
 	struct ipath_opcode_stats opstats[128];
 };
@@ -602,6 +612,16 @@ struct ipath_verbs_counters {
 	u32 vl15_dropped;
 };
 
+struct ipath_verbs_txreq {
+	struct ipath_qp *qp;
+	struct ipath_swqe *wqe;
+	u32 map_len;
+	u32 len;
+	struct ipath_sge_state *ss;
+	struct ipath_pio_header hdr;
+	struct ipath_sdma_txreq txreq;
+};
+
 static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
 {
 	return container_of(ibmr, struct ipath_mr, ibmr);
@@ -694,11 +714,11 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
 
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
 
+unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
+
 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
 		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);
 
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
-
 void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
 
 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
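The new txreq_free list head and txreq_bufs array together form a fixed-size pool of struct ipath_verbs_txreq, preallocated at registration time. The get_txreq()/put_txreq() helpers used by the send path are not part of this hunk, so the following is only a plausible sketch of such a pool under the pending_lock; the details are assumptions, not the driver's code:

    /* Sketch of a free-list pool; the driver's real helpers may differ. */
    static struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
    {
            struct ipath_verbs_txreq *tx = NULL;
            unsigned long flags;

            spin_lock_irqsave(&dev->pending_lock, flags);
            if (!list_empty(&dev->txreq_free)) {
                    struct list_head *l = dev->txreq_free.next;

                    list_del(l);
                    tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
            }
            spin_unlock_irqrestore(&dev->pending_lock, flags);
            return tx;
    }

    static void put_txreq(struct ipath_ibdev *dev,
                          struct ipath_verbs_txreq *tx)
    {
            unsigned long flags;

            spin_lock_irqsave(&dev->pending_lock, flags);
            list_add(&tx->txreq.list, &dev->txreq_free);
            spin_unlock_irqrestore(&dev->pending_lock, flags);
    }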
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 7360bbafbe84..3557e7edc9b6 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -85,6 +85,82 @@ static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
 	return get_sw_cqe(cq, cq->mcq.cons_index);
 }
 
+int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+	struct mlx4_ib_cq *mcq = to_mcq(cq);
+	struct mlx4_ib_dev *dev = to_mdev(cq->device);
+
+	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
+}
+
+static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
+{
+	int err;
+
+	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
+			     PAGE_SIZE * 2, &buf->buf);
+
+	if (err)
+		goto out;
+
+	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
+			    &buf->mtt);
+	if (err)
+		goto err_buf;
+
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+	if (err)
+		goto err_mtt;
+
+	return 0;
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
+		      &buf->buf);
+
+out:
+	return err;
+}
+
+static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
+{
+	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
+}
+
+static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
+			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
+			       u64 buf_addr, int cqe)
+{
+	int err;
+
+	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
+			    IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(*umem))
+		return PTR_ERR(*umem);
+
+	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
+			    ilog2((*umem)->page_size), &buf->mtt);
+	if (err)
+		goto err_buf;
+
+	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
+	if (err)
+		goto err_mtt;
+
+	return 0;
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
+
+err_buf:
+	ib_umem_release(*umem);
+
+	return err;
+}
+
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
 				struct ib_ucontext *context,
 				struct ib_udata *udata)
@@ -92,7 +168,6 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_cq *cq;
 	struct mlx4_uar *uar;
-	int buf_size;
 	int err;
 
 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -104,8 +179,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 
 	entries      = roundup_pow_of_two(entries + 1);
 	cq->ibcq.cqe = entries - 1;
-	buf_size     = entries * sizeof (struct mlx4_cqe);
+	mutex_init(&cq->resize_mutex);
 	spin_lock_init(&cq->lock);
+	cq->resize_buf = NULL;
+	cq->resize_umem = NULL;
 
 	if (context) {
 		struct mlx4_ib_create_cq ucmd;
@@ -115,21 +192,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 			goto err_cq;
 		}
 
-		cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
-				       IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(cq->umem)) {
-			err = PTR_ERR(cq->umem);
-			goto err_cq;
-		}
-
-		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
-				    ilog2(cq->umem->page_size), &cq->buf.mtt);
-		if (err)
-			goto err_buf;
-
-		err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
+		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
+					  ucmd.buf_addr, entries);
 		if (err)
-			goto err_mtt;
+			goto err_cq;
 
 		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
 					  &cq->db);
@@ -147,19 +213,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 		*cq->mcq.set_ci_db = 0;
 		*cq->mcq.arm_db    = 0;
 
-		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) {
-			err = -ENOMEM;
-			goto err_db;
-		}
-
-		err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
-				    &cq->buf.mtt);
+		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
 		if (err)
-			goto err_buf;
-
-		err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
-		if (err)
-			goto err_mtt;
+			goto err_db;
 
 		uar = &dev->priv_uar;
 	}
@@ -187,12 +243,10 @@ err_dbmap:
 err_mtt:
 	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
 
-err_buf:
 	if (context)
 		ib_umem_release(cq->umem);
 	else
-		mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
-			      &cq->buf.buf);
+		mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
 
 err_db:
 	if (!context)
@@ -204,6 +258,170 @@ err_cq:
 	return ERR_PTR(err);
 }
 
+static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+				 int entries)
+{
+	int err;
+
+	if (cq->resize_buf)
+		return -EBUSY;
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+	if (err) {
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		return err;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	return 0;
+}
+
+static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
+				  int entries, struct ib_udata *udata)
+{
+	struct mlx4_ib_resize_cq ucmd;
+	int err;
+
+	if (cq->resize_umem)
+		return -EBUSY;
+
+	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+		return -EFAULT;
+
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	if (!cq->resize_buf)
+		return -ENOMEM;
+
+	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
+				  &cq->resize_umem, ucmd.buf_addr, entries);
+	if (err) {
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		return err;
+	}
+
+	cq->resize_buf->cqe = entries - 1;
+
+	return 0;
+}
+
+static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
+{
+	u32 i;
+
+	i = cq->mcq.cons_index;
+	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
+		++i;
+
+	return i - cq->mcq.cons_index;
+}
+
+static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
+{
+	struct mlx4_cqe *cqe;
+	int i;
+
+	i = cq->mcq.cons_index;
+	cqe = get_cqe(cq, i & cq->ibcq.cqe);
+	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
+					(i + 1) & cq->resize_buf->cqe),
+		       get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
+		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
+	}
+	++cq->mcq.cons_index;
+}
+
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
+	struct mlx4_ib_cq *cq = to_mcq(ibcq);
+	int outst_cqe;
+	int err;
+
+	mutex_lock(&cq->resize_mutex);
+
+	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	entries = roundup_pow_of_two(entries + 1);
+	if (entries == ibcq->cqe + 1) {
+		err = 0;
+		goto out;
+	}
+
+	if (ibcq->uobject) {
+		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
+		if (err)
+			goto out;
+	} else {
+		/* Can't be smaller than the number of outstanding CQEs */
+		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
+		if (entries < outst_cqe + 1) {
+			err = 0;
+			goto out;
+		}
+
+		err = mlx4_alloc_resize_buf(dev, cq, entries);
+		if (err)
+			goto out;
+	}
+
+	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
+	if (err)
+		goto err_buf;
+
+	if (ibcq->uobject) {
+		cq->buf = cq->resize_buf->buf;
+		cq->ibcq.cqe = cq->resize_buf->cqe;
+		ib_umem_release(cq->umem);
+		cq->umem = cq->resize_umem;
+
+		kfree(cq->resize_buf);
+		cq->resize_buf = NULL;
+		cq->resize_umem = NULL;
+	} else {
+		spin_lock_irq(&cq->lock);
+		if (cq->resize_buf) {
+			mlx4_ib_cq_resize_copy_cqes(cq);
+			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			cq->buf = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+		}
+		spin_unlock_irq(&cq->lock);
+	}
+
+	goto out;
+
+err_buf:
+	if (!ibcq->uobject)
+		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
+				    cq->resize_buf->cqe);
+
+	kfree(cq->resize_buf);
+	cq->resize_buf = NULL;
+
+	if (cq->resize_umem) {
+		ib_umem_release(cq->resize_umem);
+		cq->resize_umem = NULL;
+	}
+
+out:
+	mutex_unlock(&cq->resize_mutex);
+	return err;
+}
+
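From user space, all of the resize machinery above is reached through the standard verbs entry point. A minimal libibverbs sketch, assuming an already-opened device context (error handling trimmed):

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* ibv_resize_cq() lands in the driver's resize_cq method, i.e.
     * mlx4_ib_resize_cq() above, via the RESIZE_CQ uverbs command. */
    int demo_resize(struct ibv_context *ctx)
    {
            struct ibv_cq *cq = ibv_create_cq(ctx, 128, NULL, NULL, 0);

            if (!cq)
                    return -1;
            if (ibv_resize_cq(cq, 1024))
                    fprintf(stderr, "resize failed\n");
            return ibv_destroy_cq(cq);
    }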
 int mlx4_ib_destroy_cq(struct ib_cq *cq)
 {
 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
@@ -216,8 +434,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
 		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
 		ib_umem_release(mcq->umem);
 	} else {
-		mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
-			      &mcq->buf.buf);
+		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
 		mlx4_ib_db_free(dev, &mcq->db);
 	}
 
@@ -297,6 +514,20 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
 	wc->vendor_err = cqe->vendor_err_syndrome;
 }
 
+static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
+{
+	return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
+				      MLX4_CQE_IPOIB_STATUS_IPV4F |
+				      MLX4_CQE_IPOIB_STATUS_IPV4OPT |
+				      MLX4_CQE_IPOIB_STATUS_IPV6 |
+				      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
+		cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
+			    MLX4_CQE_IPOIB_STATUS_IPOK)) &&
+		(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
+				      MLX4_CQE_IPOIB_STATUS_TCP)) &&
+		checksum == cpu_to_be16(0xffff);
+}
+
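mlx4_ib_ipoib_csum_ok() accepts a completion only when the packet is plain IPv4 (not a fragment, no IP options, not IPv6), the IP header was flagged OK, the payload is TCP or UDP, and the folded checksum is 0xffff. A standalone illustration of that predicate; the bit positions below are invented stand-ins for the real MLX4_CQE_IPOIB_STATUS_* constants:

    #include <stdio.h>

    enum {  /* illustrative values only */
            ST_IPV4 = 1 << 22, ST_IPV4F = 1 << 23, ST_IPV4OPT = 1 << 24,
            ST_IPV6 = 1 << 25, ST_IPOK  = 1 << 28,
            ST_UDP  = 1 << 29, ST_TCP   = 1 << 30,
    };

    static int csum_ok(unsigned status, unsigned short checksum)
    {
            return ((status & (ST_IPV4 | ST_IPV4F | ST_IPV4OPT |
                               ST_IPV6 | ST_IPOK)) == (ST_IPV4 | ST_IPOK)) &&
                   (status & (ST_UDP | ST_TCP)) && checksum == 0xffff;
    }

    int main(void)
    {
            printf("%d\n", csum_ok(ST_IPV4 | ST_IPOK | ST_TCP, 0xffff)); /* 1 */
            printf("%d\n", csum_ok(ST_IPV4 | ST_IPV4F | ST_IPOK | ST_TCP,
                                   0xffff)); /* 0: IPv4 fragment */
            return 0;
    }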
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 			    struct mlx4_ib_qp **cur_qp,
 			    struct ib_wc *wc)
@@ -310,6 +541,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	u32 g_mlpath_rqpn;
 	u16 wqe_ctr;
 
+repoll:
 	cqe = next_cqe_sw(cq);
 	if (!cqe)
 		return -EAGAIN;
@@ -332,6 +564,22 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		return -EINVAL;
 	}
 
+	/* Resize CQ in progress */
+	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
+		if (cq->resize_buf) {
+			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
+
+			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			cq->buf = cq->resize_buf->buf;
+			cq->ibcq.cqe = cq->resize_buf->cqe;
+
+			kfree(cq->resize_buf);
+			cq->resize_buf = NULL;
+		}
+
+		goto repoll;
+	}
+
 	if (!*cur_qp ||
 	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
 		/*
@@ -406,6 +654,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		case MLX4_OPCODE_BIND_MW:
 			wc->opcode = IB_WC_BIND_MW;
 			break;
+		case MLX4_OPCODE_LSO:
+			wc->opcode = IB_WC_LSO;
+			break;
 		}
 	} else {
 		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
@@ -434,6 +685,8 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
+		wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
+						    cqe->checksum);
 	}
 
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0ed02b7834da..4c1e72fc8f57 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -165,7 +165,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 		event.device = ibdev;
 		event.element.port_num = port_num;
 
-		if(pinfo->clientrereg_resv_subnetto & 0x80)
+		if (pinfo->clientrereg_resv_subnetto & 0x80)
 			event.event = IB_EVENT_CLIENT_REREGISTER;
 		else
 			event.event = IB_EVENT_LID_CHANGE;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 96a39b5c9254..136c76c7b4e7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -44,8 +44,8 @@
 #include "user.h"
 
 #define DRV_NAME	"mlx4_ib"
-#define DRV_VERSION	"0.01"
-#define DRV_RELDATE	"May 1, 2006"
+#define DRV_VERSION	"1.0"
+#define DRV_RELDATE	"April 4, 2008"
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -99,6 +99,10 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
+		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+	if (dev->dev->caps.max_gso_sz)
+		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 
 	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
@@ -567,6 +571,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
 		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
@@ -605,6 +610,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
 	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
 	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
+	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
+	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
 	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
 	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
 	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
@@ -675,18 +682,20 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 }
 
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
-			  enum mlx4_dev_event event, int subtype,
-			  int port)
+			  enum mlx4_dev_event event, int port)
 {
 	struct ib_event ibev;
 
 	switch (event) {
-	case MLX4_EVENT_TYPE_PORT_CHANGE:
-		ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
-			IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+	case MLX4_DEV_EVENT_PORT_UP:
+		ibev.event = IB_EVENT_PORT_ACTIVE;
 		break;
 
-	case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR:
+	case MLX4_DEV_EVENT_PORT_DOWN:
+		ibev.event = IB_EVENT_PORT_ERR;
+		break;
+
+	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
 		ibev.event = IB_EVENT_DEVICE_FATAL;
 		break;
 
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 3726e451a327..9e637323c155 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -78,13 +78,21 @@ struct mlx4_ib_cq_buf {
 	struct mlx4_mtt mtt;
 };
 
+struct mlx4_ib_cq_resize {
+	struct mlx4_ib_cq_buf buf;
+	int cqe;
+};
+
 struct mlx4_ib_cq {
 	struct ib_cq ibcq;
 	struct mlx4_cq mcq;
 	struct mlx4_ib_cq_buf buf;
+	struct mlx4_ib_cq_resize *resize_buf;
 	struct mlx4_ib_db db;
 	spinlock_t lock;
+	struct mutex resize_mutex;
 	struct ib_umem *umem;
+	struct ib_umem *resize_umem;
 };
 
 struct mlx4_ib_mr {
@@ -110,6 +118,10 @@ struct mlx4_ib_wq {
 	unsigned tail;
 };
 
+enum mlx4_ib_qp_flags {
+	MLX4_IB_QP_LSO = 1 << 0
+};
+
 struct mlx4_ib_qp {
 	struct ib_qp ibqp;
 	struct mlx4_qp mqp;
@@ -129,6 +141,7 @@ struct mlx4_ib_qp {
 	struct mlx4_mtt mtt;
 	int buf_size;
 	struct mutex mutex;
+	u32 flags;
 	u8 port;
 	u8 alt_port;
 	u8 atomic_rd_en;
@@ -249,6 +262,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			   struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
 
+int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
 				struct ib_ucontext *context,
 				struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 958e205b6d7c..b75efae7e449 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,6 +71,7 @@ enum {
 
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
 	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
 	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
 	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -122,7 +123,7 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
  */
 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
 {
-	u32 *wqe;
+	__be32 *wqe;
 	int i;
 	int s;
 	int ind;
@@ -143,7 +144,7 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
 		buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
 		for (i = 64; i < s; i += 64) {
 			wqe = buf + i;
-			*wqe = 0xffffffff;
+			*wqe = cpu_to_be32(0xffffffff);
 		}
 	}
 }
@@ -242,7 +243,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 	}
 }
 
-static int send_wqe_overhead(enum ib_qp_type type)
+static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
 {
 	/*
 	 * UD WQEs must have a datagram segment.
@@ -253,7 +254,8 @@ static int send_wqe_overhead(enum ib_qp_type type)
 	switch (type) {
 	case IB_QPT_UD:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
-			sizeof (struct mlx4_wqe_datagram_seg);
+			sizeof (struct mlx4_wqe_datagram_seg) +
+			((flags & MLX4_IB_QP_LSO) ? 64 : 0);
 	case IB_QPT_UC:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
 			sizeof (struct mlx4_wqe_raddr_seg);
@@ -315,7 +317,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr > dev->dev->caps.max_wqes ||
 	    cap->max_send_sge > dev->dev->caps.max_sq_sg ||
-	    cap->max_inline_data + send_wqe_overhead(type) +
+	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
 
@@ -329,7 +331,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 
 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
-		send_wqe_overhead(type);
+		send_wqe_overhead(type, qp->flags);
 
 	/*
 	 * Hermon supports shrinking WQEs, such that a single work
@@ -394,7 +396,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	}
 
 	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
-			 send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
+			 send_wqe_overhead(type, qp->flags)) /
+		sizeof (struct mlx4_wqe_data_seg);
 
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -503,6 +506,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	} else {
 		qp->sq_no_prefetch = 0;
 
+		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+			qp->flags |= MLX4_IB_QP_LSO;
+
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
@@ -673,6 +679,13 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	struct mlx4_ib_qp *qp;
 	int err;
 
+	/* We only support LSO, and only for kernel UD QPs. */
+	if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
+		return ERR_PTR(-EINVAL);
+	if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
+	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
+		return ERR_PTR(-EINVAL);
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 	case IB_QPT_UC:
@@ -876,10 +889,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	    ibqp->qp_type == IB_QPT_UD)
+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
-	else if (attr_mask & IB_QP_PATH_MTU) {
+	else if (ibqp->qp_type == IB_QPT_UD) {
+		if (qp->flags & MLX4_IB_QP_LSO)
+			context->mtu_msgmax = (IB_MTU_4096 << 5) |
+				ilog2(dev->dev->caps.max_gso_sz);
+		else
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			printk(KERN_ERR "path MTU (%u) is invalid\n",
 			       attr->path_mtu);
@@ -1182,7 +1200,7 @@ out:
 }
 
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
-			    void *wqe)
+			    void *wqe, unsigned *mlx_seg_len)
 {
 	struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
@@ -1231,7 +1249,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	case IB_WR_SEND_WITH_IMM:
 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
 		sqp->ud_header.immediate_present = 1;
-		sqp->ud_header.immediate_data    = wr->imm_data;
+		sqp->ud_header.immediate_data    = wr->ex.imm_data;
 		break;
 	default:
 		return -EINVAL;
@@ -1303,7 +1321,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		i = 2;
 	}
 
-	return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+	*mlx_seg_len =
+		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
+	return 0;
 }
 
 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
@@ -1396,6 +1416,34 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 	dseg->addr = cpu_to_be64(sg->addr);
 }
 
+static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
+			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
+{
+	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+
+	/*
+	 * This is a temporary limitation and will be removed in
+	 * a forthcoming FW release:
+	 */
+	if (unlikely(halign > 64))
+		return -EINVAL;
+
+	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
+		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+		return -EINVAL;
+
+	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+	/* make sure LSO header is written before overwriting stamping */
+	wmb();
+
+	wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+					wr->wr.ud.hlen);
+
+	*lso_seg_len = halign;
+	return 0;
+}
+
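build_lso_seg() rounds the LSO segment (its fixed word plus the copied packet header of wr.ud.hlen bytes) up to 16-byte WQE units and rejects anything past the temporary 64-byte cap. A worked sizing example; the 4-byte fixed part is an assumption for illustration:

    #include <stdio.h>

    #define ALIGN16(x) (((x) + 15u) & ~15u)

    int main(void)
    {
            unsigned fixed = 4; /* assumed: the mss_hdr_size word */
            unsigned hlen;

            for (hlen = 14; hlen <= 62; hlen += 24) {
                    unsigned halign = ALIGN16(fixed + hlen);

                    printf("hlen=%2u -> %2u bytes (%u WQE units)%s\n",
                           hlen, halign, halign / 16,
                           halign > 64 ? " REJECTED" : "");
            }
            return 0;
    }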
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		      struct ib_send_wr **bad_wr)
 {
@@ -1409,6 +1457,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned ind;
 	int uninitialized_var(stamp);
 	int uninitialized_var(size);
+	unsigned seglen;
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1436,11 +1485,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
 			(wr->send_flags & IB_SEND_SOLICITED ?
 			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
+			((wr->send_flags & IB_SEND_IP_CSUM) ?
+			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
 			qp->sq_signal_bits;
 
 		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-			ctrl->imm = wr->imm_data;
+			ctrl->imm = wr->ex.imm_data;
 		else
 			ctrl->imm = 0;
 
@@ -1484,19 +1536,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			set_datagram_seg(wqe, wr);
 			wqe += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
+			if (wr->opcode == IB_WR_LSO) {
+				err = build_lso_seg(wqe, wr, qp, &seglen);
+				if (unlikely(err)) {
+					*bad_wr = wr;
+					goto out;
+				}
+				wqe += seglen;
+				size += seglen / 16;
+			}
 			break;
 
 		case IB_QPT_SMI:
 		case IB_QPT_GSI:
-			err = build_mlx_header(to_msqp(qp), wr, ctrl);
-			if (err < 0) {
+			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
+			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
 			}
-			wqe += err;
-			size += err / 16;
-
-			err = 0;
+			wqe += seglen;
+			size += seglen / 16;
 			break;
 
 		default:
@@ -1725,7 +1785,9 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	struct mlx4_qp_context context;
 	int mlx4_state;
-	int err;
+	int err = 0;
+
+	mutex_lock(&qp->mutex);
 
 	if (qp->state == IB_QPS_RESET) {
 		qp_attr->qp_state = IB_QPS_RESET;
@@ -1733,12 +1795,15 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 	}
 
 	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
-	if (err)
-		return -EINVAL;
+	if (err) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	mlx4_state = be32_to_cpu(context.flags) >> 28;
 
-	qp_attr->qp_state = to_ib_qp_state(mlx4_state);
+	qp->state	  = to_ib_qp_state(mlx4_state);
+	qp_attr->qp_state = qp->state;
 	qp_attr->path_mtu = context.mtu_msgmax >> 5;
 	qp_attr->path_mig_state =
 		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
@@ -1797,6 +1862,8 @@ done:
 
 	qp_init_attr->cap = qp_attr->cap;
 
-	return 0;
+out:
+	mutex_unlock(&qp->mutex);
+	return err;
 }
 
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 09a30dd12b14..54d230ee7d63 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -219,7 +219,7 @@ static void mthca_cmd_post_dbell(struct mthca_dev *dev,
 	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
 					       (1 << HCA_E_BIT) |
 					       (op_modifier << HCR_OPMOD_SHIFT) |
 					       op), ptr + offs[6]);
 	wmb();
 	__raw_writel((__force u32) 0, ptr + offs[7]);
 	wmb();
@@ -1339,6 +1339,10 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1339 /* Check port for UD address vector: */ 1339 /* Check port for UD address vector: */
1340 *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1); 1340 *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1);
1341 1341
1342 /* Enable IPoIB checksumming if we can: */
1343 if (dev->device_cap_flags & IB_DEVICE_UD_IP_CSUM)
1344 *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(7 << 3);
1345
1342 /* We leave wqe_quota, responder_exu, etc as 0 (default) */ 1346 /* We leave wqe_quota, responder_exu, etc as 0 (default) */
1343 1347
1344 /* QPC/EEC/CQC/EQC/RDB attributes */ 1348 /* QPC/EEC/CQC/EQC/RDB attributes */
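The INIT_HCA hunk above turns a device capability into mailbox bits: the checksum-enable field is OR-ed into a word kept in big-endian form throughout. A hedged userspace sketch of the same pattern, assuming (per the hunk's comment) that 7 << 3 is the checksum-enable field; the flag names are hypothetical:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_UD_IP_CSUM	(1u << 0)	/* assumed capability bit   */
#define HCA_CSUM_BITS	(7u << 3)	/* checksum-enable field    */

int main(void)
{
	uint32_t dev_caps = CAP_UD_IP_CSUM;	/* device advertises csum   */
	uint32_t flags2 = htonl(1);		/* mailbox word, big-endian */

	if (dev_caps & CAP_UD_IP_CSUM)
		flags2 |= htonl(HCA_CSUM_BITS);	/* OR in network byte order */

	printf("flags2 (cpu order) = 0x%08x\n", ntohl(flags2));
	return 0;
}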
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 2f976f2051d6..8928ca4a9325 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -103,6 +103,7 @@ enum {
103 DEV_LIM_FLAG_RAW_IPV6 = 1 << 4, 103 DEV_LIM_FLAG_RAW_IPV6 = 1 << 4,
104 DEV_LIM_FLAG_RAW_ETHER = 1 << 5, 104 DEV_LIM_FLAG_RAW_ETHER = 1 << 5,
105 DEV_LIM_FLAG_SRQ = 1 << 6, 105 DEV_LIM_FLAG_SRQ = 1 << 6,
106 DEV_LIM_FLAG_IPOIB_CSUM = 1 << 7,
106 DEV_LIM_FLAG_BAD_PKEY_CNTR = 1 << 8, 107 DEV_LIM_FLAG_BAD_PKEY_CNTR = 1 << 8,
107 DEV_LIM_FLAG_BAD_QKEY_CNTR = 1 << 9, 108 DEV_LIM_FLAG_BAD_QKEY_CNTR = 1 << 9,
108 DEV_LIM_FLAG_MW = 1 << 16, 109 DEV_LIM_FLAG_MW = 1 << 16,
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 1e1e336d3ef9..20401d2ba6b2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -119,7 +119,8 @@ struct mthca_cqe {
119 __be32 my_qpn; 119 __be32 my_qpn;
120 __be32 my_ee; 120 __be32 my_ee;
121 __be32 rqpn; 121 __be32 rqpn;
122 __be16 sl_g_mlpath; 122 u8 sl_ipok;
123 u8 g_mlpath;
123 __be16 rlid; 124 __be16 rlid;
124 __be32 imm_etype_pkey_eec; 125 __be32 imm_etype_pkey_eec;
125 __be32 byte_cnt; 126 __be32 byte_cnt;
@@ -493,6 +494,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
493 int is_send; 494 int is_send;
494 int free_cqe = 1; 495 int free_cqe = 1;
495 int err = 0; 496 int err = 0;
497 u16 checksum;
496 498
497 cqe = next_cqe_sw(cq); 499 cqe = next_cqe_sw(cq);
498 if (!cqe) 500 if (!cqe)
@@ -635,12 +637,14 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
635 break; 637 break;
636 } 638 }
637 entry->slid = be16_to_cpu(cqe->rlid); 639 entry->slid = be16_to_cpu(cqe->rlid);
638 entry->sl = be16_to_cpu(cqe->sl_g_mlpath) >> 12; 640 entry->sl = cqe->sl_ipok >> 4;
639 entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff; 641 entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff;
640 entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f; 642 entry->dlid_path_bits = cqe->g_mlpath & 0x7f;
641 entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16; 643 entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
642 entry->wc_flags |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ? 644 entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
643 IB_WC_GRH : 0; 645 checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
646 ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
647 entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff);
644 } 648 }
645 649
646 entry->status = IB_WC_SUCCESS; 650 entry->status = IB_WC_SUCCESS;
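The mthca_poll_one() hunk above validates receive checksums by reassembling a 16-bit value that the hardware scatters across two CQE words, then declaring the packet good only when the IP-ok bit is set and the checksum is 0xffff. A small model of that test (field layout inferred from the shifts in the hunk; treat it as a sketch, not the CQE spec):

#include <stdint.h>
#include <stdio.h>

static int csum_ok(uint32_t rqpn, uint32_t my_ee, uint8_t sl_ipok)
{
	uint16_t checksum = (rqpn >> 24) |		/* low byte  */
			    ((my_ee >> 16) & 0xff00);	/* high byte */
	return (sl_ipok & 1) && checksum == 0xffff;
}

int main(void)
{
	/* good: both checksum bytes 0xff and the IP-ok bit set */
	printf("good frame: %d\n", csum_ok(0xff000000u, 0xff000000u, 1));
	printf("bad  frame: %d\n", csum_ok(0x12000000u, 0x34000000u, 1));
	return 0;
}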
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 7bbdd1f4e6c7..0e842e023400 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -54,8 +54,8 @@
54 54
55#define DRV_NAME "ib_mthca" 55#define DRV_NAME "ib_mthca"
56#define PFX DRV_NAME ": " 56#define PFX DRV_NAME ": "
57#define DRV_VERSION "0.08" 57#define DRV_VERSION "1.0"
58#define DRV_RELDATE "February 14, 2006" 58#define DRV_RELDATE "April 4, 2008"
59 59
60enum { 60enum {
61 MTHCA_FLAG_DDR_HIDDEN = 1 << 1, 61 MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
@@ -390,11 +390,11 @@ extern void __buggy_use_of_MTHCA_PUT(void);
390 do { \ 390 do { \
391 void *__p = (char *) (source) + (offset); \ 391 void *__p = (char *) (source) + (offset); \
392 switch (sizeof (dest)) { \ 392 switch (sizeof (dest)) { \
393 case 1: (dest) = *(u8 *) __p; break; \ 393 case 1: (dest) = *(u8 *) __p; break; \
394 case 2: (dest) = be16_to_cpup(__p); break; \ 394 case 2: (dest) = be16_to_cpup(__p); break; \
395 case 4: (dest) = be32_to_cpup(__p); break; \ 395 case 4: (dest) = be32_to_cpup(__p); break; \
396 case 8: (dest) = be64_to_cpup(__p); break; \ 396 case 8: (dest) = be64_to_cpup(__p); break; \
397 default: __buggy_use_of_MTHCA_GET(); \ 397 default: __buggy_use_of_MTHCA_GET(); \
398 } \ 398 } \
399 } while (0) 399 } while (0)
400 400
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index b60eb5df96e8..8bde7f98e58a 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -232,9 +232,9 @@ static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
232 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; 232 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
233} 233}
234 234
235static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq) 235static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
236{ 236{
237 struct mthca_eqe* eqe; 237 struct mthca_eqe *eqe;
238 eqe = get_eqe(eq, eq->cons_index); 238 eqe = get_eqe(eq, eq->cons_index);
239 return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; 239 return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
240} 240}
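next_eqe_sw() above is the usual ownership-bit poll: an entry belongs to software only once the hardware has cleared OWNER_HW, and the consumer sees NULL until then. A self-contained model of that convention (ring size and bit value are assumptions, not the mthca layout):

#include <stdint.h>
#include <stdio.h>

#define OWNER_HW 0x80u

struct eqe { uint8_t owner; uint8_t data[31]; };

static struct eqe *next_eqe_sw(struct eqe *ring, unsigned n, uint32_t ci)
{
	struct eqe *e = &ring[ci & (n - 1)];	/* n is a power of two */
	return (e->owner & OWNER_HW) ? NULL : e;
}

int main(void)
{
	struct eqe ring[4] = { { .owner = 0 }, { .owner = OWNER_HW } };

	printf("entry 0 ready: %s\n", next_eqe_sw(ring, 4, 0) ? "yes" : "no");
	printf("entry 1 ready: %s\n", next_eqe_sw(ring, 4, 1) ? "yes" : "no");
	return 0;
}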
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index acfa41d968ee..8b7e83e6e88f 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -125,7 +125,7 @@ static void smp_snoop(struct ib_device *ibdev,
125 event.device = ibdev; 125 event.device = ibdev;
126 event.element.port_num = port_num; 126 event.element.port_num = port_num;
127 127
128 if(pinfo->clientrereg_resv_subnetto & 0x80) 128 if (pinfo->clientrereg_resv_subnetto & 0x80)
129 event.event = IB_EVENT_CLIENT_REREGISTER; 129 event.event = IB_EVENT_CLIENT_REREGISTER;
130 else 130 else
131 event.event = IB_EVENT_LID_CHANGE; 131 event.event = IB_EVENT_LID_CHANGE;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index cd3d8adbef9f..9ebadd6e0cfb 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -267,11 +267,16 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
267 if (dev_lim->flags & DEV_LIM_FLAG_SRQ) 267 if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
268 mdev->mthca_flags |= MTHCA_FLAG_SRQ; 268 mdev->mthca_flags |= MTHCA_FLAG_SRQ;
269 269
270 if (mthca_is_memfree(mdev))
271 if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
272 mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
273
270 return 0; 274 return 0;
271} 275}
272 276
273static int mthca_init_tavor(struct mthca_dev *mdev) 277static int mthca_init_tavor(struct mthca_dev *mdev)
274{ 278{
279 s64 size;
275 u8 status; 280 u8 status;
276 int err; 281 int err;
277 struct mthca_dev_lim dev_lim; 282 struct mthca_dev_lim dev_lim;
@@ -324,9 +329,11 @@ static int mthca_init_tavor(struct mthca_dev *mdev)
324 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 329 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
325 profile.num_srq = dev_lim.max_srqs; 330 profile.num_srq = dev_lim.max_srqs;
326 331
327 err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 332 size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
328 if (err < 0) 333 if (size < 0) {
334 err = size;
329 goto err_disable; 335 goto err_disable;
336 }
330 337
331 err = mthca_INIT_HCA(mdev, &init_hca, &status); 338 err = mthca_INIT_HCA(mdev, &init_hca, &status);
332 if (err) { 339 if (err) {
@@ -605,7 +612,7 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
605 struct mthca_dev_lim dev_lim; 612 struct mthca_dev_lim dev_lim;
606 struct mthca_profile profile; 613 struct mthca_profile profile;
607 struct mthca_init_hca_param init_hca; 614 struct mthca_init_hca_param init_hca;
608 u64 icm_size; 615 s64 icm_size;
609 u8 status; 616 u8 status;
610 int err; 617 int err;
611 618
@@ -653,7 +660,7 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
653 profile.num_srq = dev_lim.max_srqs; 660 profile.num_srq = dev_lim.max_srqs;
654 661
655 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 662 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
656 if ((int) icm_size < 0) { 663 if (icm_size < 0) {
657 err = icm_size; 664 err = icm_size;
658 goto err_stop_fw; 665 goto err_stop_fw;
659 } 666 }
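Both mthca_main.c hunks above exist because mthca_make_profile() returns either an ICM size or a negative errno in one value: that only works if the type is signed and wide enough, so u64 plus an (int) cast becomes s64 compared directly. A sketch of the failure mode being fixed (the function name is reused for illustration only):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int64_t make_profile(int fail)
{
	if (fail)
		return -ENOMEM;		/* error path               */
	return INT64_C(3) << 32;	/* a size larger than 2 GiB  */
}

int main(void)
{
	int64_t size = make_profile(0);

	/*
	 * The old "(int) icm_size < 0" test truncates to 32 bits, so any
	 * size with bit 31 set reads as an error; the s64 compare does not.
	 */
	if (size < 0)
		printf("error %lld\n", (long long)size);
	else
		printf("ICM size %lld bytes\n", (long long)size);
	return 0;
}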
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 252db0822f6c..b224079d4e1f 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -359,12 +359,14 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
359 int use_lowmem, int use_coherent) 359 int use_lowmem, int use_coherent)
360{ 360{
361 struct mthca_icm_table *table; 361 struct mthca_icm_table *table;
362 int obj_per_chunk;
362 int num_icm; 363 int num_icm;
363 unsigned chunk_size; 364 unsigned chunk_size;
364 int i; 365 int i;
365 u8 status; 366 u8 status;
366 367
367 num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE; 368 obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
369 num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
368 370
369 table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); 371 table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
370 if (!table) 372 if (!table)
@@ -412,7 +414,7 @@ err:
412 if (table->icm[i]) { 414 if (table->icm[i]) {
413 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, 415 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
414 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, 416 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
415 &status); 417 &status);
416 mthca_free_icm(dev, table->icm[i], table->coherent); 418 mthca_free_icm(dev, table->icm[i], table->coherent);
417 } 419 }
418 420
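The mthca_alloc_icm_table() hunk above recasts the chunk count: computing objects-per-chunk first and rounding up with DIV_ROUND_UP keeps the arithmetic small, where the old obj_size * nobj product could overflow for large tables. A sketch with an assumed 256 KiB chunk size:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TABLE_CHUNK_SIZE	(1u << 18)	/* assumed 256 KiB */

int main(void)
{
	unsigned obj_size = 128, nobj = 1u << 20;
	unsigned obj_per_chunk = TABLE_CHUNK_SIZE / obj_size;
	unsigned num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

	printf("%u objects of %u bytes -> %u chunks (%u objects each)\n",
	       nobj, obj_size, num_icm, obj_per_chunk);
	return 0;
}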
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 26bf86d1cfcd..605a8d57fac6 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -63,7 +63,7 @@ enum {
63 MTHCA_NUM_PDS = 1 << 15 63 MTHCA_NUM_PDS = 1 << 15
64}; 64};
65 65
66u64 mthca_make_profile(struct mthca_dev *dev, 66s64 mthca_make_profile(struct mthca_dev *dev,
67 struct mthca_profile *request, 67 struct mthca_profile *request,
68 struct mthca_dev_lim *dev_lim, 68 struct mthca_dev_lim *dev_lim,
69 struct mthca_init_hca_param *init_hca) 69 struct mthca_init_hca_param *init_hca)
@@ -77,7 +77,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
77 }; 77 };
78 78
79 u64 mem_base, mem_avail; 79 u64 mem_base, mem_avail;
80 u64 total_size = 0; 80 s64 total_size = 0;
81 struct mthca_resource *profile; 81 struct mthca_resource *profile;
82 struct mthca_resource tmp; 82 struct mthca_resource tmp;
83 int i, j; 83 int i, j;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h
index 94641808f97f..e76cb62d8e32 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.h
+++ b/drivers/infiniband/hw/mthca/mthca_profile.h
@@ -53,7 +53,7 @@ struct mthca_profile {
53 int fmr_reserved_mtts; 53 int fmr_reserved_mtts;
54}; 54};
55 55
56u64 mthca_make_profile(struct mthca_dev *mdev, 56s64 mthca_make_profile(struct mthca_dev *mdev,
57 struct mthca_profile *request, 57 struct mthca_profile *request,
58 struct mthca_dev_lim *dev_lim, 58 struct mthca_dev_lim *dev_lim,
59 struct mthca_init_hca_param *init_hca); 59 struct mthca_init_hca_param *init_hca);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 9e491df6419c..81b257e18bb6 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -60,7 +60,7 @@ static int mthca_query_device(struct ib_device *ibdev,
60 struct ib_smp *in_mad = NULL; 60 struct ib_smp *in_mad = NULL;
61 struct ib_smp *out_mad = NULL; 61 struct ib_smp *out_mad = NULL;
62 int err = -ENOMEM; 62 int err = -ENOMEM;
63 struct mthca_dev* mdev = to_mdev(ibdev); 63 struct mthca_dev *mdev = to_mdev(ibdev);
64 64
65 u8 status; 65 u8 status;
66 66
@@ -540,6 +540,9 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
540 struct mthca_qp *qp; 540 struct mthca_qp *qp;
541 int err; 541 int err;
542 542
543 if (init_attr->create_flags)
544 return ERR_PTR(-EINVAL);
545
543 switch (init_attr->qp_type) { 546 switch (init_attr->qp_type) {
544 case IB_QPT_RC: 547 case IB_QPT_RC:
545 case IB_QPT_UC: 548 case IB_QPT_UC:
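mthca_create_qp() above now rejects any create_flags it does not understand before touching hardware, returning the errno encoded in the pointer. A userspace model of that ERR_PTR()-style convention (the helpers here imitate the kernel's, they are not its implementation):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct qp { int num; };

static inline void *err_ptr(long err)		{ return (void *)err; }
static inline long ptr_err(const void *p)	{ return (long)p; }
static inline int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

static struct qp *create_qp(unsigned create_flags)
{
	static struct qp q = { .num = 42 };

	if (create_flags)		/* no create flags supported */
		return err_ptr(-EINVAL);
	return &q;
}

int main(void)
{
	struct qp *qp = create_qp(0x1);

	if (is_err(qp))
		printf("create_qp failed: %ld\n", ptr_err(qp));
	else
		printf("created QP %d\n", qp->num);
	return 0;
}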
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index db5595bbf7f0..09dc3614cf2c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -437,29 +437,34 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
437 int mthca_state; 437 int mthca_state;
438 u8 status; 438 u8 status;
439 439
440 mutex_lock(&qp->mutex);
441
440 if (qp->state == IB_QPS_RESET) { 442 if (qp->state == IB_QPS_RESET) {
441 qp_attr->qp_state = IB_QPS_RESET; 443 qp_attr->qp_state = IB_QPS_RESET;
442 goto done; 444 goto done;
443 } 445 }
444 446
445 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 447 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
446 if (IS_ERR(mailbox)) 448 if (IS_ERR(mailbox)) {
447 return PTR_ERR(mailbox); 449 err = PTR_ERR(mailbox);
450 goto out;
451 }
448 452
449 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status); 453 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
450 if (err) 454 if (err)
451 goto out; 455 goto out_mailbox;
452 if (status) { 456 if (status) {
453 mthca_warn(dev, "QUERY_QP returned status %02x\n", status); 457 mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
454 err = -EINVAL; 458 err = -EINVAL;
455 goto out; 459 goto out_mailbox;
456 } 460 }
457 461
458 qp_param = mailbox->buf; 462 qp_param = mailbox->buf;
459 context = &qp_param->context; 463 context = &qp_param->context;
460 mthca_state = be32_to_cpu(context->flags) >> 28; 464 mthca_state = be32_to_cpu(context->flags) >> 28;
461 465
462 qp_attr->qp_state = to_ib_qp_state(mthca_state); 466 qp->state = to_ib_qp_state(mthca_state);
467 qp_attr->qp_state = qp->state;
463 qp_attr->path_mtu = context->mtu_msgmax >> 5; 468 qp_attr->path_mtu = context->mtu_msgmax >> 5;
464 qp_attr->path_mig_state = 469 qp_attr->path_mig_state =
465 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 470 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -506,8 +511,11 @@ done:
506 511
507 qp_init_attr->cap = qp_attr->cap; 512 qp_init_attr->cap = qp_attr->cap;
508 513
509out: 514out_mailbox:
510 mthca_free_mailbox(dev, mailbox); 515 mthca_free_mailbox(dev, mailbox);
516
517out:
518 mutex_unlock(&qp->mutex);
511 return err; 519 return err;
512} 520}
513 521
@@ -1532,7 +1540,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1532 case IB_WR_SEND_WITH_IMM: 1540 case IB_WR_SEND_WITH_IMM:
1533 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1541 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1534 sqp->ud_header.immediate_present = 1; 1542 sqp->ud_header.immediate_present = 1;
1535 sqp->ud_header.immediate_data = wr->imm_data; 1543 sqp->ud_header.immediate_data = wr->ex.imm_data;
1536 break; 1544 break;
1537 default: 1545 default:
1538 return -EINVAL; 1546 return -EINVAL;
@@ -1679,7 +1687,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1679 cpu_to_be32(1); 1687 cpu_to_be32(1);
1680 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1688 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1681 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 1689 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1682 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; 1690 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1683 1691
1684 wqe += sizeof (struct mthca_next_seg); 1692 wqe += sizeof (struct mthca_next_seg);
1685 size = sizeof (struct mthca_next_seg) / 16; 1693 size = sizeof (struct mthca_next_seg) / 16;
@@ -2015,10 +2023,12 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2015 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | 2023 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
2016 ((wr->send_flags & IB_SEND_SOLICITED) ? 2024 ((wr->send_flags & IB_SEND_SOLICITED) ?
2017 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | 2025 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
2026 ((wr->send_flags & IB_SEND_IP_CSUM) ?
2027 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
2018 cpu_to_be32(1); 2028 cpu_to_be32(1);
2019 if (wr->opcode == IB_WR_SEND_WITH_IMM || 2029 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
2020 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 2030 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
2021 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; 2031 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
2022 2032
2023 wqe += sizeof (struct mthca_next_seg); 2033 wqe += sizeof (struct mthca_next_seg);
2024 size = sizeof (struct mthca_next_seg) / 16; 2034 size = sizeof (struct mthca_next_seg) / 16;
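The mthca_query_qp() hunk above pairs the new qp->mutex with two-level unwind labels: every failure after the mailbox allocation frees it via out_mailbox, and every path drops the mutex via out. A compilable model of that shape, using a pthread mutex in place of the kernel one:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t qp_mutex = PTHREAD_MUTEX_INITIALIZER;

static int query_qp(int fail_query)
{
	void *mailbox;
	int err = 0;

	pthread_mutex_lock(&qp_mutex);

	mailbox = malloc(64);
	if (!mailbox) {
		err = -ENOMEM;
		goto out;		/* nothing else to undo yet */
	}

	if (fail_query) {
		err = -EINVAL;
		goto out_mailbox;	/* free the mailbox first   */
	}

out_mailbox:
	free(mailbox);
out:
	pthread_mutex_unlock(&qp_mutex);
	return err;
}

int main(void)
{
	printf("ok path:   %d\n", query_qp(0));
	printf("fail path: %d\n", query_qp(1));
	return 0;
}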
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index f6a66fe78e48..b3551a8dea1d 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -38,14 +38,16 @@
38#include <linux/types.h> 38#include <linux/types.h>
39 39
40enum { 40enum {
41 MTHCA_NEXT_DBD = 1 << 7, 41 MTHCA_NEXT_DBD = 1 << 7,
42 MTHCA_NEXT_FENCE = 1 << 6, 42 MTHCA_NEXT_FENCE = 1 << 6,
43 MTHCA_NEXT_CQ_UPDATE = 1 << 3, 43 MTHCA_NEXT_CQ_UPDATE = 1 << 3,
44 MTHCA_NEXT_EVENT_GEN = 1 << 2, 44 MTHCA_NEXT_EVENT_GEN = 1 << 2,
45 MTHCA_NEXT_SOLICIT = 1 << 1, 45 MTHCA_NEXT_SOLICIT = 1 << 1,
46 46 MTHCA_NEXT_IP_CSUM = 1 << 4,
47 MTHCA_MLX_VL15 = 1 << 17, 47 MTHCA_NEXT_TCP_UDP_CSUM = 1 << 5,
48 MTHCA_MLX_SLR = 1 << 16 48
49 MTHCA_MLX_VL15 = 1 << 17,
50 MTHCA_MLX_SLR = 1 << 16
49}; 51};
50 52
51enum { 53enum {
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b2112f5a422f..b00b0e3a91dc 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -65,7 +65,6 @@ MODULE_LICENSE("Dual BSD/GPL");
65MODULE_VERSION(DRV_VERSION); 65MODULE_VERSION(DRV_VERSION);
66 66
67int max_mtu = 9000; 67int max_mtu = 9000;
68int nics_per_function = 1;
69int interrupt_mod_interval = 0; 68int interrupt_mod_interval = 0;
70 69
71 70
@@ -93,15 +92,9 @@ module_param_named(debug_level, nes_debug_level, uint, 0644);
93MODULE_PARM_DESC(debug_level, "Enable debug output level"); 92MODULE_PARM_DESC(debug_level, "Enable debug output level");
94 93
95LIST_HEAD(nes_adapter_list); 94LIST_HEAD(nes_adapter_list);
96LIST_HEAD(nes_dev_list); 95static LIST_HEAD(nes_dev_list);
97 96
98atomic_t qps_destroyed; 97atomic_t qps_destroyed;
99atomic_t cqp_reqs_allocated;
100atomic_t cqp_reqs_freed;
101atomic_t cqp_reqs_dynallocated;
102atomic_t cqp_reqs_dynfreed;
103atomic_t cqp_reqs_queued;
104atomic_t cqp_reqs_redriven;
105 98
106static void nes_print_macaddr(struct net_device *netdev); 99static void nes_print_macaddr(struct net_device *netdev);
107static irqreturn_t nes_interrupt(int, void *); 100static irqreturn_t nes_interrupt(int, void *);
@@ -310,7 +303,7 @@ void nes_rem_ref(struct ib_qp *ibqp)
310 303
311 if (atomic_read(&nesqp->refcount) == 0) { 304 if (atomic_read(&nesqp->refcount) == 0) {
312 printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n", 305 printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
313 __FUNCTION__, ibqp->qp_num, nesqp->last_aeq); 306 __func__, ibqp->qp_num, nesqp->last_aeq);
314 BUG(); 307 BUG();
315 } 308 }
316 309
@@ -751,13 +744,13 @@ static void __devexit nes_remove(struct pci_dev *pcidev)
751 744
752 list_del(&nesdev->list); 745 list_del(&nesdev->list);
753 nes_destroy_cqp(nesdev); 746 nes_destroy_cqp(nesdev);
747
748 free_irq(pcidev->irq, nesdev);
754 tasklet_kill(&nesdev->dpc_tasklet); 749 tasklet_kill(&nesdev->dpc_tasklet);
755 750
756 /* Deallocate the Adapter Structure */ 751 /* Deallocate the Adapter Structure */
757 nes_destroy_adapter(nesdev->nesadapter); 752 nes_destroy_adapter(nesdev->nesadapter);
758 753
759 free_irq(pcidev->irq, nesdev);
760
761 if (nesdev->msi_enabled) { 754 if (nesdev->msi_enabled) {
762 pci_disable_msi(pcidev); 755 pci_disable_msi(pcidev);
763 } 756 }
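The nes_remove() reorder above matters because a live IRQ can requeue the tasklet after tasklet_kill() returns; freeing the IRQ first closes that window before the adapter structure goes away. A stub sketch of the ordering (these functions only model the kernel calls):

#include <stdio.h>

struct dev { int irq_live, tasklet_queued; };

static void free_irq_stub(struct dev *d)	{ d->irq_live = 0; }
static void tasklet_kill_stub(struct dev *d)	{ d->tasklet_queued = 0; }
static void destroy_adapter_stub(struct dev *d)
{
	printf("freeing: irq_live=%d tasklet_queued=%d\n",
	       d->irq_live, d->tasklet_queued);
}

static void remove_dev(struct dev *d)
{
	free_irq_stub(d);	 /* 1: stop the source of new work */
	tasklet_kill_stub(d);	 /* 2: drain deferred work         */
	destroy_adapter_stub(d); /* 3: only now free shared state  */
}

int main(void)
{
	struct dev d = { 1, 1 };
	remove_dev(&d);
	return 0;
}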
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index a48b288618ec..1626124a156d 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -143,12 +143,12 @@
143#ifdef CONFIG_INFINIBAND_NES_DEBUG 143#ifdef CONFIG_INFINIBAND_NES_DEBUG
144#define nes_debug(level, fmt, args...) \ 144#define nes_debug(level, fmt, args...) \
145 if (level & nes_debug_level) \ 145 if (level & nes_debug_level) \
146 printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args) 146 printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args)
147 147
148#define assert(expr) \ 148#define assert(expr) \
149if (!(expr)) { \ 149if (!(expr)) { \
150 printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \ 150 printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
151 #expr, __FILE__, __FUNCTION__, __LINE__); \ 151 #expr, __FILE__, __func__, __LINE__); \
152} 152}
153 153
154#define NES_EVENT_TIMEOUT 1200000 154#define NES_EVENT_TIMEOUT 1200000
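One hazard worth noting in the nes_debug() macro shown above: it expands to a bare if, so an else after a call site binds to the macro's hidden if rather than the enclosing one. The conventional guard is a do { } while (0) wrapper, sketched here with hypothetical macros:

#include <stdio.h>

#define LOG_BARE(cond, msg) \
	if (cond) printf("%s\n", msg)

#define LOG_SAFE(cond, msg) \
	do { if (cond) printf("%s\n", msg); } while (0)

int main(void)
{
	int verbose = 0, fallback = 1;

	if (fallback)
		LOG_BARE(verbose, "bare");	/* the else below binds to */
	else					/* the macro's hidden if,  */
		printf("unexpected else\n");	/* so this line prints     */

	if (fallback)
		LOG_SAFE(verbose, "safe");	/* verbose is 0: silent */
	else
		printf("never printed\n");
	return 0;
}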
@@ -166,7 +166,6 @@ if (!(expr)) { \
166#include "nes_cm.h" 166#include "nes_cm.h"
167 167
168extern int max_mtu; 168extern int max_mtu;
169extern int nics_per_function;
170#define max_frame_len (max_mtu+ETH_HLEN) 169#define max_frame_len (max_mtu+ETH_HLEN)
171extern int interrupt_mod_interval; 170extern int interrupt_mod_interval;
172extern int nes_if_count; 171extern int nes_if_count;
@@ -177,9 +176,6 @@ extern unsigned int nes_drv_opt;
177extern unsigned int nes_debug_level; 176extern unsigned int nes_debug_level;
178 177
179extern struct list_head nes_adapter_list; 178extern struct list_head nes_adapter_list;
180extern struct list_head nes_dev_list;
181
182extern struct nes_cm_core *g_cm_core;
183 179
184extern atomic_t cm_connects; 180extern atomic_t cm_connects;
185extern atomic_t cm_accepts; 181extern atomic_t cm_accepts;
@@ -209,7 +205,6 @@ extern atomic_t cm_nodes_destroyed;
209extern atomic_t cm_accel_dropped_pkts; 205extern atomic_t cm_accel_dropped_pkts;
210extern atomic_t cm_resets_recvd; 206extern atomic_t cm_resets_recvd;
211 207
212extern u32 crit_err_count;
213extern u32 int_mod_timer_init; 208extern u32 int_mod_timer_init;
214extern u32 int_mod_cq_depth_256; 209extern u32 int_mod_cq_depth_256;
215extern u32 int_mod_cq_depth_128; 210extern u32 int_mod_cq_depth_128;
@@ -219,14 +214,6 @@ extern u32 int_mod_cq_depth_16;
219extern u32 int_mod_cq_depth_4; 214extern u32 int_mod_cq_depth_4;
220extern u32 int_mod_cq_depth_1; 215extern u32 int_mod_cq_depth_1;
221 216
222extern atomic_t cqp_reqs_allocated;
223extern atomic_t cqp_reqs_freed;
224extern atomic_t cqp_reqs_dynallocated;
225extern atomic_t cqp_reqs_dynfreed;
226extern atomic_t cqp_reqs_queued;
227extern atomic_t cqp_reqs_redriven;
228
229
230struct nes_device { 217struct nes_device {
231 struct nes_adapter *nesadapter; 218 struct nes_adapter *nesadapter;
232 void __iomem *regs; 219 void __iomem *regs;
@@ -412,7 +399,7 @@ static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
412 if (resource_num >= max_resources) { 399 if (resource_num >= max_resources) {
413 resource_num = find_first_zero_bit(resource_array, max_resources); 400 resource_num = find_first_zero_bit(resource_array, max_resources);
414 if (resource_num >= max_resources) { 401 if (resource_num >= max_resources) {
 415 printk(KERN_ERR PFX "%s: No available resources.\n", __FUNCTION__); 402 printk(KERN_ERR PFX "%s: No available resources.\n", __func__);
416 spin_unlock_irqrestore(&nesadapter->resource_lock, flags); 403 spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
417 return -EMFILE; 404 return -EMFILE;
418 } 405 }
@@ -510,9 +497,6 @@ struct ib_qp *nes_get_qp(struct ib_device *, int);
510/* nes_hw.c */ 497/* nes_hw.c */
511struct nes_adapter *nes_init_adapter(struct nes_device *, u8); 498struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
512void nes_nic_init_timer_defaults(struct nes_device *, u8); 499void nes_nic_init_timer_defaults(struct nes_device *, u8);
513unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *);
514int nes_init_serdes(struct nes_device *, u8, u8, u8);
515void nes_init_csr_ne020(struct nes_device *, u8, u8);
516void nes_destroy_adapter(struct nes_adapter *); 500void nes_destroy_adapter(struct nes_adapter *);
517int nes_init_cqp(struct nes_device *); 501int nes_init_cqp(struct nes_device *);
518int nes_init_phy(struct nes_device *); 502int nes_init_phy(struct nes_device *);
@@ -520,20 +504,12 @@ int nes_init_nic_qp(struct nes_device *, struct net_device *);
520void nes_destroy_nic_qp(struct nes_vnic *); 504void nes_destroy_nic_qp(struct nes_vnic *);
521int nes_napi_isr(struct nes_device *); 505int nes_napi_isr(struct nes_device *);
522void nes_dpc(unsigned long); 506void nes_dpc(unsigned long);
523void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *);
524void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *);
525void nes_process_mac_intr(struct nes_device *, u32);
526void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
527void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *); 507void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
528void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *);
529void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *);
530void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *); 508void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
531int nes_destroy_cqp(struct nes_device *); 509int nes_destroy_cqp(struct nes_device *);
532int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); 510int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
533 511
534/* nes_nic.c */ 512/* nes_nic.c */
535void nes_netdev_set_multicast_list(struct net_device *);
536void nes_netdev_exit(struct nes_vnic *);
537struct net_device *nes_netdev_init(struct nes_device *, void __iomem *); 513struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
538void nes_netdev_destroy(struct net_device *); 514void nes_netdev_destroy(struct net_device *);
539int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); 515int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
@@ -544,7 +520,6 @@ int nes_cm_recv(struct sk_buff *, struct net_device *);
544void nes_update_arp(unsigned char *, u32, u32, u16, u16); 520void nes_update_arp(unsigned char *, u32, u32, u16, u16);
545void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32); 521void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
546void nes_sock_release(struct nes_qp *, unsigned long *); 522void nes_sock_release(struct nes_qp *, unsigned long *);
547struct nes_cm_core *nes_cm_alloc_core(void);
548void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32); 523void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
549int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32); 524int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
550int nes_cm_disconn(struct nes_qp *); 525int nes_cm_disconn(struct nes_qp *);
@@ -556,7 +531,6 @@ int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
556struct nes_ib_device *nes_init_ofa_device(struct net_device *); 531struct nes_ib_device *nes_init_ofa_device(struct net_device *);
557void nes_destroy_ofa_device(struct nes_ib_device *); 532void nes_destroy_ofa_device(struct nes_ib_device *);
558int nes_register_ofa_device(struct nes_ib_device *); 533int nes_register_ofa_device(struct nes_ib_device *);
559void nes_unregister_ofa_device(struct nes_ib_device *);
560 534
561/* nes_util.c */ 535/* nes_util.c */
562int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *); 536int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 0bef878e0f65..d0738623bcf3 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -80,7 +80,30 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
80static int add_ref_cm_node(struct nes_cm_node *); 80static int add_ref_cm_node(struct nes_cm_node *);
81static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *); 81static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
82static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *); 82static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
83 83static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
84 void *, u32, void *, u32, u8);
85static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node);
86
87static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *,
88 struct nes_vnic *,
89 struct ietf_mpa_frame *,
90 struct nes_cm_info *);
91static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *,
92 struct nes_cm_node *);
93static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *,
94 struct nes_cm_node *);
95static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
96static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
97 struct sk_buff *);
98static int mini_cm_dealloc_core(struct nes_cm_core *);
99static int mini_cm_get(struct nes_cm_core *);
100static int mini_cm_set(struct nes_cm_core *, u32, u32);
101static int nes_cm_disconn_true(struct nes_qp *);
102static int nes_cm_post_event(struct nes_cm_event *event);
103static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
104static void nes_disconnect_worker(struct work_struct *work);
105static int send_ack(struct nes_cm_node *cm_node);
106static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb);
84 107
85/* External CM API Interface */ 108/* External CM API Interface */
86/* instance of function pointers for client API */ 109/* instance of function pointers for client API */
@@ -99,7 +122,7 @@ static struct nes_cm_ops nes_cm_api = {
99 mini_cm_set 122 mini_cm_set
100}; 123};
101 124
102struct nes_cm_core *g_cm_core; 125static struct nes_cm_core *g_cm_core;
103 126
104atomic_t cm_connects; 127atomic_t cm_connects;
105atomic_t cm_accepts; 128atomic_t cm_accepts;
@@ -149,7 +172,7 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
149/** 172/**
150 * send_mpa_request 173 * send_mpa_request
151 */ 174 */
152int send_mpa_request(struct nes_cm_node *cm_node) 175static int send_mpa_request(struct nes_cm_node *cm_node)
153{ 176{
154 struct sk_buff *skb; 177 struct sk_buff *skb;
155 int ret; 178 int ret;
@@ -243,8 +266,9 @@ static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb
243 * form_cm_frame - get a free packet and build empty frame Use 266 * form_cm_frame - get a free packet and build empty frame Use
244 * node info to build. 267 * node info to build.
245 */ 268 */
246struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node, 269static struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
247 void *options, u32 optionsize, void *data, u32 datasize, u8 flags) 270 void *options, u32 optionsize, void *data,
271 u32 datasize, u8 flags)
248{ 272{
249 struct tcphdr *tcph; 273 struct tcphdr *tcph;
250 struct iphdr *iph; 274 struct iphdr *iph;
@@ -342,7 +366,6 @@ static void print_core(struct nes_cm_core *core)
342 if (!core) 366 if (!core)
343 return; 367 return;
344 nes_debug(NES_DBG_CM, "---------------------------------------------\n"); 368 nes_debug(NES_DBG_CM, "---------------------------------------------\n");
345 nes_debug(NES_DBG_CM, "Session ID : %u \n", atomic_read(&core->session_id));
346 369
347 nes_debug(NES_DBG_CM, "State : %u \n", core->state); 370 nes_debug(NES_DBG_CM, "State : %u \n", core->state);
348 371
@@ -395,7 +418,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
395 } 418 }
396 419
397 if (type == NES_TIMER_TYPE_SEND) { 420 if (type == NES_TIMER_TYPE_SEND) {
398 new_send->seq_num = htonl(tcp_hdr(skb)->seq); 421 new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
399 atomic_inc(&new_send->skb->users); 422 atomic_inc(&new_send->skb->users);
400 423
401 ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev); 424 ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
@@ -420,7 +443,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
420 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); 443 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
421 } 444 }
422 if (type == NES_TIMER_TYPE_RECV) { 445 if (type == NES_TIMER_TYPE_RECV) {
423 new_send->seq_num = htonl(tcp_hdr(skb)->seq); 446 new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
424 new_send->timetosend = jiffies; 447 new_send->timetosend = jiffies;
425 spin_lock_irqsave(&cm_node->recv_list_lock, flags); 448 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
426 list_add_tail(&new_send->list, &cm_node->recv_list); 449 list_add_tail(&new_send->list, &cm_node->recv_list);
@@ -442,7 +465,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
442/** 465/**
443 * nes_cm_timer_tick 466 * nes_cm_timer_tick
444 */ 467 */
445void nes_cm_timer_tick(unsigned long pass) 468static void nes_cm_timer_tick(unsigned long pass)
446{ 469{
447 unsigned long flags, qplockflags; 470 unsigned long flags, qplockflags;
448 unsigned long nexttimeout = jiffies + NES_LONG_TIME; 471 unsigned long nexttimeout = jiffies + NES_LONG_TIME;
@@ -644,7 +667,7 @@ void nes_cm_timer_tick(unsigned long pass)
644/** 667/**
645 * send_syn 668 * send_syn
646 */ 669 */
647int send_syn(struct nes_cm_node *cm_node, u32 sendack) 670static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
648{ 671{
649 int ret; 672 int ret;
650 int flags = SET_SYN; 673 int flags = SET_SYN;
@@ -710,7 +733,7 @@ int send_syn(struct nes_cm_node *cm_node, u32 sendack)
710/** 733/**
711 * send_reset 734 * send_reset
712 */ 735 */
713int send_reset(struct nes_cm_node *cm_node) 736static int send_reset(struct nes_cm_node *cm_node)
714{ 737{
715 int ret; 738 int ret;
716 struct sk_buff *skb = get_free_pkt(cm_node); 739 struct sk_buff *skb = get_free_pkt(cm_node);
@@ -732,7 +755,7 @@ int send_reset(struct nes_cm_node *cm_node)
732/** 755/**
733 * send_ack 756 * send_ack
734 */ 757 */
735int send_ack(struct nes_cm_node *cm_node) 758static int send_ack(struct nes_cm_node *cm_node)
736{ 759{
737 int ret; 760 int ret;
738 struct sk_buff *skb = get_free_pkt(cm_node); 761 struct sk_buff *skb = get_free_pkt(cm_node);
@@ -752,7 +775,7 @@ int send_ack(struct nes_cm_node *cm_node)
752/** 775/**
753 * send_fin 776 * send_fin
754 */ 777 */
755int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb) 778static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
756{ 779{
757 int ret; 780 int ret;
758 781
@@ -775,7 +798,7 @@ int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
775/** 798/**
776 * get_free_pkt 799 * get_free_pkt
777 */ 800 */
778struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node) 801static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
779{ 802{
780 struct sk_buff *skb, *new_skb; 803 struct sk_buff *skb, *new_skb;
781 804
@@ -820,7 +843,6 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
820{ 843{
821 unsigned long flags; 844 unsigned long flags;
822 u32 hashkey; 845 u32 hashkey;
823 struct list_head *list_pos;
824 struct list_head *hte; 846 struct list_head *hte;
825 struct nes_cm_node *cm_node; 847 struct nes_cm_node *cm_node;
826 848
@@ -835,8 +857,7 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
835 857
836 /* walk list and find cm_node associated with this session ID */ 858 /* walk list and find cm_node associated with this session ID */
837 spin_lock_irqsave(&cm_core->ht_lock, flags); 859 spin_lock_irqsave(&cm_core->ht_lock, flags);
838 list_for_each(list_pos, hte) { 860 list_for_each_entry(cm_node, hte, list) {
839 cm_node = container_of(list_pos, struct nes_cm_node, list);
840 /* compare quad, return node handle if a match */ 861 /* compare quad, return node handle if a match */
841 nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n", 862 nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
842 cm_node->loc_addr, cm_node->loc_port, 863 cm_node->loc_addr, cm_node->loc_port,
@@ -864,13 +885,11 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
864 nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state) 885 nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
865{ 886{
866 unsigned long flags; 887 unsigned long flags;
867 struct list_head *listen_list;
868 struct nes_cm_listener *listen_node; 888 struct nes_cm_listener *listen_node;
869 889
870 /* walk list and find cm_node associated with this session ID */ 890 /* walk list and find cm_node associated with this session ID */
871 spin_lock_irqsave(&cm_core->listen_list_lock, flags); 891 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
872 list_for_each(listen_list, &cm_core->listen_list.list) { 892 list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
873 listen_node = container_of(listen_list, struct nes_cm_listener, list);
874 /* compare node pair, return node handle if a match */ 893 /* compare node pair, return node handle if a match */
875 if (((listen_node->loc_addr == dst_addr) || 894 if (((listen_node->loc_addr == dst_addr) ||
876 listen_node->loc_addr == 0x00000000) && 895 listen_node->loc_addr == 0x00000000) &&
@@ -1014,7 +1033,7 @@ static void nes_addr_send_arp(u32 dst_ip)
1014 fl.nl_u.ip4_u.daddr = htonl(dst_ip); 1033 fl.nl_u.ip4_u.daddr = htonl(dst_ip);
1015 if (ip_route_output_key(&init_net, &rt, &fl)) { 1034 if (ip_route_output_key(&init_net, &rt, &fl)) {
1016 printk("%s: ip_route_output_key failed for 0x%08X\n", 1035 printk("%s: ip_route_output_key failed for 0x%08X\n",
1017 __FUNCTION__, dst_ip); 1036 __func__, dst_ip);
1018 return; 1037 return;
1019 } 1038 }
1020 1039
@@ -1077,8 +1096,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1077 cm_node->tcp_cntxt.rcv_nxt = 0; 1096 cm_node->tcp_cntxt.rcv_nxt = 0;
1078 /* get a unique session ID , add thread_id to an upcounter to handle race */ 1097 /* get a unique session ID , add thread_id to an upcounter to handle race */
1079 atomic_inc(&cm_core->node_cnt); 1098 atomic_inc(&cm_core->node_cnt);
1080 atomic_inc(&cm_core->session_id);
1081 cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
1082 cm_node->conn_type = cm_info->conn_type; 1099 cm_node->conn_type = cm_info->conn_type;
1083 cm_node->apbvt_set = 0; 1100 cm_node->apbvt_set = 0;
1084 cm_node->accept_pend = 0; 1101 cm_node->accept_pend = 0;
@@ -1239,7 +1256,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
1239 continue; 1256 continue;
1240 case OPTION_NUMBER_MSS: 1257 case OPTION_NUMBER_MSS:
1241 nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n", 1258 nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
1242 __FUNCTION__, 1259 __func__,
1243 all_options->as_mss.length, offset, optionsize); 1260 all_options->as_mss.length, offset, optionsize);
1244 got_mss_option = 1; 1261 got_mss_option = 1;
1245 if (all_options->as_mss.length != 4) { 1262 if (all_options->as_mss.length != 4) {
@@ -1272,8 +1289,8 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
1272/** 1289/**
1273 * process_packet 1290 * process_packet
1274 */ 1291 */
1275int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb, 1292static int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
1276 struct nes_cm_core *cm_core) 1293 struct nes_cm_core *cm_core)
1277{ 1294{
1278 int optionsize; 1295 int optionsize;
1279 int datasize; 1296 int datasize;
@@ -1360,7 +1377,7 @@ int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
1360 if (optionsize) { 1377 if (optionsize) {
1361 u8 *optionsloc = (u8 *)&tcph[1]; 1378 u8 *optionsloc = (u8 *)&tcph[1];
1362 if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) { 1379 if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
1363 nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __FUNCTION__, cm_node); 1380 nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __func__, cm_node);
1364 send_reset(cm_node); 1381 send_reset(cm_node);
1365 if (cm_node->state != NES_CM_STATE_SYN_SENT) 1382 if (cm_node->state != NES_CM_STATE_SYN_SENT)
1366 rem_ref_cm_node(cm_core, cm_node); 1383 rem_ref_cm_node(cm_core, cm_node);
@@ -1605,9 +1622,7 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
1605 listener->cm_core = cm_core; 1622 listener->cm_core = cm_core;
1606 listener->nesvnic = nesvnic; 1623 listener->nesvnic = nesvnic;
1607 atomic_inc(&cm_core->node_cnt); 1624 atomic_inc(&cm_core->node_cnt);
1608 atomic_inc(&cm_core->session_id);
1609 1625
1610 listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
1611 listener->conn_type = cm_info->conn_type; 1626 listener->conn_type = cm_info->conn_type;
1612 listener->backlog = cm_info->backlog; 1627 listener->backlog = cm_info->backlog;
1613 listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE; 1628 listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;
@@ -1631,9 +1646,10 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
1631/** 1646/**
1632 * mini_cm_connect - make a connection node with params 1647 * mini_cm_connect - make a connection node with params
1633 */ 1648 */
1634struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, 1649static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1635 struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame, 1650 struct nes_vnic *nesvnic,
1636 struct nes_cm_info *cm_info) 1651 struct ietf_mpa_frame *mpa_frame,
1652 struct nes_cm_info *cm_info)
1637{ 1653{
1638 int ret = 0; 1654 int ret = 0;
1639 struct nes_cm_node *cm_node; 1655 struct nes_cm_node *cm_node;
@@ -1717,8 +1733,8 @@ struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1717 * mini_cm_accept - accept a connection 1733 * mini_cm_accept - accept a connection
1718 * This function is never called 1734 * This function is never called
1719 */ 1735 */
1720int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame, 1736static int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
1721 struct nes_cm_node *cm_node) 1737 struct nes_cm_node *cm_node)
1722{ 1738{
1723 return 0; 1739 return 0;
1724} 1740}
@@ -1727,9 +1743,9 @@ int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame
1727/** 1743/**
1728 * mini_cm_reject - reject and teardown a connection 1744 * mini_cm_reject - reject and teardown a connection
1729 */ 1745 */
1730int mini_cm_reject(struct nes_cm_core *cm_core, 1746static int mini_cm_reject(struct nes_cm_core *cm_core,
1731 struct ietf_mpa_frame *mpa_frame, 1747 struct ietf_mpa_frame *mpa_frame,
1732 struct nes_cm_node *cm_node) 1748 struct nes_cm_node *cm_node)
1733{ 1749{
1734 int ret = 0; 1750 int ret = 0;
1735 struct sk_buff *skb; 1751 struct sk_buff *skb;
@@ -1761,7 +1777,7 @@ int mini_cm_reject(struct nes_cm_core *cm_core,
1761/** 1777/**
1762 * mini_cm_close 1778 * mini_cm_close
1763 */ 1779 */
1764int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node) 1780static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
1765{ 1781{
1766 int ret = 0; 1782 int ret = 0;
1767 1783
@@ -1808,8 +1824,8 @@ int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
1808 * recv_pkt - recv an ETHERNET packet, and process it through CM 1824 * recv_pkt - recv an ETHERNET packet, and process it through CM
1809 * node state machine 1825 * node state machine
1810 */ 1826 */
1811int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic, 1827static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
1812 struct sk_buff *skb) 1828 struct sk_buff *skb)
1813{ 1829{
1814 struct nes_cm_node *cm_node = NULL; 1830 struct nes_cm_node *cm_node = NULL;
1815 struct nes_cm_listener *listener = NULL; 1831 struct nes_cm_listener *listener = NULL;
@@ -1898,7 +1914,7 @@ int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
1898/** 1914/**
1899 * nes_cm_alloc_core - allocate a top level instance of a cm core 1915 * nes_cm_alloc_core - allocate a top level instance of a cm core
1900 */ 1916 */
1901struct nes_cm_core *nes_cm_alloc_core(void) 1917static struct nes_cm_core *nes_cm_alloc_core(void)
1902{ 1918{
1903 int i; 1919 int i;
1904 1920
@@ -1919,7 +1935,6 @@ struct nes_cm_core *nes_cm_alloc_core(void)
1919 cm_core->state = NES_CM_STATE_INITED; 1935 cm_core->state = NES_CM_STATE_INITED;
1920 cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS; 1936 cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
1921 1937
1922 atomic_set(&cm_core->session_id, 0);
1923 atomic_set(&cm_core->events_posted, 0); 1938 atomic_set(&cm_core->events_posted, 0);
1924 1939
1925 /* init the packet lists */ 1940 /* init the packet lists */
@@ -1958,7 +1973,7 @@ struct nes_cm_core *nes_cm_alloc_core(void)
1958/** 1973/**
1959 * mini_cm_dealloc_core - deallocate a top level instance of a cm core 1974 * mini_cm_dealloc_core - deallocate a top level instance of a cm core
1960 */ 1975 */
1961int mini_cm_dealloc_core(struct nes_cm_core *cm_core) 1976static int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
1962{ 1977{
1963 nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core); 1978 nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);
1964 1979
@@ -1983,7 +1998,7 @@ int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
1983/** 1998/**
1984 * mini_cm_get 1999 * mini_cm_get
1985 */ 2000 */
1986int mini_cm_get(struct nes_cm_core *cm_core) 2001static int mini_cm_get(struct nes_cm_core *cm_core)
1987{ 2002{
1988 return cm_core->state; 2003 return cm_core->state;
1989} 2004}
@@ -1992,7 +2007,7 @@ int mini_cm_get(struct nes_cm_core *cm_core)
1992/** 2007/**
1993 * mini_cm_set 2008 * mini_cm_set
1994 */ 2009 */
1995int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value) 2010static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
1996{ 2011{
1997 int ret = 0; 2012 int ret = 0;
1998 2013
@@ -2109,7 +2124,7 @@ int nes_cm_disconn(struct nes_qp *nesqp)
2109/** 2124/**
2110 * nes_disconnect_worker 2125 * nes_disconnect_worker
2111 */ 2126 */
2112void nes_disconnect_worker(struct work_struct *work) 2127static void nes_disconnect_worker(struct work_struct *work)
2113{ 2128{
2114 struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work); 2129 struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
2115 2130
@@ -2122,7 +2137,7 @@ void nes_disconnect_worker(struct work_struct *work)
2122/** 2137/**
2123 * nes_cm_disconn_true 2138 * nes_cm_disconn_true
2124 */ 2139 */
2125int nes_cm_disconn_true(struct nes_qp *nesqp) 2140static int nes_cm_disconn_true(struct nes_qp *nesqp)
2126{ 2141{
2127 unsigned long flags; 2142 unsigned long flags;
2128 int ret = 0; 2143 int ret = 0;
@@ -2265,7 +2280,7 @@ int nes_cm_disconn_true(struct nes_qp *nesqp)
2265/** 2280/**
2266 * nes_disconnect 2281 * nes_disconnect
2267 */ 2282 */
2268int nes_disconnect(struct nes_qp *nesqp, int abrupt) 2283static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
2269{ 2284{
2270 int ret = 0; 2285 int ret = 0;
2271 struct nes_vnic *nesvnic; 2286 struct nes_vnic *nesvnic;
@@ -2482,7 +2497,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2482 } 2497 }
2483 if (ret) 2498 if (ret)
2484 printk("%s[%u] OFA CM event_handler returned, ret=%d\n", 2499 printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
2485 __FUNCTION__, __LINE__, ret); 2500 __func__, __LINE__, ret);
2486 2501
2487 return 0; 2502 return 0;
2488} 2503}
@@ -2650,7 +2665,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
2650 cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); 2665 cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
2651 if (!cm_node) { 2666 if (!cm_node) {
2652 printk("%s[%u] Error returned from listen API call\n", 2667 printk("%s[%u] Error returned from listen API call\n",
2653 __FUNCTION__, __LINE__); 2668 __func__, __LINE__);
2654 return -ENOMEM; 2669 return -ENOMEM;
2655 } 2670 }
2656 2671
@@ -2740,7 +2755,7 @@ int nes_cm_stop(void)
2740 * cm_event_connected 2755 * cm_event_connected
2741 * handle a connected event, setup QPs and HW 2756 * handle a connected event, setup QPs and HW
2742 */ 2757 */
2743void cm_event_connected(struct nes_cm_event *event) 2758static void cm_event_connected(struct nes_cm_event *event)
2744{ 2759{
2745 u64 u64temp; 2760 u64 u64temp;
2746 struct nes_qp *nesqp; 2761 struct nes_qp *nesqp;
@@ -2864,7 +2879,7 @@ void cm_event_connected(struct nes_cm_event *event)
2864 2879
2865 if (ret) 2880 if (ret)
2866 printk("%s[%u] OFA CM event_handler returned, ret=%d\n", 2881 printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
2867 __FUNCTION__, __LINE__, ret); 2882 __func__, __LINE__, ret);
2868 nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n", 2883 nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
2869 nesqp->hwqp.qp_id, jiffies ); 2884 nesqp->hwqp.qp_id, jiffies );
2870 2885
@@ -2877,7 +2892,7 @@ void cm_event_connected(struct nes_cm_event *event)
2877/** 2892/**
2878 * cm_event_connect_error 2893 * cm_event_connect_error
2879 */ 2894 */
2880void cm_event_connect_error(struct nes_cm_event *event) 2895static void cm_event_connect_error(struct nes_cm_event *event)
2881{ 2896{
2882 struct nes_qp *nesqp; 2897 struct nes_qp *nesqp;
2883 struct iw_cm_id *cm_id; 2898 struct iw_cm_id *cm_id;
@@ -2919,7 +2934,7 @@ void cm_event_connect_error(struct nes_cm_event *event)
2919 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); 2934 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
2920 if (ret) 2935 if (ret)
2921 printk("%s[%u] OFA CM event_handler returned, ret=%d\n", 2936 printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
2922 __FUNCTION__, __LINE__, ret); 2937 __func__, __LINE__, ret);
2923 nes_rem_ref(&nesqp->ibqp); 2938 nes_rem_ref(&nesqp->ibqp);
2924 cm_id->rem_ref(cm_id); 2939 cm_id->rem_ref(cm_id);
2925 2940
@@ -2930,7 +2945,7 @@ void cm_event_connect_error(struct nes_cm_event *event)
2930/** 2945/**
2931 * cm_event_reset 2946 * cm_event_reset
2932 */ 2947 */
2933void cm_event_reset(struct nes_cm_event *event) 2948static void cm_event_reset(struct nes_cm_event *event)
2934{ 2949{
2935 struct nes_qp *nesqp; 2950 struct nes_qp *nesqp;
2936 struct iw_cm_id *cm_id; 2951 struct iw_cm_id *cm_id;
@@ -2973,7 +2988,7 @@ void cm_event_reset(struct nes_cm_event *event)
2973/** 2988/**
2974 * cm_event_mpa_req 2989 * cm_event_mpa_req
2975 */ 2990 */
2976void cm_event_mpa_req(struct nes_cm_event *event) 2991static void cm_event_mpa_req(struct nes_cm_event *event)
2977{ 2992{
2978 struct iw_cm_id *cm_id; 2993 struct iw_cm_id *cm_id;
2979 struct iw_cm_event cm_event; 2994 struct iw_cm_event cm_event;
@@ -3007,7 +3022,7 @@ void cm_event_mpa_req(struct nes_cm_event *event)
3007 ret = cm_id->event_handler(cm_id, &cm_event); 3022 ret = cm_id->event_handler(cm_id, &cm_event);
3008 if (ret) 3023 if (ret)
3009 printk("%s[%u] OFA CM event_handler returned, ret=%d\n", 3024 printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
3010 __FUNCTION__, __LINE__, ret); 3025 __func__, __LINE__, ret);
3011 3026
3012 return; 3027 return;
3013} 3028}
@@ -3019,7 +3034,7 @@ static void nes_cm_event_handler(struct work_struct *);
3019 * nes_cm_post_event 3034 * nes_cm_post_event
3020 * post an event to the cm event handler 3035 * post an event to the cm event handler
3021 */ 3036 */
3022int nes_cm_post_event(struct nes_cm_event *event) 3037static int nes_cm_post_event(struct nes_cm_event *event)
3023{ 3038{
3024 atomic_inc(&event->cm_node->cm_core->events_posted); 3039 atomic_inc(&event->cm_node->cm_core->events_posted);
3025 add_ref_cm_node(event->cm_node); 3040 add_ref_cm_node(event->cm_node);
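The find_node()/find_listener() hunks above replace open-coded list_for_each() plus container_of() with the typed list_for_each_entry() iterator, which folds the container lookup into the loop header. A userspace reimplementation of just enough of the API to show the shape (gcc typeof; the list plumbing below imitates, and is not, the kernel's):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct cm_node { int id; struct list_head list; };

int main(void)
{
	struct list_head hte = { &hte, &hte };
	struct cm_node a = { .id = 1 }, b = { .id = 2 };
	struct cm_node *node;

	/* hand-built ring: hte -> a -> b -> hte */
	a.list.next = &b.list; a.list.prev = &hte;
	b.list.next = &hte;    b.list.prev = &a.list;
	hte.next = &a.list;    hte.prev = &b.list;

	list_for_each_entry(node, &hte, list)
		printf("cm_node %d\n", node->id);
	return 0;
}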
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index a59f0a7fb278..7717cb2ab500 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -225,7 +225,6 @@ enum nes_cm_listener_state {
225 225
226struct nes_cm_listener { 226struct nes_cm_listener {
227 struct list_head list; 227 struct list_head list;
228 u64 session_id;
229 struct nes_cm_core *cm_core; 228 struct nes_cm_core *cm_core;
230 u8 loc_mac[ETH_ALEN]; 229 u8 loc_mac[ETH_ALEN];
231 nes_addr_t loc_addr; 230 nes_addr_t loc_addr;
@@ -242,7 +241,6 @@ struct nes_cm_listener {
242 241
243/* per connection node and node state information */ 242/* per connection node and node state information */
244struct nes_cm_node { 243struct nes_cm_node {
245 u64 session_id;
246 u32 hashkey; 244 u32 hashkey;
247 245
248 nes_addr_t loc_addr, rem_addr; 246 nes_addr_t loc_addr, rem_addr;
@@ -327,7 +325,6 @@ struct nes_cm_event {
327 325
328struct nes_cm_core { 326struct nes_cm_core {
329 enum nes_cm_node_state state; 327 enum nes_cm_node_state state;
330 atomic_t session_id;
331 328
332 atomic_t listen_node_cnt; 329 atomic_t listen_node_cnt;
333 struct nes_cm_node listen_list; 330 struct nes_cm_node listen_list;
@@ -383,35 +380,10 @@ struct nes_cm_ops {
383 int (*set)(struct nes_cm_core *, u32, u32); 380 int (*set)(struct nes_cm_core *, u32, u32);
384}; 381};
385 382
386
387int send_mpa_request(struct nes_cm_node *);
388struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
389 void *, u32, void *, u32, u8);
390int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *, 383int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
391 enum nes_timer_type, int, int); 384 enum nes_timer_type, int, int);
392void nes_cm_timer_tick(unsigned long);
393int send_syn(struct nes_cm_node *, u32);
394int send_reset(struct nes_cm_node *);
395int send_ack(struct nes_cm_node *);
396int send_fin(struct nes_cm_node *, struct sk_buff *);
397struct sk_buff *get_free_pkt(struct nes_cm_node *);
398int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
399
400struct nes_cm_node * mini_cm_connect(struct nes_cm_core *,
401 struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *);
402int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
403int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
404int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
405int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
406struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *);
407int mini_cm_dealloc_core(struct nes_cm_core *);
408int mini_cm_get(struct nes_cm_core *);
409int mini_cm_set(struct nes_cm_core *, u32, u32);
410 385
411int nes_cm_disconn(struct nes_qp *); 386int nes_cm_disconn(struct nes_qp *);
412void nes_disconnect_worker(struct work_struct *);
413int nes_cm_disconn_true(struct nes_qp *);
414int nes_disconnect(struct nes_qp *, int);
415 387
416int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *); 388int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
417int nes_reject(struct iw_cm_id *, const void *, u8); 389int nes_reject(struct iw_cm_id *, const void *, u8);
@@ -423,11 +395,4 @@ int nes_cm_recv(struct sk_buff *, struct net_device *);
423int nes_cm_start(void); 395int nes_cm_start(void);
424int nes_cm_stop(void); 396int nes_cm_stop(void);
425 397
426/* CM event handler functions */
427void cm_event_connected(struct nes_cm_event *);
428void cm_event_connect_error(struct nes_cm_event *);
429void cm_event_reset(struct nes_cm_event *);
430void cm_event_mpa_req(struct nes_cm_event *);
431int nes_cm_post_event(struct nes_cm_event *);
432
433#endif /* NES_CM_H */ 398#endif /* NES_CM_H */
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 49e53e4c1ebe..aa53aab91bf8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -41,7 +41,7 @@
 
 #include "nes.h"
 
-u32 crit_err_count = 0;
+static u32 crit_err_count;
 u32 int_mod_timer_init;
 u32 int_mod_cq_depth_256;
 u32 int_mod_cq_depth_128;
@@ -53,6 +53,17 @@ u32 int_mod_cq_depth_1;
 
 #include "nes_cm.h"
 
+static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq);
+static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count);
+static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+		u8 OneG_Mode);
+static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
+static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq);
+static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq);
+static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+		struct nes_hw_aeqe *aeqe);
+static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
+static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
 
 #ifdef CONFIG_INFINIBAND_NES_DEBUG
 static unsigned char *nes_iwarp_state_str[] = {
@@ -370,7 +381,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 		nesadapter->et_use_adaptive_rx_coalesce = 1;
 		nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
 		nesadapter->et_rx_coalesce_usecs_irq = 0;
-		printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __FUNCTION__);
+		printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __func__);
 	}
 	/* Setup and enable the periodic timer */
 	if (nesadapter->et_rx_coalesce_usecs_irq)
@@ -382,7 +393,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 	nesadapter->base_pd = 1;
 
 	nesadapter->device_cap_flags =
-		IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW;
+		IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW;
 
 	nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
 			[(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
@@ -572,7 +583,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 		if (vendor_id == 0xffff)
 			break;
 	}
-	nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __FUNCTION__,
+	nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __func__,
 			func_index, pci_name(nesdev->pcidev));
 	nesadapter->adapter_fcn_count = func_index;
 
@@ -583,7 +594,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 /**
  * nes_reset_adapter_ne020
  */
-unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
+static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
 {
 	u32 port_count;
 	u32 u32temp;
@@ -691,7 +702,8 @@ unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
 /**
  * nes_init_serdes
  */
-int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 OneG_Mode)
+static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
+		u8 OneG_Mode)
 {
 	int i;
 	u32 u32temp;
@@ -739,7 +751,7 @@ int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 One
 				& 0x0000000f)) != 0x0000000f) && (i++ < 5000))
 			mdelay(1);
 		if (i >= 5000) {
-			printk("%s: Init: serdes 1 not ready, status=%x\n", __FUNCTION__, u32temp);
+			printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
 			/* return 1; */
 		}
 		nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
@@ -760,7 +772,7 @@ int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 One
  * nes_init_csr_ne020
  * Initialize registers for ne020 hardware
  */
-void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
+static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
 {
 	u32 u32temp;
 
@@ -1204,7 +1216,7 @@ int nes_init_phy(struct nes_device *nesdev)
 	if (nesadapter->OneG_Mode) {
 		nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
 		if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
-			printk(PFX "%s: Programming mdc config for 1G\n", __FUNCTION__);
+			printk(PFX "%s: Programming mdc config for 1G\n", __func__);
 			tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
 			tx_config |= 0x04;
 			nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
@@ -1358,7 +1370,7 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
 static void nes_rq_wqes_timeout(unsigned long parm)
 {
 	struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
-	printk("%s: Timer fired.\n", __FUNCTION__);
+	printk("%s: Timer fired.\n", __func__);
 	atomic_set(&nesvnic->rx_skb_timer_running, 0);
 	if (atomic_read(&nesvnic->rx_skbs_needed))
 		nes_replenish_nic_rq(nesvnic);
@@ -1909,7 +1921,7 @@ void nes_dpc(unsigned long param)
 /**
  * nes_process_ceq
  */
-void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
+static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
 {
 	u64 u64temp;
 	struct nes_hw_cq *cq;
@@ -1949,7 +1961,7 @@ void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
 /**
  * nes_process_aeq
  */
-void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
+static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
 {
 //	u64 u64temp;
 	u32 head;
@@ -2060,7 +2072,7 @@ static void nes_reset_link(struct nes_device *nesdev, u32 mac_index)
 /**
  * nes_process_mac_intr
  */
-void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 {
 	unsigned long flags;
 	u32 pcs_control_status;
@@ -2163,7 +2175,7 @@ void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 			temp_phy_data = phy_data;
 		} while (1);
 		nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
-			__FUNCTION__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
+			__func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
 
 	} else {
 		phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0;
@@ -2205,7 +2217,7 @@ void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 
 
 
-void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 {
 	struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
 
@@ -2428,7 +2440,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 /**
  * nes_cqp_ce_handler
  */
-void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
+static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
 {
 	u64 u64temp;
 	unsigned long flags;
@@ -2567,7 +2579,8 @@ void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
 /**
  * nes_process_iwarp_aeqe
  */
-void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
+static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+		struct nes_hw_aeqe *aeqe)
 {
 	u64 context;
 	u64 aeqe_context = 0;
@@ -2819,7 +2832,7 @@ void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
 			le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
 		if (resource_allocated) {
 			printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
-				__FUNCTION__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+				__func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
 		}
 		break;
 	case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index eee77da61935..34166641f207 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -802,7 +802,7 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
 
 	memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
 	printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
-			__FUNCTION__, netdev->addr_len,
+			__func__, netdev->addr_len,
 			mac_addr->sa_data[0], mac_addr->sa_data[1],
 			mac_addr->sa_data[2], mac_addr->sa_data[3],
 			mac_addr->sa_data[4], mac_addr->sa_data[5]);
@@ -832,7 +832,7 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
 /**
  * nes_netdev_set_multicast_list
  */
-void nes_netdev_set_multicast_list(struct net_device *netdev)
+static void nes_netdev_set_multicast_list(struct net_device *netdev)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
@@ -947,28 +947,6 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
 	return ret;
 }
 
-
-/**
- * nes_netdev_exit - destroy network device
- */
-void nes_netdev_exit(struct nes_vnic *nesvnic)
-{
-	struct net_device *netdev = nesvnic->netdev;
-	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
-
-	nes_debug(NES_DBG_SHUTDOWN, "\n");
-
-	// destroy the ibdevice if RDMA enabled
-	if ((nesvnic->rdma_enabled)&&(nesvnic->of_device_registered)) {
-		nes_destroy_ofa_device( nesibdev );
-		nesvnic->of_device_registered = 0;
-		nesvnic->nesibdev = NULL;
-	}
-	unregister_netdev(netdev);
-	nes_debug(NES_DBG_SHUTDOWN, "\n");
-}
-
-
 #define NES_ETHTOOL_STAT_COUNT 55
 static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
 	"Link Change Interrupts",
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index c4ec6ac63461..f9db07c2717d 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -566,7 +566,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
 				cqp_request);
 	} else
 		printk(KERN_ERR PFX "%s: Could not allocated a CQP request.\n",
-			__FUNCTION__);
+			__func__);
 
 	return cqp_request;
 }
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index a651e9d9f0ef..7c27420c2240 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -49,6 +49,7 @@ atomic_t mod_qp_timouts;
 atomic_t qps_created;
 atomic_t sw_qps_destroyed;
 
+static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
 
 /**
  * nes_alloc_mw
@@ -1043,10 +1044,10 @@ static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
 	u8 sq_pbl_entries;
 
 	pbl_entries = nespbl->pbl_size >> 3;
-	nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%p\n",
+	nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%lx\n",
			nespbl->pbl_size, pbl_entries,
			(void *)nespbl->pbl_vbase,
-			(void *)nespbl->pbl_pbase);
+			(unsigned long) nespbl->pbl_pbase);
 	pbl = (__le64 *) nespbl->pbl_vbase; /* points to first pbl entry */
 	/* now lets set the sq_vbase as well as rq_vbase addrs we will assign */
 	/* the first pbl to be fro the rq_vbase... */
@@ -1074,9 +1075,9 @@ static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
 	/* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */
 	/*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */
 
-	nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%p rq_vbase=%p rq_pbase=%p\n",
-			nesqp->hwqp.sq_vbase, (void *)nesqp->hwqp.sq_pbase,
-			nesqp->hwqp.rq_vbase, (void *)nesqp->hwqp.rq_pbase);
+	nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%lx rq_vbase=%p rq_pbase=%lx\n",
+			nesqp->hwqp.sq_vbase, (unsigned long) nesqp->hwqp.sq_pbase,
+			nesqp->hwqp.rq_vbase, (unsigned long) nesqp->hwqp.rq_pbase);
 	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
 	if (!nesadapter->free_256pbl) {
 		pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
@@ -1251,6 +1252,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 	u8 rq_encoded_size;
 	/* int counter; */
 
+	if (init_attr->create_flags)
+		return ERR_PTR(-EINVAL);
+
 	atomic_inc(&qps_created);
 	switch (init_attr->qp_type) {
 		case IB_QPT_RC:
@@ -1908,13 +1912,13 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
 			nesadapter->free_256pbl++;
 			if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
 				printk(KERN_ERR PFX "%s: free 256B PBLs(%u) has exceeded the max(%u)\n",
-					__FUNCTION__, nesadapter->free_256pbl, nesadapter->max_256pbl);
+					__func__, nesadapter->free_256pbl, nesadapter->max_256pbl);
 			}
 		} else if (nescq->virtual_cq == 2) {
 			nesadapter->free_4kpbl++;
 			if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
 				printk(KERN_ERR PFX "%s: free 4K PBLs(%u) has exceeded the max(%u)\n",
-					__FUNCTION__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
+					__func__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
 			}
 			opcode |= NES_CQP_CQ_4KB_CHUNK;
 		}
@@ -2653,10 +2657,10 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	nespbl->pbl_vbase = (u64 *)pbl;
 	nespbl->user_base = start;
-	nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%p,"
+	nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%lx,"
			" pbl_vbase=%p user_base=0x%lx\n",
-			nespbl->pbl_size, (void *)nespbl->pbl_pbase,
-			(void*)nespbl->pbl_vbase, nespbl->user_base);
+			nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase,
+			(void *) nespbl->pbl_vbase, nespbl->user_base);
 
 	list_for_each_entry(chunk, &region->chunk_list, list) {
 		for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
@@ -3895,14 +3899,11 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
 /**
  * nes_unregister_ofa_device
  */
-void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
+static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
 {
 	struct nes_vnic *nesvnic = nesibdev->nesvnic;
 	int i;
 
-	if (nesibdev == NULL)
-		return;
-
 	for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
 		class_device_remove_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
 	}
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index 98ee38e8c2c4..3090100f0de7 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -4,7 +4,8 @@ ib_ipoib-y := ipoib_main.o \
 		ipoib_ib.o \
 		ipoib_multicast.o \
 		ipoib_verbs.o \
-		ipoib_vlan.o
+		ipoib_vlan.o \
+		ipoib_ethtool.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_CM) += ipoib_cm.o
 ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG) += ipoib_fs.o
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 054fab8e27a0..73b2b176ad0e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -87,6 +87,7 @@ enum {
 	IPOIB_MCAST_STARTED	  = 8,
 	IPOIB_FLAG_ADMIN_CM	  = 9,
 	IPOIB_FLAG_UMCAST	  = 10,
+	IPOIB_FLAG_CSUM		  = 11,
 
 	IPOIB_MAX_BACKOFF_SECONDS = 16,
 
@@ -241,6 +242,11 @@ struct ipoib_cm_dev_priv {
 	int			num_frags;
 };
 
+struct ipoib_ethtool_st {
+	u16 coalesce_usecs;
+	u16 max_coalesced_frames;
+};
+
 /*
  * Device private locking: tx_lock protects members used in TX fast
  * path (and we use LLTX so upper layers don't do extra locking).
@@ -318,6 +324,8 @@ struct ipoib_dev_priv {
 	struct dentry *mcg_dentry;
 	struct dentry *path_dentry;
 #endif
+	int hca_caps;
+	struct ipoib_ethtool_st ethtool;
 };
 
 struct ipoib_ah {
@@ -458,6 +466,8 @@ void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
 
+void ipoib_set_ethtool_ops(struct net_device *dev);
+
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 
 #define IPOIB_FLAGS_RC 0x80
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 2490b2d79dbb..9db7b0bd9134 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1007,9 +1007,9 @@ static int ipoib_cm_modify_tx_init(struct net_device *dev,
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
-	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
+	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
 	if (ret) {
-		ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
+		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
 		return ret;
 	}
 
@@ -1383,6 +1383,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
+
+		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
 		ipoib_flush_paths(dev);
 		return count;
 	}
@@ -1391,6 +1395,13 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 		dev->mtu = min(priv->mcast_mtu, dev->mtu);
 		ipoib_flush_paths(dev);
+
+		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
+			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+			if (priv->hca_caps & IB_DEVICE_UD_TSO)
+				dev->features |= NETIF_F_TSO;
+		}
+
 		return count;
 	}
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
new file mode 100644
index 000000000000..9a47428366c9
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ipoib.h"
+
+static void ipoib_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
+}
+
+static int ipoib_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
+	coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs;
+	coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
+	coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
+
+	return 0;
+}
+
+static int ipoib_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	int ret;
+
+	/*
+	 * Since IPoIB uses a single CQ for both rx and tx, we assume
+	 * that rx params dictate the configuration. These values are
+	 * saved in the private data and returned when ipoib_get_coalesce()
+	 * is called.
+	 */
+	if (coal->rx_coalesce_usecs > 0xffff ||
+	    coal->rx_max_coalesced_frames > 0xffff)
+		return -EINVAL;
+
+	ret = ib_modify_cq(priv->cq, coal->rx_max_coalesced_frames,
+			   coal->rx_coalesce_usecs);
+	if (ret && ret != -ENOSYS) {
+		ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
+		return ret;
+	}
+
+	coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
+	coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
+	priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
+	priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
+
+	return 0;
+}
+
+static const struct ethtool_ops ipoib_ethtool_ops = {
+	.get_drvinfo	= ipoib_get_drvinfo,
+	.get_tso	= ethtool_op_get_tso,
+	.get_coalesce	= ipoib_get_coalesce,
+	.set_coalesce	= ipoib_set_coalesce,
+};
+
+void ipoib_set_ethtool_ops(struct net_device *dev)
+{
+	SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops);
+}
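
The -ENOSYS tolerance in ipoib_set_coalesce() works because CQ moderation is an optional device method. A minimal sketch of the core ib_modify_cq() wrapper it calls (an assumption based on the core verbs changes in this same batch, shown only for orientation, not part of this diff):

	/*
	 * Sketch (assumption): the core verbs wrapper behind
	 * ipoib_set_coalesce().  Drivers that cannot moderate a CQ
	 * leave the modify_cq method NULL, so callers see -ENOSYS
	 * and can fall back gracefully.
	 */
	int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
	{
		return cq->device->modify_cq ?
			cq->device->modify_cq(cq, cq_count, cq_period) :
			-ENOSYS;
	}

From user space this path is exercised with the standard coalescing knobs, e.g. "ethtool -C ib0 rx-usecs 10 rx-frames 16".
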
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 08c4396cf418..0205eb7c1bd3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -39,6 +39,8 @@
 #include <linux/dma-mapping.h>
 
 #include <rdma/ib_cache.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
 
 #include "ipoib.h"
 
@@ -231,6 +233,10 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb->dev = dev;
 	/* XXX get correct PACKET_ type here */
 	skb->pkt_type = PACKET_HOST;
+
+	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
 	netif_receive_skb(skb);
 
 repost:
@@ -245,29 +251,37 @@ static int ipoib_dma_map_tx(struct ib_device *ca,
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
 	int i;
+	int off;
 
-	mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
-				       DMA_TO_DEVICE);
-	if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
-		return -EIO;
+	if (skb_headlen(skb)) {
+		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+					       DMA_TO_DEVICE);
+		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+			return -EIO;
+
+		off = 1;
+	} else
+		off = 0;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		mapping[i + 1] = ib_dma_map_page(ca, frag->page,
+		mapping[i + off] = ib_dma_map_page(ca, frag->page,
						 frag->page_offset, frag->size,
						 DMA_TO_DEVICE);
-		if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
+		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
 			goto partial_error;
 	}
 	return 0;
 
 partial_error:
-	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
-
 	for (; i > 0; --i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
-		ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE);
+		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
 	}
+
+	if (off)
+		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
 	return -EIO;
 }
 
@@ -277,12 +291,17 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
 	struct sk_buff *skb = tx_req->skb;
 	u64 *mapping = tx_req->mapping;
 	int i;
+	int off;
 
-	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+	if (skb_headlen(skb)) {
+		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+		off = 1;
+	} else
+		off = 0;
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		ib_dma_unmap_page(ca, mapping[i + 1], frag->size,
+		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
				  DMA_TO_DEVICE);
 	}
 }
@@ -388,24 +407,40 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
-			    u64 *mapping, int headlen,
-			    skb_frag_t *frags,
-			    int nr_frags)
+			    struct ipoib_tx_buf *tx_req,
+			    void *head, int hlen)
 {
 	struct ib_send_wr *bad_wr;
-	int i;
+	int i, off;
+	struct sk_buff *skb = tx_req->skb;
+	skb_frag_t *frags = skb_shinfo(skb)->frags;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	u64 *mapping = tx_req->mapping;
+
+	if (skb_headlen(skb)) {
+		priv->tx_sge[0].addr = mapping[0];
+		priv->tx_sge[0].length = skb_headlen(skb);
+		off = 1;
+	} else
+		off = 0;
 
-	priv->tx_sge[0].addr = mapping[0];
-	priv->tx_sge[0].length = headlen;
 	for (i = 0; i < nr_frags; ++i) {
-		priv->tx_sge[i + 1].addr = mapping[i + 1];
-		priv->tx_sge[i + 1].length = frags[i].size;
+		priv->tx_sge[i + off].addr = mapping[i + off];
+		priv->tx_sge[i + off].length = frags[i].size;
 	}
-	priv->tx_wr.num_sge = nr_frags + 1;
+	priv->tx_wr.num_sge = nr_frags + off;
 	priv->tx_wr.wr_id = wr_id;
 	priv->tx_wr.wr.ud.remote_qpn = qpn;
 	priv->tx_wr.wr.ud.ah = address;
 
+	if (head) {
+		priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size;
+		priv->tx_wr.wr.ud.header = head;
+		priv->tx_wr.wr.ud.hlen = hlen;
+		priv->tx_wr.opcode = IB_WR_LSO;
+	} else
+		priv->tx_wr.opcode = IB_WR_SEND;
+
 	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
 }
 
@@ -414,14 +449,30 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_tx_buf *tx_req;
-
-	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
-		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
-			   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-		++dev->stats.tx_dropped;
-		++dev->stats.tx_errors;
-		ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
-		return;
+	int hlen;
+	void *phead;
+
+	if (skb_is_gso(skb)) {
+		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		phead = skb->data;
+		if (unlikely(!skb_pull(skb, hlen))) {
+			ipoib_warn(priv, "linear data too small\n");
+			++dev->stats.tx_dropped;
+			++dev->stats.tx_errors;
+			dev_kfree_skb_any(skb);
+			return;
+		}
+	} else {
+		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
+			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
+				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
+			++dev->stats.tx_dropped;
+			++dev->stats.tx_errors;
+			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
+			return;
+		}
+		phead = NULL;
+		hlen = 0;
 	}
 
 	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
@@ -442,10 +493,13 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		return;
 	}
 
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
+	else
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
 	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
-			       address->ah, qpn,
-			       tx_req->mapping, skb_headlen(skb),
-			       skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) {
+			       address->ah, qpn, tx_req, phead, hlen))) {
 		ipoib_warn(priv, "post_send failed\n");
 		++dev->stats.tx_errors;
 		ipoib_dma_unmap_tx(priv->ca, tx_req);
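
The LSO branch in post_send() hands the protocol headers to the HCA out of band: ipoib_send() pulls them off the linear data, and the hardware replays them in front of each mss-sized payload segment. An abridged sketch of the UD work-request fields involved (field set assumed from the companion ib_verbs.h change, for context only):

	struct ib_ah;	/* opaque address handle */

	/*
	 * Abridged sketch (assumption): the UD part of struct ib_send_wr
	 * that the IB_WR_LSO branch fills in.  header/hlen carry the
	 * header template; mss bounds each generated datagram's payload.
	 */
	struct ud_wr_sketch {
		struct ib_ah *ah;	/* destination address handle */
		void         *header;	/* LSO: header template (phead) */
		int           hlen;	/* LSO: template length */
		int           mss;	/* LSO: skb_shinfo(skb)->gso_size */
		u32           remote_qpn;
		u32           remote_qkey;
	};
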
@@ -540,7 +594,7 @@ static void ipoib_pkey_dev_check_presence(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	u16 pkey_index = 0;
 
-	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
+	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
 		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 	else
 		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
@@ -781,13 +835,13 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
 		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 		ipoib_ib_dev_down(dev, 0);
 		ipoib_ib_dev_stop(dev, 0);
-		ipoib_pkey_dev_delay_open(dev);
-		return;
+		if (ipoib_pkey_dev_delay_open(dev))
+			return;
 	}
-	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 
 	/* restart QP only if P_Key index is changed */
-	if (new_index == priv->pkey_index) {
+	if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+	    new_index == priv->pkey_index) {
 		ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
 		return;
 	}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 57282048865c..bd07f02cf02b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -359,8 +359,7 @@ void ipoib_flush_paths(struct net_device *dev)
 	spin_lock_irq(&priv->tx_lock);
 	spin_lock(&priv->lock);
 
-	list_splice(&priv->path_list, &remove_list);
-	INIT_LIST_HEAD(&priv->path_list);
+	list_splice_init(&priv->path_list, &remove_list);
 
 	list_for_each_entry(path, &remove_list, list)
 		rb_erase(&path->rb_node, &priv->path_tree);
@@ -952,6 +951,8 @@ static void ipoib_setup(struct net_device *dev)
 	dev->set_multicast_list = ipoib_set_mcast_list;
 	dev->neigh_setup        = ipoib_neigh_setup_dev;
 
+	ipoib_set_ethtool_ops(dev);
+
 	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
 
 	dev->watchdog_timeo = HZ;
@@ -1105,6 +1106,7 @@ static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
 {
 	struct ipoib_dev_priv *priv;
+	struct ib_device_attr *device_attr;
 	int result = -ENOMEM;
 
 	priv = ipoib_intf_alloc(format);
@@ -1120,6 +1122,29 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto device_init_failed;
 	}
 
+	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
+	if (!device_attr) {
+		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
+		       hca->name, sizeof *device_attr);
+		goto device_init_failed;
+	}
+
+	result = ib_query_device(hca, device_attr);
+	if (result) {
+		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
+		       hca->name, result);
+		kfree(device_attr);
+		goto device_init_failed;
+	}
+	priv->hca_caps = device_attr->device_cap_flags;
+
+	kfree(device_attr);
+
+	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
+		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+	}
+
 	/*
 	 * Set the full membership bit, so that we join the right
 	 * broadcast group, etc.
@@ -1137,7 +1162,6 @@ static struct net_device *ipoib_add_port(const char *format,
 	} else
 		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
 
-
 	result = ipoib_dev_init(priv->dev, hca, port);
 	if (result < 0) {
 		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
@@ -1155,6 +1179,9 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto event_failed;
 	}
 
+	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
+		priv->dev->features |= NETIF_F_TSO;
+
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index a3aeb911f024..8a20e3742c43 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -192,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	init_attr.send_cq = priv->cq;
 	init_attr.recv_cq = priv->cq;
 
+	if (priv->hca_caps & IB_DEVICE_UD_TSO)
+		init_attr.create_flags = IB_QP_CREATE_IPOIB_UD_LSO;
+
 	if (dev->features & NETIF_F_SG)
 		init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
 
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 83247f1fdf72..08dc81c46f41 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -405,7 +405,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 	struct iser_dto *send_dto = NULL;
 	unsigned long buf_offset;
 	unsigned long data_seg_len;
-	unsigned int itt;
+	uint32_t itt;
 	int err = 0;
 
 	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
@@ -416,7 +416,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
 	if (iser_check_xmit(conn, ctask))
 		return -ENOBUFS;
 
-	itt = ntohl(hdr->itt);
+	itt = (__force uint32_t)hdr->itt;
 	data_seg_len = ntoh24(hdr->dlength);
 	buf_offset = ntohl(hdr->offset);
 
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 993f0a8ff28f..d19cfe605ebb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -473,11 +473,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 		iser_connect_error(cma_id);
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
-		iser_disconnected_handler(cma_id);
-		break;
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-		iser_err("Device removal is currently unsupported\n");
-		BUG();
+		iser_disconnected_handler(cma_id);
 		break;
 	default:
 		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fd4a49fc4773..125765aa9d59 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -68,7 +68,7 @@ static int srp_max_iu_len;
 
 module_param(srp_sg_tablesize, int, 0444);
 MODULE_PARM_DESC(srp_sg_tablesize,
-		 "Max number of gather/scatter entries per I/O (default is 12)");
+		 "Max number of gather/scatter entries per I/O (default is 12, max 255)");
 
 static int topspin_workarounds = 1;
 
@@ -2138,6 +2138,11 @@ static int __init srp_init_module(void)
 {
 	int ret;
 
+	if (srp_sg_tablesize > 255) {
+		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
+		srp_sg_tablesize = 255;
+	}
+
 	ib_srp_transport_template =
 		srp_attach_transport(&ib_srp_transport_functions);
 	if (!ib_srp_transport_template)
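
The new clamp reflects a wire-format limit: SRP_CMD encodes its data-out and data-in descriptor counts in single bytes, so more than 255 scatter/gather entries cannot be expressed per request. A sketch of the relevant fields (an assumption paraphrasing include/scsi/srp.h, for context only):

	/*
	 * Abridged sketch (assumption, cf. include/scsi/srp.h): the 8-bit
	 * descriptor counts in the SRP_CMD request are why srp_sg_tablesize
	 * is capped at 255 above.
	 */
	struct srp_cmd_counts_sketch {
		u8 data_out_desc_cnt;	/* at most 255 gather entries */
		u8 data_in_desc_cnt;	/* at most 255 scatter entries */
	};
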
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 6b32ec94b3a8..aa9528779044 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -69,7 +69,7 @@ static void poll_catas(unsigned long dev_ptr)
 	if (readl(priv->catas_err.map)) {
 		dump_err_buf(dev);
 
-		mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
 
 		if (internal_err_reset) {
 			spin_lock(&catas_lock);
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index db49051b97b1..70dff94a8bc6 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -106,7 +106,8 @@ struct mlx4_cmd_context {
 	u16			token;
 };
 
-static int mlx4_status_to_errno(u8 status) {
+static int mlx4_status_to_errno(u8 status)
+{
 	static const int trans_table[] = {
 		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
 		[CMD_STAT_BAD_OP]	  = -EPERM,
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index d4441fee3d80..caa5bcf54e35 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -38,6 +38,7 @@
 #include <linux/hardirq.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
 
 #include "mlx4.h"
 #include "icm.h"
@@ -47,21 +48,19 @@ struct mlx4_cq_context {
 	u16			reserved1[3];
 	__be16			page_offset;
 	__be32			logsize_usrpage;
-	u8			reserved2;
-	u8			cq_period;
-	u8			reserved3;
-	u8			cq_max_count;
-	u8			reserved4[3];
+	__be16			cq_period;
+	__be16			cq_max_count;
+	u8			reserved2[3];
 	u8			comp_eqn;
 	u8			log_page_size;
-	u8			reserved5[2];
+	u8			reserved3[2];
 	u8			mtt_base_addr_h;
 	__be32			mtt_base_addr_l;
 	__be32			last_notified_index;
 	__be32			solicit_producer_index;
 	__be32			consumer_index;
 	__be32			producer_index;
-	u32			reserved6[2];
+	u32			reserved4[2];
 	__be64			db_rec_addr;
 };
 
@@ -121,6 +120,13 @@ static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			MLX4_CMD_TIME_CLASS_A);
 }
 
+static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+			  int cq_num, u32 opmod)
+{
+	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
+			MLX4_CMD_TIME_CLASS_A);
+}
+
 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
 {
@@ -129,6 +135,58 @@ static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			MLX4_CMD_TIME_CLASS_A);
 }
 
+int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   u16 count, u16 period)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_cq_context *cq_context;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cq_context = mailbox->buf;
+	memset(cq_context, 0, sizeof *cq_context);
+
+	cq_context->cq_max_count = cpu_to_be16(count);
+	cq_context->cq_period = cpu_to_be16(period);
+
+	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_modify);
+
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   int entries, struct mlx4_mtt *mtt)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_cq_context *cq_context;
+	u64 mtt_addr;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cq_context = mailbox->buf;
+	memset(cq_context, 0, sizeof *cq_context);
+
+	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
+	cq_context->log_page_size = mtt->page_shift - 12;
+	mtt_addr = mlx4_mtt_addr(dev, mtt);
+	cq_context->mtt_base_addr_h = mtt_addr >> 32;
+	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
 {
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 9c36c2034030..e141a1513f07 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -202,7 +202,10 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			break;
 
 		case MLX4_EVENT_TYPE_PORT_CHANGE:
-			mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
+			mlx4_dispatch_event(dev,
+					    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
+					    MLX4_DEV_EVENT_PORT_UP :
+					    MLX4_DEV_EVENT_PORT_DOWN,
					    be32_to_cpu(eqe->event.port_change.port) >> 28);
 			break;
 
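
With the raw EQE subtype folded away at the EQ level, interface drivers only ever see abstract event codes. A sketch of the enum they now receive, assumed from the companion include/linux/mlx4/driver.h change (reproduced here for context only):

	/* Sketch (assumption, cf. include/linux/mlx4/driver.h): the
	 * abstract codes mlx4_dispatch_event() hands to consumers. */
	enum mlx4_dev_event {
		MLX4_DEV_EVENT_CATASTROPHIC_ERROR,
		MLX4_DEV_EVENT_PORT_UP,
		MLX4_DEV_EVENT_PORT_DOWN,
		MLX4_DEV_EVENT_PORT_REINIT,
	};
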
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 61dc4951d6b0..d82f2751d2c7 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -133,6 +133,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
@@ -215,6 +216,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
 	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+	field &= 0x1f;
+	if (!field)
+		dev_cap->max_gso_sz = 0;
+	else
+		dev_cap->max_gso_sz = 1 << field;
+
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
 	dev_cap->max_rdma_global = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
@@ -377,6 +385,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
 	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
 		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
 
 	dump_dev_cap_flags(dev, dev_cap->flags);
 
@@ -696,6 +705,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	/* Check port for UD address vector: */
 	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
 
+	/* Enable IPoIB checksumming if we can: */
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
+		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
+
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
 	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index e16dec890413..306cb9b0242d 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -96,6 +96,7 @@ struct mlx4_dev_cap {
 	u8  bmme_flags;
 	u32 reserved_lkey;
 	u64 max_icm_sz;
+	int max_gso_sz;
 };
 
 struct mlx4_adapter {
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index be5d9e90ccf2..4a6c4d526f1b 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -30,8 +30,6 @@
  * SOFTWARE.
  */
 
-#include <linux/mlx4/driver.h>
-
 #include "mlx4.h"
 
 struct mlx4_device_context {
@@ -113,8 +111,7 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
 
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
-			 int subtype, int port)
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_device_context *dev_ctx;
@@ -124,8 +121,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
 
 	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
 		if (dev_ctx->intf->event)
-			dev_ctx->intf->event(dev, dev_ctx->context, type,
-					     subtype, port);
+			dev_ctx->intf->event(dev, dev_ctx->context, type, port);
 
 	spin_unlock_irqrestore(&priv->ctx_lock, flags);
 }
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 08bfc130a33e..49a4acab5e82 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -76,7 +76,7 @@ static char mlx4_version[] __devinitdata =
76 DRV_VERSION " (" DRV_RELDATE ")\n"; 76 DRV_VERSION " (" DRV_RELDATE ")\n";
77 77
78static struct mlx4_profile default_profile = { 78static struct mlx4_profile default_profile = {
79 .num_qp = 1 << 16, 79 .num_qp = 1 << 17,
80 .num_srq = 1 << 16, 80 .num_srq = 1 << 16,
81 .rdmarc_per_qp = 1 << 4, 81 .rdmarc_per_qp = 1 << 4,
82 .num_cq = 1 << 16, 82 .num_cq = 1 << 16,
@@ -159,6 +159,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags = dev_cap->flags;
 	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
 
 	return 0;
 }
@@ -735,8 +736,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	/*
-	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
-	 * be present)
+	 * Check for BARs.  We expect 0: 1MB
 	 */
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 	    pci_resource_len(pdev, 0) != 1 << 20) {
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index a99e7729d333..57f7f1f0d4ec 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -190,10 +190,6 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
 		}
 		index += dev->caps.num_mgms;
 
-		err = mlx4_READ_MCG(dev, index, mailbox);
-		if (err)
-			goto out;
-
 		memset(mgm, 0, sizeof *mgm);
 		memcpy(mgm->gid, gid, 16);
 	}
@@ -301,12 +297,10 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
 	mgm->qp[loc]   = mgm->qp[i - 1];
 	mgm->qp[i - 1] = 0;
 
-	err = mlx4_WRITE_MCG(dev, index, mailbox);
-	if (err)
-		goto out;
-
-	if (i != 1)
-		goto out;
+	if (i != 1) {
+		err = mlx4_WRITE_MCG(dev, index, mailbox);
+		goto out;
+	}
 
 	if (prev == -1) {
 		/* Remove entry from MGM */
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 53a1cdddfc13..73336810e652 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -42,6 +42,7 @@
 #include <linux/timer.h>
 
 #include <linux/mlx4/device.h>
+#include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
 
 #define DRV_NAME	"mlx4_core"
47#define DRV_NAME "mlx4_core" 48#define DRV_NAME "mlx4_core"
@@ -313,8 +314,7 @@ void mlx4_catas_cleanup(void);
 int mlx4_restart_one(struct pci_dev *pdev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
-			 int subtype, int port);
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
 
 struct mlx4_dev_cap;
 struct mlx4_init_hca_param;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 7d1eaa97de13..77323a72dd3c 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -81,7 +81,7 @@ enum {
 	MLX4_CMD_SW2HW_CQ	 = 0x16,
 	MLX4_CMD_HW2SW_CQ	 = 0x17,
 	MLX4_CMD_QUERY_CQ	 = 0x18,
-	MLX4_CMD_RESIZE_CQ	 = 0x2c,
+	MLX4_CMD_MODIFY_CQ	 = 0x2c,
 
 	/* SRQ commands */
 	MLX4_CMD_SW2HW_SRQ	 = 0x35,
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 0181e0a57cbf..071cf96cf01f 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -45,11 +45,11 @@ struct mlx4_cqe {
 	u8 sl;
 	u8 reserved1;
 	__be16 rlid;
-	u32 reserved2;
+	__be32 ipoib_status;
 	__be32 byte_cnt;
 	__be16 wqe_index;
 	__be16 checksum;
-	u8 reserved3[3];
+	u8 reserved2[3];
 	u8 owner_sr_opcode;
 };
 
@@ -85,6 +85,16 @@ enum {
 	MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
 };
 
+enum {
+	MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22,
+	MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23,
+	MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24,
+	MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25,
+	MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26,
+	MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27,
+	MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28,
+};
+
 static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
 			       void __iomem *uar_page,
 			       spinlock_t *doorbell_lock)
@@ -120,4 +130,9 @@ enum {
 	MLX4_CQ_DB_REQ_NOT = 2 << 24
 };
 
+int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   u16 count, u16 period);
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+		   int entries, struct mlx4_mtt *mtt);
+
 #endif /* MLX4_CQ_H */
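The new ipoib_status CQE word carries the MLX4_CQE_IPOIB_STATUS_* bits above, so a receive path can tell whether the HCA already validated the IP header and the TCP/UDP checksum. A minimal consumer sketch, assuming a polled struct mlx4_cqe *cqe from the surrounding driver (the helper name is illustrative, not part of this patch):

	/* Nonzero if hardware verified the IP header and a TCP or UDP
	 * checksum for this completion; the caller may then skip software
	 * checksumming (e.g. mark the skb CHECKSUM_UNNECESSARY). */
	static int mlx4_cqe_csum_ok(struct mlx4_cqe *cqe)
	{
		return (cqe->ipoib_status &
			cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPOK)) &&
		       (cqe->ipoib_status &
			cpu_to_be32(MLX4_CQE_IPOIB_STATUS_TCP |
				    MLX4_CQE_IPOIB_STATUS_UDP));
	}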
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6cdf813cd478..ff7df1a2222f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -186,6 +186,7 @@ struct mlx4_caps {
 	u32 flags;
 	u16 stat_rate_support;
 	u8 port_width_cap[MLX4_MAX_PORTS + 1];
+	int max_gso_sz;
 };
 
 struct mlx4_buf_list {
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 1b835ca49df1..53c5fdb6eac4 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -48,8 +48,7 @@ struct mlx4_interface {
 	void * (*add) (struct mlx4_dev *dev);
 	void (*remove)(struct mlx4_dev *dev, void *context);
 	void (*event) (struct mlx4_dev *dev, void *context,
-		       enum mlx4_dev_event event, int subtype,
-		       int port);
+		       enum mlx4_dev_event event, int port);
 	struct list_head list;
 };
 
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 09a2230923f2..a5e43febee4f 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -158,10 +158,12 @@ struct mlx4_qp_context {
 #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
 
 enum {
 	MLX4_WQE_CTRL_NEC = 1 << 29,
 	MLX4_WQE_CTRL_FENCE = 1 << 6,
 	MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
 	MLX4_WQE_CTRL_SOLICITED = 1 << 1,
+	MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
+	MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
 };
 
 struct mlx4_wqe_ctrl_seg {
@@ -217,6 +219,11 @@ struct mlx4_wqe_datagram_seg {
 	__be32 reservd[2];
 };
 
+struct mlx4_lso_seg {
+	__be32 mss_hdr_size;
+	__be32 header[0];
+};
+
 struct mlx4_wqe_bind_seg {
 	__be32 flags1;
 	__be32 flags2;
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 64a721fcbc1c..8d65bf0a625b 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -533,7 +533,10 @@ struct ib_uverbs_send_wr {
 	__u32 num_sge;
 	__u32 opcode;
 	__u32 send_flags;
-	__u32 imm_data;
+	union {
+		__u32 imm_data;
+		__u32 invalidate_rkey;
+	} ex;
 	union {
 		struct {
 			__u64 remote_addr;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 701e7b40560a..95bf4bac44cb 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -94,7 +94,7 @@ enum ib_device_cap_flags {
 	IB_DEVICE_SRQ_RESIZE = (1<<13),
 	IB_DEVICE_N_NOTIFY_CQ = (1<<14),
 	IB_DEVICE_ZERO_STAG = (1<<15),
-	IB_DEVICE_SEND_W_INV = (1<<16),
+	IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */
 	IB_DEVICE_MEM_WINDOW = (1<<17),
 	/*
 	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
@@ -104,6 +104,8 @@ enum ib_device_cap_flags {
 	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
 	 */
 	IB_DEVICE_UD_IP_CSUM = (1<<18),
+	IB_DEVICE_UD_TSO = (1<<19),
+	IB_DEVICE_SEND_W_INV = (1<<21),
 };
 
 enum ib_atomic_cap {
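A ULP is expected to test these capability bits before turning on the corresponding offloads. A sketch of the intended usage, assuming an ib_device *ibdev and a net_device *netdev from the surrounding driver (ib_query_device() and the NETIF_F_* flags are the existing kernel APIs; the exact feature wiring here is illustrative):

	struct ib_device_attr attr;

	if (!ib_query_device(ibdev, &attr)) {
		if (attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM)
			netdev->features |= NETIF_F_IP_CSUM; /* HW checksums UD traffic */
		if (attr.device_cap_flags & IB_DEVICE_UD_TSO)
			netdev->features |= NETIF_F_TSO; /* HW segments large UD sends */
	}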
@@ -411,6 +413,7 @@ enum ib_wc_opcode {
 	IB_WC_COMP_SWAP,
 	IB_WC_FETCH_ADD,
 	IB_WC_BIND_MW,
+	IB_WC_LSO,
 /*
  * Set value of IB_WC_RECV so consumers can test if a completion is a
  * receive by testing (opcode & IB_WC_RECV).
@@ -495,6 +498,10 @@ enum ib_qp_type {
 	IB_QPT_RAW_ETY
 };
 
+enum ib_qp_create_flags {
+	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
+};
+
 struct ib_qp_init_attr {
 	void (*event_handler)(struct ib_event *, void *);
 	void *qp_context;
@@ -504,6 +511,7 @@ struct ib_qp_init_attr {
 	struct ib_qp_cap cap;
 	enum ib_sig_type sq_sig_type;
 	enum ib_qp_type qp_type;
+	enum ib_qp_create_flags create_flags;
 	u8 port_num; /* special QP types only */
 };
 
@@ -617,7 +625,9 @@ enum ib_wr_opcode {
 	IB_WR_SEND_WITH_IMM,
 	IB_WR_RDMA_READ,
 	IB_WR_ATOMIC_CMP_AND_SWP,
-	IB_WR_ATOMIC_FETCH_AND_ADD
+	IB_WR_ATOMIC_FETCH_AND_ADD,
+	IB_WR_LSO,
+	IB_WR_SEND_WITH_INV,
 };
 
 enum ib_send_flags {
@@ -641,7 +651,10 @@ struct ib_send_wr {
 	int num_sge;
 	enum ib_wr_opcode opcode;
 	int send_flags;
-	__be32 imm_data;
+	union {
+		__be32 imm_data;
+		u32 invalidate_rkey;
+	} ex;
 	union {
 		struct {
 			u64 remote_addr;
@@ -655,6 +668,9 @@ struct ib_send_wr {
 		} atomic;
 		struct {
 			struct ib_ah *ah;
+			void *header;
+			int hlen;
+			int mss;
 			u32 remote_qpn;
 			u32 remote_qkey;
 			u16 pkey_index; /* valid for GSI only */
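The new ud fields feed IB_WR_LSO sends: header/hlen describe the prebuilt packet header the HCA replicates in front of each segment, and mss is the per-segment payload size. A minimal posting sketch, assuming qp, ah, a populated sge, and the header buffer and sizes come from the surrounding driver:

	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof wr);
	wr.opcode = IB_WR_LSO;
	wr.sg_list = &sge;		/* payload to be segmented */
	wr.num_sge = 1;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.wr.ud.ah = ah;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.wr.ud.remote_qkey = remote_qkey;
	wr.wr.ud.header = header;	/* prebuilt header, copied into the WQE */
	wr.wr.ud.hlen = hlen;
	wr.wr.ud.mss = mss;

	if (ib_post_send(qp, &wr, &bad_wr))
		; /* fall back to software segmentation */

The matching completion is reported with the new IB_WC_LSO opcode added above.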
@@ -730,7 +746,7 @@ struct ib_uobject {
 	struct ib_ucontext *context; /* associated user context */
 	void *object; /* containing object */
 	struct list_head list; /* link to context's list */
-	u32 id; /* index into kernel idr */
+	int id; /* index into kernel idr */
 	struct kref ref;
 	struct rw_semaphore mutex; /* protects .live */
 	int live;
@@ -971,6 +987,8 @@ struct ib_device {
 						   int comp_vector,
 						   struct ib_ucontext *context,
 						   struct ib_udata *udata);
+	int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
+			 u16 cq_period);
 	int (*destroy_cq)(struct ib_cq *cq);
 	int (*resize_cq)(struct ib_cq *cq, int cqe,
 			 struct ib_udata *udata);
@@ -1376,6 +1394,15 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
 int ib_resize_cq(struct ib_cq *cq, int cqe);
 
 /**
+ * ib_modify_cq - Modifies moderation params of the CQ
+ * @cq: The CQ to modify.
+ * @cq_count: number of CQEs that will trigger an event
+ * @cq_period: max period of time in usec before triggering an event
+ *
+ */
+int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+
+/**
  * ib_destroy_cq - Destroys the specified CQ.
  * @cq: The CQ to destroy.
  */
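Together with the modify_cq method added to struct ib_device above, this gives consumers a portable way to tune completion-event moderation at runtime. Illustrative usage, assuming a cq from the surrounding driver (the moderation values are arbitrary):

	/* Ask for an event only after 16 completions or 100 usec,
	 * whichever comes first; a device without a modify_cq method
	 * is expected to report the call as unsupported. */
	err = ib_modify_cq(cq, 16, 100);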
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index ffbf22a1d2ca..8ea283ecc522 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1573,7 +1573,6 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
 	send_wr.sg_list = req->rl_send_iov;
 	send_wr.num_sge = req->rl_niovs;
 	send_wr.opcode = IB_WR_SEND;
-	send_wr.imm_data = 0;
 	if (send_wr.num_sge == 4)	/* no need to sync any pad (constant) */
 		ib_dma_sync_single_for_device(ia->ri_id->device,
 			req->rl_send_iov[3].addr, req->rl_send_iov[3].length,