-rw-r--r--  drivers/infiniband/core/cm.c                   15
-rw-r--r--  drivers/infiniband/core/cm_msgs.h              22
-rw-r--r--  drivers/infiniband/core/device.c                4
-rw-r--r--  drivers/infiniband/core/mad.c                  40
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c              2
-rw-r--r--  drivers/infiniband/core/sa_query.c              2
-rw-r--r--  drivers/infiniband/core/sysfs.c                19
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c         30
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h          3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h           6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c           3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c           5
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c          17
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c           8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c      4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c        95
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c          2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c        4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c          2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c          4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.c   6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c       2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h      10
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c               27
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c               5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c        25
-rw-r--r--  drivers/infiniband/hw/nes/nes.c                 2
-rw-r--r--  drivers/infiniband/hw/nes/nes.h                 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c            586
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h             12
-rw-r--r--  drivers/infiniband/hw/nes/nes_context.h         2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c             17
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h              5
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c           142
-rw-r--r--  drivers/infiniband/hw/nes/nes_user.h            2
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c           2
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c         249
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.h           2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       9
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c        7
-rw-r--r--  drivers/net/mlx4/Makefile                       2
-rw-r--r--  drivers/net/mlx4/catas.c                       16
-rw-r--r--  drivers/net/mlx4/eq.c                          16
-rw-r--r--  drivers/net/mlx4/main.c                       106
-rw-r--r--  drivers/net/mlx4/mlx4.h                        27
-rw-r--r--  drivers/net/mlx4/port.c                        13
-rw-r--r--  drivers/net/mlx4/sense.c                      156
-rw-r--r--  include/linux/mlx4/cmd.h                        1
-rw-r--r--  include/linux/mlx4/device.h                     6
-rw-r--r--  include/rdma/ib_cm.h                           12
-rw-r--r--  include/rdma/ib_mad.h                           4
-rw-r--r--  include/rdma/ib_smi.h                          34
53 files changed, 1185 insertions(+), 631 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f1e82a92e61e..5130fc55b8e2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -927,8 +927,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	unsigned long flags;
 	int ret = 0;
 
-	service_mask = service_mask ? service_mask :
-		       __constant_cpu_to_be64(~0ULL);
+	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
 	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -954,7 +953,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	spin_lock_irqsave(&cm.lock, flags);
 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
 		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+		cm_id->service_mask = ~cpu_to_be64(0);
 	} else {
 		cm_id->service_id = service_id;
 		cm_id->service_mask = service_mask;
@@ -1134,7 +1133,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto error1;
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 				    param->primary_path->packet_life_time) * 2 +
 				 cm_convert_to_ms(
@@ -1545,7 +1544,7 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
@@ -2898,7 +2897,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		goto out;
 
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2992,7 +2991,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
@@ -3789,7 +3788,7 @@ static int __init ib_cm_init(void)
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
 	cm.listen_service_table = RB_ROOT;
-	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
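
The substitution above leans on the fact that the all-ones bit pattern is invariant under byte swapping, so negating a swapped zero yields the same constant as swapping a negated zero, and modern cpu_to_be64() folds at compile time for constant arguments. A minimal userspace sketch of the identity (the cpu_to_be64 stand-in below is illustrative, assuming a little-endian host; it is not the kernel helper):

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t cpu_to_be64(uint64_t x)
	{
		return __builtin_bswap64(x);	/* byte-swap stand-in */
	}

	int main(void)
	{
		/* ~0 is all ones in any byte order, so these agree. */
		uint64_t a = ~cpu_to_be64(0);
		uint64_t b = cpu_to_be64(~0ULL);

		printf("%d\n", a == b);		/* prints 1 */
		return 0;
	}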
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index aec9c7af825d..7e63c08f697c 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -44,17 +44,17 @@
44 44
45#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ 45#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
46 46
47#define CM_REQ_ATTR_ID __constant_htons(0x0010) 47#define CM_REQ_ATTR_ID cpu_to_be16(0x0010)
48#define CM_MRA_ATTR_ID __constant_htons(0x0011) 48#define CM_MRA_ATTR_ID cpu_to_be16(0x0011)
49#define CM_REJ_ATTR_ID __constant_htons(0x0012) 49#define CM_REJ_ATTR_ID cpu_to_be16(0x0012)
50#define CM_REP_ATTR_ID __constant_htons(0x0013) 50#define CM_REP_ATTR_ID cpu_to_be16(0x0013)
51#define CM_RTU_ATTR_ID __constant_htons(0x0014) 51#define CM_RTU_ATTR_ID cpu_to_be16(0x0014)
52#define CM_DREQ_ATTR_ID __constant_htons(0x0015) 52#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015)
53#define CM_DREP_ATTR_ID __constant_htons(0x0016) 53#define CM_DREP_ATTR_ID cpu_to_be16(0x0016)
54#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017) 54#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
55#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018) 55#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
56#define CM_LAP_ATTR_ID __constant_htons(0x0019) 56#define CM_LAP_ATTR_ID cpu_to_be16(0x0019)
57#define CM_APR_ATTR_ID __constant_htons(0x001A) 57#define CM_APR_ATTR_ID cpu_to_be16(0x001A)
58 58
59enum cm_msg_sequence { 59enum cm_msg_sequence {
60 CM_MSG_SEQUENCE_REQ, 60 CM_MSG_SEQUENCE_REQ,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7913b804311e..d1fba4153332 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -193,7 +193,7 @@ void ib_dealloc_device(struct ib_device *device)
193 193
194 BUG_ON(device->reg_state != IB_DEV_UNREGISTERED); 194 BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
195 195
196 ib_device_unregister_sysfs(device); 196 kobject_put(&device->dev.kobj);
197} 197}
198EXPORT_SYMBOL(ib_dealloc_device); 198EXPORT_SYMBOL(ib_dealloc_device);
199 199
@@ -348,6 +348,8 @@ void ib_unregister_device(struct ib_device *device)
348 348
349 mutex_unlock(&device_mutex); 349 mutex_unlock(&device_mutex);
350 350
351 ib_device_unregister_sysfs(device);
352
351 spin_lock_irqsave(&device->client_data_lock, flags); 353 spin_lock_irqsave(&device->client_data_lock, flags);
352 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) 354 list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
353 kfree(context); 355 kfree(context);
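
Together with the sysfs.c hunk further down, this splits sysfs teardown (now done in ib_unregister_device()) from memory teardown (ib_dealloc_device() just drops the kobject reference that ib_device_unregister_sysfs() takes). A userspace sketch of the get/put pairing, with purely illustrative names rather than kernel APIs:

	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int refcount;
	};

	static void obj_get(struct obj *o) { o->refcount++; }

	static void obj_put(struct obj *o)
	{
		if (--o->refcount == 0) {	/* last reference */
			printf("released\n");
			free(o);
		}
	}

	int main(void)
	{
		struct obj *o = calloc(1, sizeof(*o));

		o->refcount = 1;	/* reference from registration */
		obj_get(o);		/* "unregister_sysfs": hold extra ref */
		obj_put(o);		/* sysfs teardown drops its ref */
		obj_put(o);		/* "dealloc": final put frees the object */
		return 0;
	}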
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5c54fc2350be..de922a04ca2d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -301,6 +301,16 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
301 mad_agent_priv->agent.context = context; 301 mad_agent_priv->agent.context = context;
302 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; 302 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
303 mad_agent_priv->agent.port_num = port_num; 303 mad_agent_priv->agent.port_num = port_num;
304 spin_lock_init(&mad_agent_priv->lock);
305 INIT_LIST_HEAD(&mad_agent_priv->send_list);
306 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
307 INIT_LIST_HEAD(&mad_agent_priv->done_list);
308 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
309 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
310 INIT_LIST_HEAD(&mad_agent_priv->local_list);
311 INIT_WORK(&mad_agent_priv->local_work, local_completions);
312 atomic_set(&mad_agent_priv->refcount, 1);
313 init_completion(&mad_agent_priv->comp);
304 314
305 spin_lock_irqsave(&port_priv->reg_lock, flags); 315 spin_lock_irqsave(&port_priv->reg_lock, flags);
306 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; 316 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
@@ -350,17 +360,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
350 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list); 360 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
351 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 361 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
352 362
353 spin_lock_init(&mad_agent_priv->lock);
354 INIT_LIST_HEAD(&mad_agent_priv->send_list);
355 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
356 INIT_LIST_HEAD(&mad_agent_priv->done_list);
357 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
358 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
359 INIT_LIST_HEAD(&mad_agent_priv->local_list);
360 INIT_WORK(&mad_agent_priv->local_work, local_completions);
361 atomic_set(&mad_agent_priv->refcount, 1);
362 init_completion(&mad_agent_priv->comp);
363
364 return &mad_agent_priv->agent; 363 return &mad_agent_priv->agent;
365 364
366error4: 365error4:
@@ -743,9 +742,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
743 break; 742 break;
744 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: 743 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
745 kmem_cache_free(ib_mad_cache, mad_priv); 744 kmem_cache_free(ib_mad_cache, mad_priv);
746 kfree(local); 745 break;
747 ret = 1;
748 goto out;
749 case IB_MAD_RESULT_SUCCESS: 746 case IB_MAD_RESULT_SUCCESS:
750 /* Treat like an incoming receive MAD */ 747 /* Treat like an incoming receive MAD */
751 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, 748 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
@@ -756,10 +753,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
756 &mad_priv->mad.mad); 753 &mad_priv->mad.mad);
757 } 754 }
758 if (!port_priv || !recv_mad_agent) { 755 if (!port_priv || !recv_mad_agent) {
756 /*
757 * No receiving agent so drop packet and
758 * generate send completion.
759 */
759 kmem_cache_free(ib_mad_cache, mad_priv); 760 kmem_cache_free(ib_mad_cache, mad_priv);
760 kfree(local); 761 break;
761 ret = 0;
762 goto out;
763 } 762 }
764 local->mad_priv = mad_priv; 763 local->mad_priv = mad_priv;
765 local->recv_mad_agent = recv_mad_agent; 764 local->recv_mad_agent = recv_mad_agent;
@@ -2356,7 +2355,7 @@ static void local_completions(struct work_struct *work)
2356 struct ib_mad_local_private *local; 2355 struct ib_mad_local_private *local;
2357 struct ib_mad_agent_private *recv_mad_agent; 2356 struct ib_mad_agent_private *recv_mad_agent;
2358 unsigned long flags; 2357 unsigned long flags;
2359 int recv = 0; 2358 int free_mad;
2360 struct ib_wc wc; 2359 struct ib_wc wc;
2361 struct ib_mad_send_wc mad_send_wc; 2360 struct ib_mad_send_wc mad_send_wc;
2362 2361
@@ -2370,14 +2369,15 @@ static void local_completions(struct work_struct *work)
2370 completion_list); 2369 completion_list);
2371 list_del(&local->completion_list); 2370 list_del(&local->completion_list);
2372 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2371 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2372 free_mad = 0;
2373 if (local->mad_priv) { 2373 if (local->mad_priv) {
2374 recv_mad_agent = local->recv_mad_agent; 2374 recv_mad_agent = local->recv_mad_agent;
2375 if (!recv_mad_agent) { 2375 if (!recv_mad_agent) {
2376 printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); 2376 printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
2377 free_mad = 1;
2377 goto local_send_completion; 2378 goto local_send_completion;
2378 } 2379 }
2379 2380
2380 recv = 1;
2381 /* 2381 /*
2382 * Defined behavior is to complete response 2382 * Defined behavior is to complete response
2383 * before request 2383 * before request
@@ -2422,7 +2422,7 @@ local_send_completion:
2422 2422
2423 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2423 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2424 atomic_dec(&mad_agent_priv->refcount); 2424 atomic_dec(&mad_agent_priv->refcount);
2425 if (!recv) 2425 if (free_mad)
2426 kmem_cache_free(ib_mad_cache, local->mad_priv); 2426 kmem_cache_free(ib_mad_cache, local->mad_priv);
2427 kfree(local); 2427 kfree(local);
2428 } 2428 }
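
Note the shape of the local_completions() fix: the old inverted `recv` flag was initialized once, outside the loop, so its value could carry over between queued items; the new `free_mad` flag is reset to 0 at the top of every iteration and set only on the path that actually orphans the buffer, with a single free at the end. A compact sketch of that per-iteration ownership flag (hypothetical types, not the MAD structures):

	#include <stdlib.h>

	struct item {
		void *buf;		/* may be handed off to a consumer */
		int has_consumer;
	};

	static void process(struct item *it)
	{
		int free_buf = 0;	/* reset for every item */

		if (it->buf && !it->has_consumer)
			free_buf = 1;	/* nobody took ownership */

		/* ... completion callbacks run here ... */

		if (free_buf)
			free(it->buf);	/* single cleanup point */
	}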
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 3af2b84cd838..57a3c6f947b2 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -735,7 +735,7 @@ process_rmpp_data(struct ib_mad_agent_private *agent,
735 goto bad; 735 goto bad;
736 } 736 }
737 737
738 if (rmpp_hdr->seg_num == __constant_htonl(1)) { 738 if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
739 if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) { 739 if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
740 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; 740 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
741 goto bad; 741 goto bad;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7863a50d56f2..1865049e80f7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -395,6 +395,8 @@ static void update_sm_ah(struct work_struct *work)
395 } 395 }
396 396
397 spin_lock_irq(&port->ah_lock); 397 spin_lock_irq(&port->ah_lock);
398 if (port->sm_ah)
399 kref_put(&port->sm_ah->ref, free_sm_ah);
398 port->sm_ah = new_ah; 400 port->sm_ah = new_ah;
399 spin_unlock_irq(&port->ah_lock); 401 spin_unlock_irq(&port->ah_lock);
400 402
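
The added kref_put() releases the previous SM address handle before the new one is published, closing a leak on every SM refresh; the put happens under the same lock that guards the pointer, so no reader can see a half-swapped state. A userspace sketch of this swap-and-release pattern, with illustrative names rather than kernel code:

	#include <pthread.h>
	#include <stdlib.h>

	struct handle { int ref; };

	static void handle_put(struct handle *h)
	{
		if (--h->ref == 0)
			free(h);
	}

	static struct handle *current_h;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void install(struct handle *new_h)
	{
		pthread_mutex_lock(&lock);
		if (current_h)
			handle_put(current_h);	/* release the old handle */
		current_h = new_h;		/* publish the new one */
		pthread_mutex_unlock(&lock);
	}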
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b43f7d3682d3..5c04cfb54cb9 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -66,11 +66,6 @@ struct port_table_attribute {
66 int index; 66 int index;
67}; 67};
68 68
69static inline int ibdev_is_alive(const struct ib_device *dev)
70{
71 return dev->reg_state == IB_DEV_REGISTERED;
72}
73
74static ssize_t port_attr_show(struct kobject *kobj, 69static ssize_t port_attr_show(struct kobject *kobj,
75 struct attribute *attr, char *buf) 70 struct attribute *attr, char *buf)
76{ 71{
@@ -80,8 +75,6 @@ static ssize_t port_attr_show(struct kobject *kobj,
80 75
81 if (!port_attr->show) 76 if (!port_attr->show)
82 return -EIO; 77 return -EIO;
83 if (!ibdev_is_alive(p->ibdev))
84 return -ENODEV;
85 78
86 return port_attr->show(p, port_attr, buf); 79 return port_attr->show(p, port_attr, buf);
87} 80}
@@ -562,9 +555,6 @@ static ssize_t show_node_type(struct device *device,
562{ 555{
563 struct ib_device *dev = container_of(device, struct ib_device, dev); 556 struct ib_device *dev = container_of(device, struct ib_device, dev);
564 557
565 if (!ibdev_is_alive(dev))
566 return -ENODEV;
567
568 switch (dev->node_type) { 558 switch (dev->node_type) {
569 case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type); 559 case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
570 case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type); 560 case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
@@ -581,9 +571,6 @@ static ssize_t show_sys_image_guid(struct device *device,
581 struct ib_device_attr attr; 571 struct ib_device_attr attr;
582 ssize_t ret; 572 ssize_t ret;
583 573
584 if (!ibdev_is_alive(dev))
585 return -ENODEV;
586
587 ret = ib_query_device(dev, &attr); 574 ret = ib_query_device(dev, &attr);
588 if (ret) 575 if (ret)
589 return ret; 576 return ret;
@@ -600,9 +587,6 @@ static ssize_t show_node_guid(struct device *device,
600{ 587{
601 struct ib_device *dev = container_of(device, struct ib_device, dev); 588 struct ib_device *dev = container_of(device, struct ib_device, dev);
602 589
603 if (!ibdev_is_alive(dev))
604 return -ENODEV;
605
606 return sprintf(buf, "%04x:%04x:%04x:%04x\n", 590 return sprintf(buf, "%04x:%04x:%04x:%04x\n",
607 be16_to_cpu(((__be16 *) &dev->node_guid)[0]), 591 be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
608 be16_to_cpu(((__be16 *) &dev->node_guid)[1]), 592 be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
@@ -848,6 +832,9 @@ void ib_device_unregister_sysfs(struct ib_device *device)
848 struct kobject *p, *t; 832 struct kobject *p, *t;
849 struct ib_port *port; 833 struct ib_port *port;
850 834
835 /* Hold kobject until ib_dealloc_device() */
836 kobject_get(&device->dev.kobj);
837
851 list_for_each_entry_safe(p, t, &device->port_list, entry) { 838 list_for_each_entry_safe(p, t, &device->port_list, entry) {
852 list_del(&p->entry); 839 list_del(&p->entry);
853 port = container_of(p, struct ib_port, kobj); 840 port = container_of(p, struct ib_port, kobj);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 4dcf08b3fd83..d4d7204c11ed 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -450,7 +450,7 @@ static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
450 if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe)) 450 if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
451 return 0; 451 return 0;
452 452
453 if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) && 453 if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
454 Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) 454 Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
455 return 0; 455 return 0;
456 456
@@ -938,6 +938,23 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
938 if (!rdev_p->t3cdev_p) 938 if (!rdev_p->t3cdev_p)
939 rdev_p->t3cdev_p = dev2t3cdev(netdev_p); 939 rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
940 rdev_p->t3cdev_p->ulp = (void *) rdev_p; 940 rdev_p->t3cdev_p->ulp = (void *) rdev_p;
941
942 err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
943 &(rdev_p->fw_info));
944 if (err) {
945 printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
946 __func__, rdev_p->t3cdev_p, err);
947 goto err1;
948 }
949 if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
950 printk(KERN_ERR MOD "fatal firmware version mismatch: "
951 "need version %u but adapter has version %u\n",
952 CXIO_FW_MAJ,
953 G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
954 err = -EINVAL;
955 goto err1;
956 }
957
941 err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS, 958 err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
942 &(rdev_p->rnic_info)); 959 &(rdev_p->rnic_info));
943 if (err) { 960 if (err) {
@@ -1204,11 +1221,12 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
1204 } 1221 }
1205 1222
1206 /* incoming SEND with no receive posted failures */ 1223 /* incoming SEND with no receive posted failures */
1207 if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) && 1224 if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
1208 Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) { 1225 Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
1209 ret = -1; 1226 ret = -1;
1210 goto skip_cqe; 1227 goto skip_cqe;
1211 } 1228 }
1229 BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
1212 goto proc_cqe; 1230 goto proc_cqe;
1213 } 1231 }
1214 1232
@@ -1223,6 +1241,13 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
1223 * then we complete this with TPT_ERR_MSN and mark the wq in 1241 * then we complete this with TPT_ERR_MSN and mark the wq in
1224 * error. 1242 * error.
1225 */ 1243 */
1244
1245 if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
1246 wq->error = 1;
1247 ret = -1;
1248 goto skip_cqe;
1249 }
1250
1226 if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) { 1251 if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
1227 wq->error = 1; 1252 wq->error = 1;
1228 hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN)); 1253 hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
@@ -1277,6 +1302,7 @@ proc_cqe:
1277 cxio_hal_pblpool_free(wq->rdev, 1302 cxio_hal_pblpool_free(wq->rdev,
1278 wq->rq[Q_PTR2IDX(wq->rq_rptr, 1303 wq->rq[Q_PTR2IDX(wq->rq_rptr,
1279 wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE); 1304 wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
1305 BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
1280 wq->rq_rptr++; 1306 wq->rq_rptr++;
1281 } 1307 }
1282 1308
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 656fe47bc84f..e44dc2289471 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -61,6 +61,8 @@
61 61
62#define T3_MAX_DEV_NAME_LEN 32 62#define T3_MAX_DEV_NAME_LEN 32
63 63
64#define CXIO_FW_MAJ 7
65
64struct cxio_hal_ctrl_qp { 66struct cxio_hal_ctrl_qp {
65 u32 wptr; 67 u32 wptr;
66 u32 rptr; 68 u32 rptr;
@@ -108,6 +110,7 @@ struct cxio_rdev {
108 struct gen_pool *pbl_pool; 110 struct gen_pool *pbl_pool;
109 struct gen_pool *rqt_pool; 111 struct gen_pool *rqt_pool;
110 struct list_head entry; 112 struct list_head entry;
113 struct ch_embedded_info fw_info;
111}; 114};
112 115
113static inline int cxio_num_stags(struct cxio_rdev *rdev_p) 116static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 04618f7bfbb3..ff9be1a13106 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -604,6 +604,12 @@ struct t3_cqe {
604#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header))) 604#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
605#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header))) 605#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))
606 606
607#define CQE_SEND_OPCODE(x)( \
608 (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
609 (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
610 (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
611 (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
612
607#define CQE_LEN(x) (be32_to_cpu((x).len)) 613#define CQE_LEN(x) (be32_to_cpu((x).len))
608 614
609/* used for RQ completion processing */ 615/* used for RQ completion processing */
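
CQE_SEND_OPCODE() collapses the four send opcode variants into one predicate so the poll paths in cxio_hal.c treat SEND_WITH_SE/SEND_WITH_INV/SEND_WITH_SE_INV completions the same as a plain SEND. An equivalent predicate written as an inline helper, shown only as a sketch of the alternative (it assumes the T3_* opcode constants from this header; note the macro re-decodes the header four times, while a helper decodes it once):

	static inline int cqe_is_send(u8 opcode)
	{
		switch (opcode) {
		case T3_SEND:
		case T3_SEND_WITH_SE:
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
			return 1;
		default:
			return 0;
		}
	}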
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 44e936e48a31..8699947aaf6c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1678,6 +1678,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1678{ 1678{
1679 struct iwch_ep *ep = ctx; 1679 struct iwch_ep *ep = ctx;
1680 1680
1681 if (state_read(&ep->com) != FPDU_MODE)
1682 return CPL_RET_BUF_DONE;
1683
1681 PDBG("%s ep %p\n", __func__, ep); 1684 PDBG("%s ep %p\n", __func__, ep);
1682 skb_pull(skb, sizeof(struct cpl_rdma_terminate)); 1685 skb_pull(skb, sizeof(struct cpl_rdma_terminate));
1683 PDBG("%s saving %d bytes of term msg\n", __func__, skb->len); 1686 PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 7b67a6771720..743c5d8b8806 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -179,11 +179,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
179 case TPT_ERR_BOUND: 179 case TPT_ERR_BOUND:
180 case TPT_ERR_INVALIDATE_SHARED_MR: 180 case TPT_ERR_INVALIDATE_SHARED_MR:
181 case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND: 181 case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
182 printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
183 "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
184 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
185 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
186 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
187 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); 182 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
188 post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1); 183 post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
189 break; 184 break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 19661b2f0406..c758fbd58478 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -99,8 +99,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
99 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 99 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
100 plen = 4; 100 plen = 4;
101 wqe->write.sgl[0].stag = wr->ex.imm_data; 101 wqe->write.sgl[0].stag = wr->ex.imm_data;
102 wqe->write.sgl[0].len = __constant_cpu_to_be32(0); 102 wqe->write.sgl[0].len = cpu_to_be32(0);
103 wqe->write.num_sgle = __constant_cpu_to_be32(0); 103 wqe->write.num_sgle = cpu_to_be32(0);
104 *flit_cnt = 6; 104 *flit_cnt = 6;
105 } else { 105 } else {
106 plen = 0; 106 plen = 0;
@@ -195,15 +195,12 @@ static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
195 return 0; 195 return 0;
196} 196}
197 197
198/*
199 * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
200 */
201static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list, 198static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
202 u32 num_sgle, u32 * pbl_addr, u8 * page_size) 199 u32 num_sgle, u32 * pbl_addr, u8 * page_size)
203{ 200{
204 int i; 201 int i;
205 struct iwch_mr *mhp; 202 struct iwch_mr *mhp;
206 u32 offset; 203 u64 offset;
207 for (i = 0; i < num_sgle; i++) { 204 for (i = 0; i < num_sgle; i++) {
208 205
209 mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8); 206 mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
@@ -235,8 +232,8 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
235 return -EINVAL; 232 return -EINVAL;
236 } 233 }
237 offset = sg_list[i].addr - mhp->attr.va_fbo; 234 offset = sg_list[i].addr - mhp->attr.va_fbo;
238 offset += ((u32) mhp->attr.va_fbo) % 235 offset += mhp->attr.va_fbo &
239 (1UL << (12 + mhp->attr.page_size)); 236 ((1UL << (12 + mhp->attr.page_size)) - 1);
240 pbl_addr[i] = ((mhp->attr.pbl_addr - 237 pbl_addr[i] = ((mhp->attr.pbl_addr -
241 rhp->rdev.rnic_info.pbl_base) >> 3) + 238 rhp->rdev.rnic_info.pbl_base) >> 3) +
242 (offset >> (12 + mhp->attr.page_size)); 239 (offset >> (12 + mhp->attr.page_size));
@@ -266,8 +263,8 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
266 wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); 263 wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
267 264
268 /* to in the WQE == the offset into the page */ 265 /* to in the WQE == the offset into the page */
269 wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) % 266 wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
270 (1UL << (12 + page_size[i]))); 267 ((1UL << (12 + page_size[i])) - 1));
271 268
272 /* pbl_addr is the adapters address in the PBL */ 269 /* pbl_addr is the adapters address in the PBL */
273 wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]); 270 wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
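
Two things change in iwch_sgl2pbl_map(): `offset` is widened from u32 to u64 so large virtual offsets are not truncated, and the page-offset computation switches from `%` to a bitmask, which is valid because the modulus `1UL << (12 + page_size)` is always a power of two. A standalone check of that identity (assumed constants only, no driver code):

	#include <assert.h>
	#include <stdint.h>

	/* For a power-of-two modulus:  x % (1 << n) == x & ((1 << n) - 1). */
	int main(void)
	{
		uint64_t x = 0x0123456789abcdefULL;
		unsigned int n;

		for (n = 12; n < 28; n++)
			assert(x % (1ULL << n) == (x & ((1ULL << n) - 1)));
		return 0;
	}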
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 44447aaa5501..c568b28f4e20 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -46,11 +46,11 @@
46#include "ehca_iverbs.h" 46#include "ehca_iverbs.h"
47#include "hcp_if.h" 47#include "hcp_if.h"
48 48
49#define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002) 49#define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002)
50#define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004) 50#define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004)
51#define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008) 51#define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008)
52 52
53#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001) 53#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
54 54
55/** 55/**
56 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue 56 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index dc37277f1c80..fc7181985e8e 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -772,8 +772,8 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
772 "0x%x, not 0x%x\n", csum, ifp->if_csum); 772 "0x%x, not 0x%x\n", csum, ifp->if_csum);
773 goto done; 773 goto done;
774 } 774 }
775 if (*(__be64 *) ifp->if_guid == 0ULL || 775 if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
776 *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) { 776 *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
777 ipath_dev_err(dd, "Invalid GUID %llx from flash; " 777 ipath_dev_err(dd, "Invalid GUID %llx from flash; "
778 "ignoring\n", 778 "ignoring\n",
779 *(unsigned long long *) ifp->if_guid); 779 *(unsigned long long *) ifp->if_guid);
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 17a123197477..16a702d46018 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -37,10 +37,10 @@
37#include "ipath_verbs.h" 37#include "ipath_verbs.h"
38#include "ipath_common.h" 38#include "ipath_common.h"
39 39
40#define IB_SMP_UNSUP_VERSION __constant_htons(0x0004) 40#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
41#define IB_SMP_UNSUP_METHOD __constant_htons(0x0008) 41#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
42#define IB_SMP_UNSUP_METH_ATTR __constant_htons(0x000C) 42#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
43#define IB_SMP_INVALID_FIELD __constant_htons(0x001C) 43#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
44 44
45static int reply(struct ib_smp *smp) 45static int reply(struct ib_smp *smp)
46{ 46{
@@ -789,12 +789,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
789 return recv_subn_get_pkeytable(smp, ibdev); 789 return recv_subn_get_pkeytable(smp, ibdev);
790} 790}
791 791
792#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001) 792#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
793#define IB_PMA_PORT_SAMPLES_CONTROL __constant_htons(0x0010) 793#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
794#define IB_PMA_PORT_SAMPLES_RESULT __constant_htons(0x0011) 794#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
795#define IB_PMA_PORT_COUNTERS __constant_htons(0x0012) 795#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
796#define IB_PMA_PORT_COUNTERS_EXT __constant_htons(0x001D) 796#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
797#define IB_PMA_PORT_SAMPLES_RESULT_EXT __constant_htons(0x001E) 797#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
798 798
799struct ib_perf { 799struct ib_perf {
800 u8 base_version; 800 u8 base_version;
@@ -884,19 +884,19 @@ struct ib_pma_portcounters {
884 __be32 port_rcv_packets; 884 __be32 port_rcv_packets;
885} __attribute__ ((packed)); 885} __attribute__ ((packed));
886 886
887#define IB_PMA_SEL_SYMBOL_ERROR __constant_htons(0x0001) 887#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
888#define IB_PMA_SEL_LINK_ERROR_RECOVERY __constant_htons(0x0002) 888#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
889#define IB_PMA_SEL_LINK_DOWNED __constant_htons(0x0004) 889#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
890#define IB_PMA_SEL_PORT_RCV_ERRORS __constant_htons(0x0008) 890#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
891#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS __constant_htons(0x0010) 891#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
892#define IB_PMA_SEL_PORT_XMIT_DISCARDS __constant_htons(0x0040) 892#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
893#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS __constant_htons(0x0200) 893#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
894#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS __constant_htons(0x0400) 894#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
895#define IB_PMA_SEL_PORT_VL15_DROPPED __constant_htons(0x0800) 895#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
896#define IB_PMA_SEL_PORT_XMIT_DATA __constant_htons(0x1000) 896#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
897#define IB_PMA_SEL_PORT_RCV_DATA __constant_htons(0x2000) 897#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
898#define IB_PMA_SEL_PORT_XMIT_PACKETS __constant_htons(0x4000) 898#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
899#define IB_PMA_SEL_PORT_RCV_PACKETS __constant_htons(0x8000) 899#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
900 900
901struct ib_pma_portcounters_ext { 901struct ib_pma_portcounters_ext {
902 u8 reserved; 902 u8 reserved;
@@ -913,14 +913,14 @@ struct ib_pma_portcounters_ext {
913 __be64 port_multicast_rcv_packets; 913 __be64 port_multicast_rcv_packets;
914} __attribute__ ((packed)); 914} __attribute__ ((packed));
915 915
916#define IB_PMA_SELX_PORT_XMIT_DATA __constant_htons(0x0001) 916#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
917#define IB_PMA_SELX_PORT_RCV_DATA __constant_htons(0x0002) 917#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
918#define IB_PMA_SELX_PORT_XMIT_PACKETS __constant_htons(0x0004) 918#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
919#define IB_PMA_SELX_PORT_RCV_PACKETS __constant_htons(0x0008) 919#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
920#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS __constant_htons(0x0010) 920#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
921#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS __constant_htons(0x0020) 921#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
922#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS __constant_htons(0x0040) 922#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
923#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS __constant_htons(0x0080) 923#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
924 924
925static int recv_pma_get_classportinfo(struct ib_perf *pmp) 925static int recv_pma_get_classportinfo(struct ib_perf *pmp)
926{ 926{
@@ -933,7 +933,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
933 pmp->status |= IB_SMP_INVALID_FIELD; 933 pmp->status |= IB_SMP_INVALID_FIELD;
934 934
935 /* Indicate AllPortSelect is valid (only one port anyway) */ 935 /* Indicate AllPortSelect is valid (only one port anyway) */
936 p->cap_mask = __constant_cpu_to_be16(1 << 8); 936 p->cap_mask = cpu_to_be16(1 << 8);
937 p->base_version = 1; 937 p->base_version = 1;
938 p->class_version = 1; 938 p->class_version = 1;
939 /* 939 /*
@@ -951,12 +951,11 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
951 * We support 5 counters which only count the mandatory quantities. 951 * We support 5 counters which only count the mandatory quantities.
952 */ 952 */
953#define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) 953#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
954#define COUNTER_MASK0_9 \ 954#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
955 __constant_cpu_to_be32(COUNTER_MASK(1, 0) | \ 955 COUNTER_MASK(1, 1) | \
956 COUNTER_MASK(1, 1) | \ 956 COUNTER_MASK(1, 2) | \
957 COUNTER_MASK(1, 2) | \ 957 COUNTER_MASK(1, 3) | \
958 COUNTER_MASK(1, 3) | \ 958 COUNTER_MASK(1, 4))
959 COUNTER_MASK(1, 4))
960 959
961static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp, 960static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
962 struct ib_device *ibdev, u8 port) 961 struct ib_device *ibdev, u8 port)
@@ -1137,7 +1136,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
1137 status = dev->pma_sample_status; 1136 status = dev->pma_sample_status;
1138 p->sample_status = cpu_to_be16(status); 1137 p->sample_status = cpu_to_be16(status);
1139 /* 64 bits */ 1138 /* 64 bits */
1140 p->extended_width = __constant_cpu_to_be32(0x80000000); 1139 p->extended_width = cpu_to_be32(0x80000000);
1141 for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) 1140 for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
1142 p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 : 1141 p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
1143 cpu_to_be64( 1142 cpu_to_be64(
@@ -1185,7 +1184,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
1185 pmp->status |= IB_SMP_INVALID_FIELD; 1184 pmp->status |= IB_SMP_INVALID_FIELD;
1186 1185
1187 if (cntrs.symbol_error_counter > 0xFFFFUL) 1186 if (cntrs.symbol_error_counter > 0xFFFFUL)
1188 p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF); 1187 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1189 else 1188 else
1190 p->symbol_error_counter = 1189 p->symbol_error_counter =
1191 cpu_to_be16((u16)cntrs.symbol_error_counter); 1190 cpu_to_be16((u16)cntrs.symbol_error_counter);
@@ -1199,17 +1198,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
1199 else 1198 else
1200 p->link_downed_counter = (u8)cntrs.link_downed_counter; 1199 p->link_downed_counter = (u8)cntrs.link_downed_counter;
1201 if (cntrs.port_rcv_errors > 0xFFFFUL) 1200 if (cntrs.port_rcv_errors > 0xFFFFUL)
1202 p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF); 1201 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1203 else 1202 else
1204 p->port_rcv_errors = 1203 p->port_rcv_errors =
1205 cpu_to_be16((u16) cntrs.port_rcv_errors); 1204 cpu_to_be16((u16) cntrs.port_rcv_errors);
1206 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) 1205 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1207 p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF); 1206 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1208 else 1207 else
1209 p->port_rcv_remphys_errors = 1208 p->port_rcv_remphys_errors =
1210 cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); 1209 cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
1211 if (cntrs.port_xmit_discards > 0xFFFFUL) 1210 if (cntrs.port_xmit_discards > 0xFFFFUL)
1212 p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF); 1211 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1213 else 1212 else
1214 p->port_xmit_discards = 1213 p->port_xmit_discards =
1215 cpu_to_be16((u16)cntrs.port_xmit_discards); 1214 cpu_to_be16((u16)cntrs.port_xmit_discards);
@@ -1220,24 +1219,24 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
1220 p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) | 1219 p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
1221 cntrs.excessive_buffer_overrun_errors; 1220 cntrs.excessive_buffer_overrun_errors;
1222 if (cntrs.vl15_dropped > 0xFFFFUL) 1221 if (cntrs.vl15_dropped > 0xFFFFUL)
1223 p->vl15_dropped = __constant_cpu_to_be16(0xFFFF); 1222 p->vl15_dropped = cpu_to_be16(0xFFFF);
1224 else 1223 else
1225 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); 1224 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1226 if (cntrs.port_xmit_data > 0xFFFFFFFFUL) 1225 if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1227 p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF); 1226 p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
1228 else 1227 else
1229 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); 1228 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
1230 if (cntrs.port_rcv_data > 0xFFFFFFFFUL) 1229 if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1231 p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF); 1230 p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
1232 else 1231 else
1233 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); 1232 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
1234 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) 1233 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1235 p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF); 1234 p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
1236 else 1235 else
1237 p->port_xmit_packets = 1236 p->port_xmit_packets =
1238 cpu_to_be32((u32)cntrs.port_xmit_packets); 1237 cpu_to_be32((u32)cntrs.port_xmit_packets);
1239 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) 1238 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1240 p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF); 1239 p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
1241 else 1240 else
1242 p->port_rcv_packets = 1241 p->port_rcv_packets =
1243 cpu_to_be32((u32) cntrs.port_rcv_packets); 1242 cpu_to_be32((u32) cntrs.port_rcv_packets);
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 9170710b950d..79b3dbc97179 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1744,7 +1744,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1744 /* Signal completion event if the solicited bit is set. */ 1744 /* Signal completion event if the solicited bit is set. */
1745 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1745 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
1746 (ohdr->bth[0] & 1746 (ohdr->bth[0] &
1747 __constant_cpu_to_be32(1 << 23)) != 0); 1747 cpu_to_be32(1 << 23)) != 0);
1748 break; 1748 break;
1749 1749
1750 case OP(RDMA_WRITE_FIRST): 1750 case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 8e255adf5d9b..4b0698590850 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -781,10 +781,10 @@ retry:
781 descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0]; 781 descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
782 descqp -= 2; 782 descqp -= 2;
783 /* SDmaLastDesc */ 783 /* SDmaLastDesc */
784 descqp[0] |= __constant_cpu_to_le64(1ULL << 11); 784 descqp[0] |= cpu_to_le64(1ULL << 11);
785 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) { 785 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
786 /* SDmaIntReq */ 786 /* SDmaIntReq */
787 descqp[0] |= __constant_cpu_to_le64(1ULL << 15); 787 descqp[0] |= cpu_to_le64(1ULL << 15);
788 } 788 }
789 789
790 /* Commit writes to memory and advance the tail on the chip */ 790 /* Commit writes to memory and advance the tail on the chip */
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 82cc588b8bf2..22e60998f1a7 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -419,7 +419,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
419 /* Signal completion event if the solicited bit is set. */ 419 /* Signal completion event if the solicited bit is set. */
420 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 420 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
421 (ohdr->bth[0] & 421 (ohdr->bth[0] &
422 __constant_cpu_to_be32(1 << 23)) != 0); 422 cpu_to_be32(1 << 23)) != 0);
423 break; 423 break;
424 424
425 case OP(RDMA_WRITE_FIRST): 425 case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 91c74cc797ae..6076cb61bf6a 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -370,7 +370,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
370 */ 370 */
371 ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && 371 ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
372 ah_attr->dlid != IPATH_PERMISSIVE_LID ? 372 ah_attr->dlid != IPATH_PERMISSIVE_LID ?
373 __constant_cpu_to_be32(IPATH_MULTICAST_QPN) : 373 cpu_to_be32(IPATH_MULTICAST_QPN) :
374 cpu_to_be32(wqe->wr.wr.ud.remote_qpn); 374 cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK); 375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
376 /* 376 /*
@@ -573,7 +573,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
573 /* Signal completion event if the solicited bit is set. */ 573 /* Signal completion event if the solicited bit is set. */
574 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 574 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
575 (ohdr->bth[0] & 575 (ohdr->bth[0] &
576 __constant_cpu_to_be32(1 << 23)) != 0); 576 cpu_to_be32(1 << 23)) != 0);
577 577
578bail:; 578bail:;
579} 579}
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 82d9a0b5ca2f..7bff4b9baa0a 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -667,13 +667,13 @@ static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
667 667
668static inline __le64 ipath_sdma_make_first_desc0(__le64 descq) 668static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
669{ 669{
670 return descq | __constant_cpu_to_le64(1ULL << 12); 670 return descq | cpu_to_le64(1ULL << 12);
671} 671}
672 672
673static inline __le64 ipath_sdma_make_last_desc0(__le64 descq) 673static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
674{ 674{
675 /* last */ /* dma head */ 675 /* last */ /* dma head */
676 return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13); 676 return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
677} 677}
678 678
679static inline __le64 ipath_sdma_make_desc1(u64 addr) 679static inline __le64 ipath_sdma_make_desc1(u64 addr)
@@ -763,7 +763,7 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
763 if (ofs >= IPATH_SMALLBUF_DWORDS) { 763 if (ofs >= IPATH_SMALLBUF_DWORDS) {
764 for (i = 0; i < pkt->naddr; i++) { 764 for (i = 0; i < pkt->naddr; i++) {
765 dd->ipath_sdma_descq[dtail].qw[0] |= 765 dd->ipath_sdma_descq[dtail].qw[0] |=
766 __constant_cpu_to_le64(1ULL << 14); 766 cpu_to_le64(1ULL << 14);
767 if (++dtail == dd->ipath_sdma_descq_cnt) 767 if (++dtail == dd->ipath_sdma_descq_cnt)
768 dtail = 0; 768 dtail = 0;
769 } 769 }
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index cdf0e6abd34d..9289ab4b0ae8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1585,7 +1585,7 @@ static int ipath_query_port(struct ib_device *ibdev,
1585 u64 ibcstat; 1585 u64 ibcstat;
1586 1586
1587 memset(props, 0, sizeof(*props)); 1587 memset(props, 0, sizeof(*props));
1588 props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE); 1588 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1589 props->lmc = dd->ipath_lmc; 1589 props->lmc = dd->ipath_lmc;
1590 props->sm_lid = dev->sm_lid; 1590 props->sm_lid = dev->sm_lid;
1591 props->sm_sl = dev->sm_sl; 1591 props->sm_sl = dev->sm_sl;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 11e3f613df93..ae6cff4abffc 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -86,11 +86,11 @@
86#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02 86#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
87 87
88/* Mandatory IB performance counter select values. */ 88/* Mandatory IB performance counter select values. */
89#define IB_PMA_PORT_XMIT_DATA __constant_htons(0x0001) 89#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
90#define IB_PMA_PORT_RCV_DATA __constant_htons(0x0002) 90#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
91#define IB_PMA_PORT_XMIT_PKTS __constant_htons(0x0003) 91#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
92#define IB_PMA_PORT_RCV_PKTS __constant_htons(0x0004) 92#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
93#define IB_PMA_PORT_XMIT_WAIT __constant_htons(0x0005) 93#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
94 94
95struct ib_reth { 95struct ib_reth {
96 __be64 vaddr; 96 __be64 vaddr;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 606f1e2ef284..19e68ab66168 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -147,7 +147,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
147 * Snoop SM MADs for port info and P_Key table sets, so we can 147 * Snoop SM MADs for port info and P_Key table sets, so we can
148 * synthesize LID change and P_Key change events. 148 * synthesize LID change and P_Key change events.
149 */ 149 */
150static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad) 150static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
151 u16 prev_lid)
151{ 152{
152 struct ib_event event; 153 struct ib_event event;
153 154
@@ -157,6 +158,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
157 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { 158 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
158 struct ib_port_info *pinfo = 159 struct ib_port_info *pinfo =
159 (struct ib_port_info *) ((struct ib_smp *) mad)->data; 160 (struct ib_port_info *) ((struct ib_smp *) mad)->data;
161 u16 lid = be16_to_cpu(pinfo->lid);
160 162
161 update_sm_ah(to_mdev(ibdev), port_num, 163 update_sm_ah(to_mdev(ibdev), port_num,
162 be16_to_cpu(pinfo->sm_lid), 164 be16_to_cpu(pinfo->sm_lid),
@@ -165,12 +167,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
165 event.device = ibdev; 167 event.device = ibdev;
166 event.element.port_num = port_num; 168 event.element.port_num = port_num;
167 169
168 if (pinfo->clientrereg_resv_subnetto & 0x80) 170 if (pinfo->clientrereg_resv_subnetto & 0x80) {
169 event.event = IB_EVENT_CLIENT_REREGISTER; 171 event.event = IB_EVENT_CLIENT_REREGISTER;
170 else 172 ib_dispatch_event(&event);
171 event.event = IB_EVENT_LID_CHANGE; 173 }
172 174
173 ib_dispatch_event(&event); 175 if (prev_lid != lid) {
176 event.event = IB_EVENT_LID_CHANGE;
177 ib_dispatch_event(&event);
178 }
174 } 179 }
175 180
176 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { 181 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -228,8 +233,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
228 struct ib_wc *in_wc, struct ib_grh *in_grh, 233 struct ib_wc *in_wc, struct ib_grh *in_grh,
229 struct ib_mad *in_mad, struct ib_mad *out_mad) 234 struct ib_mad *in_mad, struct ib_mad *out_mad)
230{ 235{
231 u16 slid; 236 u16 slid, prev_lid = 0;
232 int err; 237 int err;
238 struct ib_port_attr pattr;
233 239
234 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); 240 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
235 241
@@ -263,6 +269,13 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
263 } else 269 } else
264 return IB_MAD_RESULT_SUCCESS; 270 return IB_MAD_RESULT_SUCCESS;
265 271
272 if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
273 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
274 in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
275 in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
276 !ib_query_port(ibdev, port_num, &pattr))
277 prev_lid = pattr.lid;
278
266 err = mlx4_MAD_IFC(to_mdev(ibdev), 279 err = mlx4_MAD_IFC(to_mdev(ibdev),
267 mad_flags & IB_MAD_IGNORE_MKEY, 280 mad_flags & IB_MAD_IGNORE_MKEY,
268 mad_flags & IB_MAD_IGNORE_BKEY, 281 mad_flags & IB_MAD_IGNORE_BKEY,
@@ -271,7 +284,7 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
271 return IB_MAD_RESULT_FAILURE; 284 return IB_MAD_RESULT_FAILURE;
272 285
273 if (!out_mad->mad_hdr.status) { 286 if (!out_mad->mad_hdr.status) {
274 smp_snoop(ibdev, port_num, in_mad); 287 smp_snoop(ibdev, port_num, in_mad, prev_lid);
275 node_desc_override(ibdev, out_mad); 288 node_desc_override(ibdev, out_mad);
276 } 289 }
277 290
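
The pattern here (mirrored in the mthca hunks below) is snapshot-and-compare: the driver records the port LID via ib_query_port() before a PortInfo Set MAD is executed, then fires IB_EVENT_LID_CHANGE only if the LID actually differs afterwards, and dispatches CLIENT_REREGISTER independently instead of as an either/or. A standalone sketch of that event logic, with illustrative types in place of struct ib_event:

	#include <stdint.h>
	#include <stdio.h>

	static void dispatch(const char *ev) { printf("%s\n", ev); }

	static void snoop_port_info_set(uint16_t prev_lid, uint16_t new_lid,
					int client_rereg)
	{
		if (client_rereg)
			dispatch("CLIENT_REREGISTER");
		if (prev_lid != new_lid)	/* only on a real LID change */
			dispatch("LID_CHANGE");
	}

	int main(void)
	{
		snoop_port_info_set(5, 5, 1);	/* reregister only */
		snoop_port_info_set(5, 7, 0);	/* LID change only */
		return 0;
	}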
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 61588bd273bd..2ccb9d31771f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -699,11 +699,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
699 struct mlx4_ib_dev *ibdev = ibdev_ptr; 699 struct mlx4_ib_dev *ibdev = ibdev_ptr;
700 int p; 700 int p;
701 701
702 mlx4_ib_mad_cleanup(ibdev);
703 ib_unregister_device(&ibdev->ib_dev);
704
702 for (p = 1; p <= ibdev->num_ports; ++p) 705 for (p = 1; p <= ibdev->num_ports; ++p)
703 mlx4_CLOSE_PORT(dev, p); 706 mlx4_CLOSE_PORT(dev, p);
704 707
705 mlx4_ib_mad_cleanup(ibdev);
706 ib_unregister_device(&ibdev->ib_dev);
707 iounmap(ibdev->uar_map); 708 iounmap(ibdev->uar_map);
708 mlx4_uar_free(dev, &ibdev->priv_uar); 709 mlx4_uar_free(dev, &ibdev->priv_uar);
709 mlx4_pd_free(dev, ibdev->priv_pdn); 710 mlx4_pd_free(dev, ibdev->priv_pdn);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a91cb4c3fa5c..f385a24d31d2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,17 +71,17 @@ enum {
71}; 71};
72 72
73static const __be32 mlx4_ib_opcode[] = { 73static const __be32 mlx4_ib_opcode[] = {
74 [IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND), 74 [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
75 [IB_WR_LSO] = __constant_cpu_to_be32(MLX4_OPCODE_LSO), 75 [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
76 [IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM), 76 [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
77 [IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), 77 [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
78 [IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), 78 [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
79 [IB_WR_RDMA_READ] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ), 79 [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
80 [IB_WR_ATOMIC_CMP_AND_SWP] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), 80 [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
81 [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), 81 [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
82 [IB_WR_SEND_WITH_INV] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL), 82 [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
83 [IB_WR_LOCAL_INV] = __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), 83 [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
84 [IB_WR_FAST_REG_MR] = __constant_cpu_to_be32(MLX4_OPCODE_FMR), 84 [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
85}; 85};
86 86
87static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) 87static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 640449582aba..5648659ff0b0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -104,7 +104,8 @@ static void update_sm_ah(struct mthca_dev *dev,
104 */ 104 */
105static void smp_snoop(struct ib_device *ibdev, 105static void smp_snoop(struct ib_device *ibdev,
106 u8 port_num, 106 u8 port_num,
107 struct ib_mad *mad) 107 struct ib_mad *mad,
108 u16 prev_lid)
108{ 109{
109 struct ib_event event; 110 struct ib_event event;
110 111
@@ -114,6 +115,7 @@ static void smp_snoop(struct ib_device *ibdev,
114 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { 115 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
115 struct ib_port_info *pinfo = 116 struct ib_port_info *pinfo =
116 (struct ib_port_info *) ((struct ib_smp *) mad)->data; 117 (struct ib_port_info *) ((struct ib_smp *) mad)->data;
118 u16 lid = be16_to_cpu(pinfo->lid);
117 119
118 mthca_update_rate(to_mdev(ibdev), port_num); 120 mthca_update_rate(to_mdev(ibdev), port_num);
119 update_sm_ah(to_mdev(ibdev), port_num, 121 update_sm_ah(to_mdev(ibdev), port_num,
@@ -123,12 +125,15 @@ static void smp_snoop(struct ib_device *ibdev,
123 event.device = ibdev; 125 event.device = ibdev;
124 event.element.port_num = port_num; 126 event.element.port_num = port_num;
125 127
126 if (pinfo->clientrereg_resv_subnetto & 0x80) 128 if (pinfo->clientrereg_resv_subnetto & 0x80) {
127 event.event = IB_EVENT_CLIENT_REREGISTER; 129 event.event = IB_EVENT_CLIENT_REREGISTER;
128 else 130 ib_dispatch_event(&event);
129 event.event = IB_EVENT_LID_CHANGE; 131 }
130 132
131 ib_dispatch_event(&event); 133 if (prev_lid != lid) {
134 event.event = IB_EVENT_LID_CHANGE;
135 ib_dispatch_event(&event);
136 }
132 } 137 }
133 138
134 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { 139 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -196,6 +201,8 @@ int mthca_process_mad(struct ib_device *ibdev,
196 int err; 201 int err;
197 u8 status; 202 u8 status;
198 u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); 203 u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
204 u16 prev_lid = 0;
205 struct ib_port_attr pattr;
199 206
200 /* Forward locally generated traps to the SM */ 207 /* Forward locally generated traps to the SM */
201 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && 208 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
@@ -233,6 +240,12 @@ int mthca_process_mad(struct ib_device *ibdev,
233 return IB_MAD_RESULT_SUCCESS; 240 return IB_MAD_RESULT_SUCCESS;
234 } else 241 } else
235 return IB_MAD_RESULT_SUCCESS; 242 return IB_MAD_RESULT_SUCCESS;
243 if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
244 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
245 in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
246 in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
247 !ib_query_port(ibdev, port_num, &pattr))
248 prev_lid = pattr.lid;
236 249
237 err = mthca_MAD_IFC(to_mdev(ibdev), 250 err = mthca_MAD_IFC(to_mdev(ibdev),
238 mad_flags & IB_MAD_IGNORE_MKEY, 251 mad_flags & IB_MAD_IGNORE_MKEY,
@@ -252,7 +265,7 @@ int mthca_process_mad(struct ib_device *ibdev,
252 } 265 }
253 266
254 if (!out_mad->mad_hdr.status) { 267 if (!out_mad->mad_hdr.status) {
255 smp_snoop(ibdev, port_num, in_mad); 268 smp_snoop(ibdev, port_num, in_mad, prev_lid);
256 node_desc_override(ibdev, out_mad); 269 node_desc_override(ibdev, out_mad);
257 } 270 }
258 271
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b9611ade9eab..ca599767ffbd 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 13a5bb1a7bcf..04b12ad23390 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index a01b4488208b..5327f2bec6bf 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -103,6 +103,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
103static void nes_disconnect_worker(struct work_struct *work); 103static void nes_disconnect_worker(struct work_struct *work);
104 104
105static int send_mpa_request(struct nes_cm_node *, struct sk_buff *); 105static int send_mpa_request(struct nes_cm_node *, struct sk_buff *);
106static int send_mpa_reject(struct nes_cm_node *);
106static int send_syn(struct nes_cm_node *, u32, struct sk_buff *); 107static int send_syn(struct nes_cm_node *, u32, struct sk_buff *);
107static int send_reset(struct nes_cm_node *, struct sk_buff *); 108static int send_reset(struct nes_cm_node *, struct sk_buff *);
108static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb); 109static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb);
@@ -113,8 +114,7 @@ static void process_packet(struct nes_cm_node *, struct sk_buff *,
113static void active_open_err(struct nes_cm_node *, struct sk_buff *, int); 114static void active_open_err(struct nes_cm_node *, struct sk_buff *, int);
114static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int); 115static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int);
115static void cleanup_retrans_entry(struct nes_cm_node *); 116static void cleanup_retrans_entry(struct nes_cm_node *);
116static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *, 117static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *);
117 enum nes_cm_event_type);
118static void free_retrans_entry(struct nes_cm_node *cm_node); 118static void free_retrans_entry(struct nes_cm_node *cm_node);
119static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph, 119static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
120 struct sk_buff *skb, int optionsize, int passive); 120 struct sk_buff *skb, int optionsize, int passive);
@@ -124,6 +124,8 @@ static void cm_event_connected(struct nes_cm_event *);
124static void cm_event_connect_error(struct nes_cm_event *); 124static void cm_event_connect_error(struct nes_cm_event *);
125static void cm_event_reset(struct nes_cm_event *); 125static void cm_event_reset(struct nes_cm_event *);
126static void cm_event_mpa_req(struct nes_cm_event *); 126static void cm_event_mpa_req(struct nes_cm_event *);
127static void cm_event_mpa_reject(struct nes_cm_event *);
128static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node);
127 129
128static void print_core(struct nes_cm_core *core); 130static void print_core(struct nes_cm_core *core);
129 131
@@ -196,7 +198,6 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
196 */ 198 */
197static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb) 199static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
198{ 200{
199 int ret;
200 if (!skb) { 201 if (!skb) {
201 nes_debug(NES_DBG_CM, "skb set to NULL\n"); 202 nes_debug(NES_DBG_CM, "skb set to NULL\n");
202 return -1; 203 return -1;
@@ -206,11 +207,27 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
206 form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame, 207 form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
207 cm_node->mpa_frame_size, SET_ACK); 208 cm_node->mpa_frame_size, SET_ACK);
208 209
209 ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0); 210 return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
210 if (ret < 0) 211}
211 return ret;
212 212
213 return 0; 213
214
215static int send_mpa_reject(struct nes_cm_node *cm_node)
216{
217 struct sk_buff *skb = NULL;
218
219 skb = dev_alloc_skb(MAX_CM_BUFFER);
220 if (!skb) {
221 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
222 return -ENOMEM;
223 }
224
225 /* send an MPA reject frame */
226 form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
227 cm_node->mpa_frame_size, SET_ACK | SET_FIN);
228
229 cm_node->state = NES_CM_STATE_FIN_WAIT1;
230 return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
214} 231}
215 232
216 233
@@ -218,14 +235,17 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
218 * recv_mpa - process a received TCP pkt, we are expecting an 235 * recv_mpa - process a received TCP pkt, we are expecting an
219 * IETF MPA frame 236 * IETF MPA frame
220 */ 237 */
221static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len) 238static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
239 u32 len)
222{ 240{
223 struct ietf_mpa_frame *mpa_frame; 241 struct ietf_mpa_frame *mpa_frame;
224 242
243 *type = NES_MPA_REQUEST_ACCEPT;
244
225 /* assume req frame is in tcp data payload */ 245 /* assume req frame is in tcp data payload */
226 if (len < sizeof(struct ietf_mpa_frame)) { 246 if (len < sizeof(struct ietf_mpa_frame)) {
227 nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len); 247 nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
228 return -1; 248 return -EINVAL;
229 } 249 }
230 250
231 mpa_frame = (struct ietf_mpa_frame *)buffer; 251 mpa_frame = (struct ietf_mpa_frame *)buffer;
@@ -234,14 +254,25 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
234 if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) { 254 if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
235 nes_debug(NES_DBG_CM, "The received ietf buffer was not right" 255 nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
236 " complete (%x + %x != %x)\n", 256 " complete (%x + %x != %x)\n",
237 cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len); 257 cm_node->mpa_frame_size,
238 return -1; 258 (u32)sizeof(struct ietf_mpa_frame), len);
259 return -EINVAL;
260 }
261 /* make sure it does not exceed the max size */
262 if (len > MAX_CM_BUFFER) {
263 nes_debug(NES_DBG_CM, "The received ietf buffer was too large"
264 " (%x + %x != %x)\n",
265 cm_node->mpa_frame_size,
266 (u32)sizeof(struct ietf_mpa_frame), len);
267 return -EINVAL;
239 } 268 }
240 269
241 /* copy entire MPA frame to our cm_node's frame */ 270 /* copy entire MPA frame to our cm_node's frame */
242 memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame), 271 memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
243 cm_node->mpa_frame_size); 272 cm_node->mpa_frame_size);
244 273
274 if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
275 *type = NES_MPA_REQUEST_REJECT;
245 return 0; 276 return 0;
246} 277}
247 278
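
parse_mpa() now reports the peer's decision through an out parameter in addition to enforcing both a minimum and a maximum frame size. Condensed from handle_rcv_mpa() further down, a caller uses it roughly like this:

	u32 res_type;

	if (parse_mpa(cm_node, skb->data, &res_type, skb->len))
		return;	/* malformed or oversized frame (-EINVAL) */

	if (res_type == NES_MPA_REQUEST_REJECT) {
		/* Only valid on an active open: the peer set
		 * IETF_MPA_FLAGS_REJECT in its reply. */
	}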
@@ -380,7 +411,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
380 411
381 new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); 412 new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
382 if (!new_send) 413 if (!new_send)
383 return -1; 414 return -ENOMEM;
384 415
385 /* new_send->timetosend = currenttime */ 416 /* new_send->timetosend = currenttime */
386 new_send->retrycount = NES_DEFAULT_RETRYS; 417 new_send->retrycount = NES_DEFAULT_RETRYS;
@@ -394,9 +425,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
394 425
395 if (type == NES_TIMER_TYPE_CLOSE) { 426 if (type == NES_TIMER_TYPE_CLOSE) {
396 new_send->timetosend += (HZ/10); 427 new_send->timetosend += (HZ/10);
397 spin_lock_irqsave(&cm_node->recv_list_lock, flags); 428 if (cm_node->recv_entry) {
398 list_add_tail(&new_send->list, &cm_node->recv_list); 429 WARN_ON(1);
399 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags); 430 return -EINVAL;
431 }
432 cm_node->recv_entry = new_send;
400 } 433 }
401 434
402 if (type == NES_TIMER_TYPE_SEND) { 435 if (type == NES_TIMER_TYPE_SEND) {
@@ -435,24 +468,78 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
435 return ret; 468 return ret;
436} 469}
437 470
471static void nes_retrans_expired(struct nes_cm_node *cm_node)
472{
473 switch (cm_node->state) {
474 case NES_CM_STATE_SYN_RCVD:
475 case NES_CM_STATE_CLOSING:
476 rem_ref_cm_node(cm_node->cm_core, cm_node);
477 break;
478 case NES_CM_STATE_LAST_ACK:
479 case NES_CM_STATE_FIN_WAIT1:
480 case NES_CM_STATE_MPAREJ_RCVD:
481 send_reset(cm_node, NULL);
482 break;
483 default:
484 create_event(cm_node, NES_CM_EVENT_ABORTED);
485 }
486}
487
488static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
489{
490 struct nes_timer_entry *recv_entry = cm_node->recv_entry;
491 struct iw_cm_id *cm_id = cm_node->cm_id;
492 struct nes_qp *nesqp;
493 unsigned long qplockflags;
494
495 if (!recv_entry)
496 return;
497 nesqp = (struct nes_qp *)recv_entry->skb;
498 if (nesqp) {
499 spin_lock_irqsave(&nesqp->lock, qplockflags);
500 if (nesqp->cm_id) {
501 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
502 "refcount = %d: HIT A "
503 "NES_TIMER_TYPE_CLOSE with something "
504 "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
505 atomic_read(&nesqp->refcount));
506 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
507 nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
508 nesqp->ibqp_state = IB_QPS_ERR;
509 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
510 nes_cm_disconn(nesqp);
511 } else {
512 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
513 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
514 "refcount = %d: HIT A "
515 "NES_TIMER_TYPE_CLOSE with nothing "
516 "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
517 atomic_read(&nesqp->refcount));
518 }
519 } else if (rem_node) {
520 /* TIME_WAIT state */
521 rem_ref_cm_node(cm_node->cm_core, cm_node);
522 }
523 if (cm_node->cm_id)
524 cm_id->rem_ref(cm_id);
525 kfree(recv_entry);
526 cm_node->recv_entry = NULL;
527}
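
With the recv_list/recv_list_lock pair gone, a cm_node carries at most one pending close-timer entry in recv_entry; schedule_nes_timer() enforces that single-entry invariant with the WARN_ON above. Note the type punning that handle_recv_entry() depends on: for NES_TIMER_TYPE_CLOSE entries the skb field holds the QP, not a socket buffer, hence the cast back:

	/* recv_entry->skb doubles as the QP pointer for close timers. */
	struct nes_qp *nesqp = (struct nes_qp *)recv_entry->skb;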
438 528
439/** 529/**
440 * nes_cm_timer_tick 530 * nes_cm_timer_tick
441 */ 531 */
442static void nes_cm_timer_tick(unsigned long pass) 532static void nes_cm_timer_tick(unsigned long pass)
443{ 533{
444 unsigned long flags, qplockflags; 534 unsigned long flags;
445 unsigned long nexttimeout = jiffies + NES_LONG_TIME; 535 unsigned long nexttimeout = jiffies + NES_LONG_TIME;
446 struct iw_cm_id *cm_id;
447 struct nes_cm_node *cm_node; 536 struct nes_cm_node *cm_node;
448 struct nes_timer_entry *send_entry, *recv_entry; 537 struct nes_timer_entry *send_entry, *recv_entry;
449 struct list_head *list_core, *list_core_temp; 538 struct list_head *list_core_temp;
450 struct list_head *list_node, *list_node_temp; 539 struct list_head *list_node;
451 struct nes_cm_core *cm_core = g_cm_core; 540 struct nes_cm_core *cm_core = g_cm_core;
452 struct nes_qp *nesqp;
453 u32 settimer = 0; 541 u32 settimer = 0;
454 int ret = NETDEV_TX_OK; 542 int ret = NETDEV_TX_OK;
455 enum nes_cm_node_state last_state;
456 543
457 struct list_head timer_list; 544 struct list_head timer_list;
458 INIT_LIST_HEAD(&timer_list); 545 INIT_LIST_HEAD(&timer_list);
@@ -461,7 +548,7 @@ static void nes_cm_timer_tick(unsigned long pass)
461 list_for_each_safe(list_node, list_core_temp, 548 list_for_each_safe(list_node, list_core_temp,
462 &cm_core->connected_nodes) { 549 &cm_core->connected_nodes) {
463 cm_node = container_of(list_node, struct nes_cm_node, list); 550 cm_node = container_of(list_node, struct nes_cm_node, list);
464 if (!list_empty(&cm_node->recv_list) || (cm_node->send_entry)) { 551 if ((cm_node->recv_entry) || (cm_node->send_entry)) {
465 add_ref_cm_node(cm_node); 552 add_ref_cm_node(cm_node);
466 list_add(&cm_node->timer_entry, &timer_list); 553 list_add(&cm_node->timer_entry, &timer_list);
467 } 554 }
@@ -471,54 +558,18 @@ static void nes_cm_timer_tick(unsigned long pass)
471 list_for_each_safe(list_node, list_core_temp, &timer_list) { 558 list_for_each_safe(list_node, list_core_temp, &timer_list) {
472 cm_node = container_of(list_node, struct nes_cm_node, 559 cm_node = container_of(list_node, struct nes_cm_node,
473 timer_entry); 560 timer_entry);
474 spin_lock_irqsave(&cm_node->recv_list_lock, flags); 561 recv_entry = cm_node->recv_entry;
475 list_for_each_safe(list_core, list_node_temp, 562
476 &cm_node->recv_list) { 563 if (recv_entry) {
477 recv_entry = container_of(list_core,
478 struct nes_timer_entry, list);
479 if (!recv_entry)
480 break;
481 if (time_after(recv_entry->timetosend, jiffies)) { 564 if (time_after(recv_entry->timetosend, jiffies)) {
482 if (nexttimeout > recv_entry->timetosend || 565 if (nexttimeout > recv_entry->timetosend ||
483 !settimer) { 566 !settimer) {
484 nexttimeout = recv_entry->timetosend; 567 nexttimeout = recv_entry->timetosend;
485 settimer = 1; 568 settimer = 1;
486 } 569 }
487 continue; 570 } else
488 } 571 handle_recv_entry(cm_node, 1);
489 list_del(&recv_entry->list);
490 cm_id = cm_node->cm_id;
491 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
492 nesqp = (struct nes_qp *)recv_entry->skb;
493 spin_lock_irqsave(&nesqp->lock, qplockflags);
494 if (nesqp->cm_id) {
495 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
496 "refcount = %d: HIT A "
497 "NES_TIMER_TYPE_CLOSE with something "
498 "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
499 atomic_read(&nesqp->refcount));
500 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
501 nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
502 nesqp->ibqp_state = IB_QPS_ERR;
503 spin_unlock_irqrestore(&nesqp->lock,
504 qplockflags);
505 nes_cm_disconn(nesqp);
506 } else {
507 spin_unlock_irqrestore(&nesqp->lock,
508 qplockflags);
509 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
510 "refcount = %d: HIT A "
511 "NES_TIMER_TYPE_CLOSE with nothing "
512 "to do!!!\n", nesqp->hwqp.qp_id, cm_id,
513 atomic_read(&nesqp->refcount));
514 }
515 if (cm_id)
516 cm_id->rem_ref(cm_id);
517
518 kfree(recv_entry);
519 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
520 } 572 }
521 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
522 573
523 spin_lock_irqsave(&cm_node->retrans_list_lock, flags); 574 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
524 do { 575 do {
@@ -533,12 +584,11 @@ static void nes_cm_timer_tick(unsigned long pass)
533 nexttimeout = 584 nexttimeout =
534 send_entry->timetosend; 585 send_entry->timetosend;
535 settimer = 1; 586 settimer = 1;
536 break;
537 } 587 }
538 } else { 588 } else {
539 free_retrans_entry(cm_node); 589 free_retrans_entry(cm_node);
540 break;
541 } 590 }
591 break;
542 } 592 }
543 593
544 if ((cm_node->state == NES_CM_STATE_TSA) || 594 if ((cm_node->state == NES_CM_STATE_TSA) ||
@@ -550,16 +600,12 @@ static void nes_cm_timer_tick(unsigned long pass)
550 if (!send_entry->retranscount || 600 if (!send_entry->retranscount ||
551 !send_entry->retrycount) { 601 !send_entry->retrycount) {
552 cm_packets_dropped++; 602 cm_packets_dropped++;
553 last_state = cm_node->state;
554 cm_node->state = NES_CM_STATE_CLOSED;
555 free_retrans_entry(cm_node); 603 free_retrans_entry(cm_node);
604
556 spin_unlock_irqrestore( 605 spin_unlock_irqrestore(
557 &cm_node->retrans_list_lock, flags); 606 &cm_node->retrans_list_lock, flags);
558 if (last_state == NES_CM_STATE_SYN_RCVD) 607 nes_retrans_expired(cm_node);
559 rem_ref_cm_node(cm_core, cm_node); 608 cm_node->state = NES_CM_STATE_CLOSED;
560 else
561 create_event(cm_node,
562 NES_CM_EVENT_ABORTED);
563 spin_lock_irqsave(&cm_node->retrans_list_lock, 609 spin_lock_irqsave(&cm_node->retrans_list_lock,
564 flags); 610 flags);
565 break; 611 break;
@@ -714,7 +760,7 @@ static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb)
714 skb = dev_alloc_skb(MAX_CM_BUFFER); 760 skb = dev_alloc_skb(MAX_CM_BUFFER);
715 if (!skb) { 761 if (!skb) {
716 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n"); 762 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
717 return -1; 763 return -ENOMEM;
718 } 764 }
719 765
720 form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags); 766 form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
@@ -778,14 +824,10 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
778 unsigned long flags; 824 unsigned long flags;
779 struct list_head *hte; 825 struct list_head *hte;
780 struct nes_cm_node *cm_node; 826 struct nes_cm_node *cm_node;
781 __be32 tmp_addr = cpu_to_be32(loc_addr);
782 827
783 /* get a handle on the hte */ 828 /* get a handle on the hte */
784 hte = &cm_core->connected_nodes; 829 hte = &cm_core->connected_nodes;
785 830
786 nes_debug(NES_DBG_CM, "Searching for an owner node: %pI4:%x from core %p->%p\n",
787 &tmp_addr, loc_port, cm_core, hte);
788
789 /* walk list and find cm_node associated with this session ID */ 831 /* walk list and find cm_node associated with this session ID */
790 spin_lock_irqsave(&cm_core->ht_lock, flags); 832 spin_lock_irqsave(&cm_core->ht_lock, flags);
791 list_for_each_entry(cm_node, hte, list) { 833 list_for_each_entry(cm_node, hte, list) {
@@ -875,7 +917,8 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
875static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, 917static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
876 struct nes_cm_listener *listener, int free_hanging_nodes) 918 struct nes_cm_listener *listener, int free_hanging_nodes)
877{ 919{
878 int ret = 1; 920 int ret = -EINVAL;
921 int err = 0;
879 unsigned long flags; 922 unsigned long flags;
880 struct list_head *list_pos = NULL; 923 struct list_head *list_pos = NULL;
881 struct list_head *list_temp = NULL; 924 struct list_head *list_temp = NULL;
@@ -904,10 +947,60 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
904 947
905 list_for_each_safe(list_pos, list_temp, &reset_list) { 948 list_for_each_safe(list_pos, list_temp, &reset_list) {
906 cm_node = container_of(list_pos, struct nes_cm_node, 949 cm_node = container_of(list_pos, struct nes_cm_node,
907 reset_entry); 950 reset_entry);
908 cleanup_retrans_entry(cm_node); 951 {
909 send_reset(cm_node, NULL); 952 struct nes_cm_node *loopback = cm_node->loopbackpartner;
910 rem_ref_cm_node(cm_node->cm_core, cm_node); 953 if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
954 rem_ref_cm_node(cm_node->cm_core, cm_node);
955 } else {
956 if (!loopback) {
957 cleanup_retrans_entry(cm_node);
958 err = send_reset(cm_node, NULL);
959 if (err) {
960 cm_node->state =
961 NES_CM_STATE_CLOSED;
962 WARN_ON(1);
963 } else {
964 cm_node->state =
965 NES_CM_STATE_CLOSED;
966 rem_ref_cm_node(
967 cm_node->cm_core,
968 cm_node);
969 }
970 } else {
971 struct nes_cm_event event;
972
973 event.cm_node = loopback;
974 event.cm_info.rem_addr =
975 loopback->rem_addr;
976 event.cm_info.loc_addr =
977 loopback->loc_addr;
978 event.cm_info.rem_port =
979 loopback->rem_port;
980 event.cm_info.loc_port =
981 loopback->loc_port;
982 event.cm_info.cm_id = loopback->cm_id;
983 cm_event_connect_error(&event);
984 loopback->state = NES_CM_STATE_CLOSED;
985
986 event.cm_node = cm_node;
987 event.cm_info.rem_addr =
988 cm_node->rem_addr;
989 event.cm_info.loc_addr =
990 cm_node->loc_addr;
991 event.cm_info.rem_port =
992 cm_node->rem_port;
993 event.cm_info.loc_port =
994 cm_node->loc_port;
995 event.cm_info.cm_id = cm_node->cm_id;
996 cm_event_reset(&event);
997
998 rem_ref_cm_node(cm_node->cm_core,
999 cm_node);
1000
1001 }
1002 }
1003 }
911 } 1004 }
912 1005
913 spin_lock_irqsave(&cm_core->listen_list_lock, flags); 1006 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
@@ -968,6 +1061,7 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
968 if (cm_node->accept_pend) { 1061 if (cm_node->accept_pend) {
969 BUG_ON(!cm_node->listener); 1062 BUG_ON(!cm_node->listener);
970 atomic_dec(&cm_node->listener->pend_accepts_cnt); 1063 atomic_dec(&cm_node->listener->pend_accepts_cnt);
1064 cm_node->accept_pend = 0;
971 BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0); 1065 BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
972 } 1066 }
973 1067
@@ -994,7 +1088,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
994 memset(&fl, 0, sizeof fl); 1088 memset(&fl, 0, sizeof fl);
995 fl.nl_u.ip4_u.daddr = htonl(dst_ip); 1089 fl.nl_u.ip4_u.daddr = htonl(dst_ip);
996 if (ip_route_output_key(&init_net, &rt, &fl)) { 1090 if (ip_route_output_key(&init_net, &rt, &fl)) {
997 printk("%s: ip_route_output_key failed for 0x%08X\n", 1091 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
998 __func__, dst_ip); 1092 __func__, dst_ip);
999 return rc; 1093 return rc;
1000 } 1094 }
@@ -1057,8 +1151,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1057 cm_node->cm_id); 1151 cm_node->cm_id);
1058 1152
1059 spin_lock_init(&cm_node->retrans_list_lock); 1153 spin_lock_init(&cm_node->retrans_list_lock);
1060 INIT_LIST_HEAD(&cm_node->recv_list);
1061 spin_lock_init(&cm_node->recv_list_lock);
1062 1154
1063 cm_node->loopbackpartner = NULL; 1155 cm_node->loopbackpartner = NULL;
1064 atomic_set(&cm_node->ref_count, 1); 1156 atomic_set(&cm_node->ref_count, 1);
@@ -1126,10 +1218,7 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node)
1126static int rem_ref_cm_node(struct nes_cm_core *cm_core, 1218static int rem_ref_cm_node(struct nes_cm_core *cm_core,
1127 struct nes_cm_node *cm_node) 1219 struct nes_cm_node *cm_node)
1128{ 1220{
1129 unsigned long flags, qplockflags; 1221 unsigned long flags;
1130 struct nes_timer_entry *recv_entry;
1131 struct iw_cm_id *cm_id;
1132 struct list_head *list_core, *list_node_temp;
1133 struct nes_qp *nesqp; 1222 struct nes_qp *nesqp;
1134 1223
1135 if (!cm_node) 1224 if (!cm_node)
@@ -1150,38 +1239,9 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
1150 atomic_dec(&cm_node->listener->pend_accepts_cnt); 1239 atomic_dec(&cm_node->listener->pend_accepts_cnt);
1151 BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0); 1240 BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
1152 } 1241 }
1153 BUG_ON(cm_node->send_entry); 1242 WARN_ON(cm_node->send_entry);
1154 spin_lock_irqsave(&cm_node->recv_list_lock, flags); 1243 if (cm_node->recv_entry)
1155 list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) { 1244 handle_recv_entry(cm_node, 0);
1156 recv_entry = container_of(list_core, struct nes_timer_entry,
1157 list);
1158 list_del(&recv_entry->list);
1159 cm_id = cm_node->cm_id;
1160 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
1161 nesqp = (struct nes_qp *)recv_entry->skb;
1162 spin_lock_irqsave(&nesqp->lock, qplockflags);
1163 if (nesqp->cm_id) {
1164 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
1165 "NES_TIMER_TYPE_CLOSE with something to do!\n",
1166 nesqp->hwqp.qp_id, cm_id);
1167 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
1168 nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
1169 nesqp->ibqp_state = IB_QPS_ERR;
1170 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
1171 nes_cm_disconn(nesqp);
1172 } else {
1173 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
1174 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
1175 "NES_TIMER_TYPE_CLOSE with nothing to do!\n",
1176 nesqp->hwqp.qp_id, cm_id);
1177 }
1178 cm_id->rem_ref(cm_id);
1179
1180 kfree(recv_entry);
1181 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
1182 }
1183 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
1184
1185 if (cm_node->listener) { 1245 if (cm_node->listener) {
1186 mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0); 1246 mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
1187 } else { 1247 } else {
@@ -1266,8 +1326,7 @@ static void drop_packet(struct sk_buff *skb)
1266 dev_kfree_skb_any(skb); 1326 dev_kfree_skb_any(skb);
1267} 1327}
1268 1328
1269static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, 1329static void handle_fin_pkt(struct nes_cm_node *cm_node)
1270 struct tcphdr *tcph)
1271{ 1330{
1272 nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. " 1331 nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
1273 "refcnt=%d\n", cm_node, cm_node->state, 1332 "refcnt=%d\n", cm_node, cm_node->state,
@@ -1279,23 +1338,30 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1279 case NES_CM_STATE_SYN_SENT: 1338 case NES_CM_STATE_SYN_SENT:
1280 case NES_CM_STATE_ESTABLISHED: 1339 case NES_CM_STATE_ESTABLISHED:
1281 case NES_CM_STATE_MPAREQ_SENT: 1340 case NES_CM_STATE_MPAREQ_SENT:
1341 case NES_CM_STATE_MPAREJ_RCVD:
1282 cm_node->state = NES_CM_STATE_LAST_ACK; 1342 cm_node->state = NES_CM_STATE_LAST_ACK;
1283 send_fin(cm_node, skb); 1343 send_fin(cm_node, NULL);
1284 break; 1344 break;
1285 case NES_CM_STATE_FIN_WAIT1: 1345 case NES_CM_STATE_FIN_WAIT1:
1286 cm_node->state = NES_CM_STATE_CLOSING; 1346 cm_node->state = NES_CM_STATE_CLOSING;
1287 send_ack(cm_node, skb); 1347 send_ack(cm_node, NULL);
1348 /* Wait for ACK as this is a simultaneous close..
1349 * After we receive ACK, do not send anything..
1350 * Just rm the node.. Done.. */
1288 break; 1351 break;
1289 case NES_CM_STATE_FIN_WAIT2: 1352 case NES_CM_STATE_FIN_WAIT2:
1290 cm_node->state = NES_CM_STATE_TIME_WAIT; 1353 cm_node->state = NES_CM_STATE_TIME_WAIT;
1291 send_ack(cm_node, skb); 1354 send_ack(cm_node, NULL);
1355 schedule_nes_timer(cm_node, NULL, NES_TIMER_TYPE_CLOSE, 1, 0);
1356 break;
1357 case NES_CM_STATE_TIME_WAIT:
1292 cm_node->state = NES_CM_STATE_CLOSED; 1358 cm_node->state = NES_CM_STATE_CLOSED;
1359 rem_ref_cm_node(cm_node->cm_core, cm_node);
1293 break; 1360 break;
1294 case NES_CM_STATE_TSA: 1361 case NES_CM_STATE_TSA:
1295 default: 1362 default:
1296 nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n", 1363 nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n",
1297 cm_node, cm_node->state); 1364 cm_node, cm_node->state);
1298 drop_packet(skb);
1299 break; 1365 break;
1300 } 1366 }
1301} 1367}
@@ -1341,23 +1407,35 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1341 cleanup_retrans_entry(cm_node); 1407 cleanup_retrans_entry(cm_node);
1342 drop_packet(skb); 1408 drop_packet(skb);
1343 break; 1409 break;
1410 case NES_CM_STATE_TIME_WAIT:
1411 cleanup_retrans_entry(cm_node);
1412 cm_node->state = NES_CM_STATE_CLOSED;
1413 rem_ref_cm_node(cm_node->cm_core, cm_node);
1414 drop_packet(skb);
1415 break;
1416 case NES_CM_STATE_FIN_WAIT1:
1417 cleanup_retrans_entry(cm_node);
1418 nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
1344 default: 1419 default:
1345 drop_packet(skb); 1420 drop_packet(skb);
1346 break; 1421 break;
1347 } 1422 }
1348} 1423}
1349 1424
1350static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb, 1425
1351 enum nes_cm_event_type type) 1426static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
1352{ 1427{
1353 1428
1354 int ret; 1429 int ret = 0;
1355 int datasize = skb->len; 1430 int datasize = skb->len;
1356 u8 *dataloc = skb->data; 1431 u8 *dataloc = skb->data;
1357 ret = parse_mpa(cm_node, dataloc, datasize); 1432
1358 if (ret < 0) { 1433 enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN;
1434 u32 res_type;
1435 ret = parse_mpa(cm_node, dataloc, &res_type, datasize);
1436 if (ret) {
1359 nes_debug(NES_DBG_CM, "didn't like MPA Request\n"); 1437 nes_debug(NES_DBG_CM, "didn't like MPA Request\n");
1360 if (type == NES_CM_EVENT_CONNECTED) { 1438 if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) {
1361 nes_debug(NES_DBG_CM, "%s[%u] create abort for " 1439 nes_debug(NES_DBG_CM, "%s[%u] create abort for "
1362 "cm_node=%p listener=%p state=%d\n", __func__, 1440 "cm_node=%p listener=%p state=%d\n", __func__,
1363 __LINE__, cm_node, cm_node->listener, 1441 __LINE__, cm_node, cm_node->listener,
@@ -1366,18 +1444,38 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb,
1366 } else { 1444 } else {
1367 passive_open_err(cm_node, skb, 1); 1445 passive_open_err(cm_node, skb, 1);
1368 } 1446 }
1369 } else { 1447 return;
1370 cleanup_retrans_entry(cm_node); 1448 }
1371 dev_kfree_skb_any(skb); 1449
1372 if (type == NES_CM_EVENT_CONNECTED) 1450 switch (cm_node->state) {
1451 case NES_CM_STATE_ESTABLISHED:
1452 if (res_type == NES_MPA_REQUEST_REJECT) {
1453 /* Big problem: we are the passive side receiving the
1454 * MPA request, so this should never be a reject; an
1455 * MPA reject can only arrive on an active open. */
1456 WARN_ON(1);
1457 }
1458 cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
1459 type = NES_CM_EVENT_MPA_REQ;
1460 atomic_set(&cm_node->passive_state,
1461 NES_PASSIVE_STATE_INDICATED);
1462 break;
1463 case NES_CM_STATE_MPAREQ_SENT:
1464 if (res_type == NES_MPA_REQUEST_REJECT) {
1465 type = NES_CM_EVENT_MPA_REJECT;
1466 cm_node->state = NES_CM_STATE_MPAREJ_RCVD;
1467 } else {
1468 type = NES_CM_EVENT_CONNECTED;
1373 cm_node->state = NES_CM_STATE_TSA; 1469 cm_node->state = NES_CM_STATE_TSA;
1374 else 1470 }
1375 atomic_set(&cm_node->passive_state,
1376 NES_PASSIVE_STATE_INDICATED);
1377 create_event(cm_node, type);
1378 1471
1472 break;
1473 default:
1474 WARN_ON(1);
1475 break;
1379 } 1476 }
1380 return ; 1477 dev_kfree_skb_any(skb);
1478 create_event(cm_node, type);
1381} 1479}
1382 1480
1383static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb) 1481static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
@@ -1465,8 +1563,6 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1465 break; 1563 break;
1466 case NES_CM_STATE_LISTENING: 1564 case NES_CM_STATE_LISTENING:
1467 /* Passive OPEN */ 1565 /* Passive OPEN */
1468 cm_node->accept_pend = 1;
1469 atomic_inc(&cm_node->listener->pend_accepts_cnt);
1470 if (atomic_read(&cm_node->listener->pend_accepts_cnt) > 1566 if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
1471 cm_node->listener->backlog) { 1567 cm_node->listener->backlog) {
1472 nes_debug(NES_DBG_CM, "drop syn due to backlog " 1568 nes_debug(NES_DBG_CM, "drop syn due to backlog "
@@ -1484,6 +1580,9 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1484 } 1580 }
1485 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1; 1581 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
1486 BUG_ON(cm_node->send_entry); 1582 BUG_ON(cm_node->send_entry);
1583 cm_node->accept_pend = 1;
1584 atomic_inc(&cm_node->listener->pend_accepts_cnt);
1585
1487 cm_node->state = NES_CM_STATE_SYN_RCVD; 1586 cm_node->state = NES_CM_STATE_SYN_RCVD;
1488 send_syn(cm_node, 1, skb); 1587 send_syn(cm_node, 1, skb);
1489 break; 1588 break;
@@ -1518,6 +1617,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1518 inc_sequence = ntohl(tcph->seq); 1617 inc_sequence = ntohl(tcph->seq);
1519 switch (cm_node->state) { 1618 switch (cm_node->state) {
1520 case NES_CM_STATE_SYN_SENT: 1619 case NES_CM_STATE_SYN_SENT:
1620 cleanup_retrans_entry(cm_node);
1521 /* active open */ 1621 /* active open */
1522 if (check_syn(cm_node, tcph, skb)) 1622 if (check_syn(cm_node, tcph, skb))
1523 return; 1623 return;
@@ -1567,10 +1667,7 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1567 u32 rem_seq; 1667 u32 rem_seq;
1568 int ret; 1668 int ret;
1569 int optionsize; 1669 int optionsize;
1570 u32 temp_seq = cm_node->tcp_cntxt.loc_seq_num;
1571
1572 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); 1670 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
1573 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1574 1671
1575 if (check_seq(cm_node, tcph, skb)) 1672 if (check_seq(cm_node, tcph, skb))
1576 return; 1673 return;
@@ -1580,7 +1677,7 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1580 rem_seq = ntohl(tcph->seq); 1677 rem_seq = ntohl(tcph->seq);
1581 rem_seq_ack = ntohl(tcph->ack_seq); 1678 rem_seq_ack = ntohl(tcph->ack_seq);
1582 datasize = skb->len; 1679 datasize = skb->len;
1583 1680 cleanup_retrans_entry(cm_node);
1584 switch (cm_node->state) { 1681 switch (cm_node->state) {
1585 case NES_CM_STATE_SYN_RCVD: 1682 case NES_CM_STATE_SYN_RCVD:
1586 /* Passive OPEN */ 1683 /* Passive OPEN */
@@ -1588,7 +1685,6 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1588 if (ret) 1685 if (ret)
1589 break; 1686 break;
1590 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); 1687 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
1591 cm_node->tcp_cntxt.loc_seq_num = temp_seq;
1592 if (cm_node->tcp_cntxt.rem_ack_num != 1688 if (cm_node->tcp_cntxt.rem_ack_num !=
1593 cm_node->tcp_cntxt.loc_seq_num) { 1689 cm_node->tcp_cntxt.loc_seq_num) {
1594 nes_debug(NES_DBG_CM, "rem_ack_num != loc_seq_num\n"); 1690 nes_debug(NES_DBG_CM, "rem_ack_num != loc_seq_num\n");
@@ -1597,31 +1693,30 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1597 return; 1693 return;
1598 } 1694 }
1599 cm_node->state = NES_CM_STATE_ESTABLISHED; 1695 cm_node->state = NES_CM_STATE_ESTABLISHED;
1696 cleanup_retrans_entry(cm_node);
1600 if (datasize) { 1697 if (datasize) {
1601 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; 1698 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
1602 cm_node->state = NES_CM_STATE_MPAREQ_RCVD; 1699 handle_rcv_mpa(cm_node, skb);
1603 handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_MPA_REQ); 1700 } else { /* rcvd ACK only */
1604 } else { /* rcvd ACK only */
1605 dev_kfree_skb_any(skb); 1701 dev_kfree_skb_any(skb);
1606 cleanup_retrans_entry(cm_node); 1702 cleanup_retrans_entry(cm_node);
1607 } 1703 }
1608 break; 1704 break;
1609 case NES_CM_STATE_ESTABLISHED: 1705 case NES_CM_STATE_ESTABLISHED:
1610 /* Passive OPEN */ 1706 /* Passive OPEN */
1611 /* We expect mpa frame to be received only */ 1707 cleanup_retrans_entry(cm_node);
1612 if (datasize) { 1708 if (datasize) {
1613 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; 1709 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
1614 cm_node->state = NES_CM_STATE_MPAREQ_RCVD; 1710 handle_rcv_mpa(cm_node, skb);
1615 handle_rcv_mpa(cm_node, skb,
1616 NES_CM_EVENT_MPA_REQ);
1617 } else 1711 } else
1618 drop_packet(skb); 1712 drop_packet(skb);
1619 break; 1713 break;
1620 case NES_CM_STATE_MPAREQ_SENT: 1714 case NES_CM_STATE_MPAREQ_SENT:
1715 cleanup_retrans_entry(cm_node);
1621 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); 1716 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
1622 if (datasize) { 1717 if (datasize) {
1623 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; 1718 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
1624 handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_CONNECTED); 1719 handle_rcv_mpa(cm_node, skb);
1625 } else { /* Could be just an ack pkt.. */ 1720 } else { /* Could be just an ack pkt.. */
1626 cleanup_retrans_entry(cm_node); 1721 cleanup_retrans_entry(cm_node);
1627 dev_kfree_skb_any(skb); 1722 dev_kfree_skb_any(skb);
@@ -1632,13 +1727,24 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1632 cleanup_retrans_entry(cm_node); 1727 cleanup_retrans_entry(cm_node);
1633 send_reset(cm_node, skb); 1728 send_reset(cm_node, skb);
1634 break; 1729 break;
1730 case NES_CM_STATE_LAST_ACK:
1731 cleanup_retrans_entry(cm_node);
1732 cm_node->state = NES_CM_STATE_CLOSED;
1733 cm_node->cm_id->rem_ref(cm_node->cm_id);
1734 case NES_CM_STATE_CLOSING:
1735 cleanup_retrans_entry(cm_node);
1736 rem_ref_cm_node(cm_node->cm_core, cm_node);
1737 drop_packet(skb);
1738 break;
1635 case NES_CM_STATE_FIN_WAIT1: 1739 case NES_CM_STATE_FIN_WAIT1:
1740 cleanup_retrans_entry(cm_node);
1741 drop_packet(skb);
1742 cm_node->state = NES_CM_STATE_FIN_WAIT2;
1743 break;
1636 case NES_CM_STATE_SYN_SENT: 1744 case NES_CM_STATE_SYN_SENT:
1637 case NES_CM_STATE_FIN_WAIT2: 1745 case NES_CM_STATE_FIN_WAIT2:
1638 case NES_CM_STATE_TSA: 1746 case NES_CM_STATE_TSA:
1639 case NES_CM_STATE_MPAREQ_RCVD: 1747 case NES_CM_STATE_MPAREQ_RCVD:
1640 case NES_CM_STATE_LAST_ACK:
1641 case NES_CM_STATE_CLOSING:
1642 case NES_CM_STATE_UNKNOWN: 1748 case NES_CM_STATE_UNKNOWN:
1643 default: 1749 default:
1644 drop_packet(skb); 1750 drop_packet(skb);
@@ -1748,6 +1854,7 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
1748{ 1854{
1749 enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN; 1855 enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN;
1750 struct tcphdr *tcph = tcp_hdr(skb); 1856 struct tcphdr *tcph = tcp_hdr(skb);
1857 u32 fin_set = 0;
1751 skb_pull(skb, ip_hdr(skb)->ihl << 2); 1858 skb_pull(skb, ip_hdr(skb)->ihl << 2);
1752 1859
1753 nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d " 1860 nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
@@ -1760,10 +1867,10 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
1760 pkt_type = NES_PKT_TYPE_SYN; 1867 pkt_type = NES_PKT_TYPE_SYN;
1761 if (tcph->ack) 1868 if (tcph->ack)
1762 pkt_type = NES_PKT_TYPE_SYNACK; 1869 pkt_type = NES_PKT_TYPE_SYNACK;
1763 } else if (tcph->fin) 1870 } else if (tcph->ack)
1764 pkt_type = NES_PKT_TYPE_FIN;
1765 else if (tcph->ack)
1766 pkt_type = NES_PKT_TYPE_ACK; 1871 pkt_type = NES_PKT_TYPE_ACK;
1872 if (tcph->fin)
1873 fin_set = 1;
1767 1874
1768 switch (pkt_type) { 1875 switch (pkt_type) {
1769 case NES_PKT_TYPE_SYN: 1876 case NES_PKT_TYPE_SYN:
@@ -1774,15 +1881,16 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
1774 break; 1881 break;
1775 case NES_PKT_TYPE_ACK: 1882 case NES_PKT_TYPE_ACK:
1776 handle_ack_pkt(cm_node, skb, tcph); 1883 handle_ack_pkt(cm_node, skb, tcph);
1884 if (fin_set)
1885 handle_fin_pkt(cm_node);
1777 break; 1886 break;
1778 case NES_PKT_TYPE_RST: 1887 case NES_PKT_TYPE_RST:
1779 handle_rst_pkt(cm_node, skb, tcph); 1888 handle_rst_pkt(cm_node, skb, tcph);
1780 break; 1889 break;
1781 case NES_PKT_TYPE_FIN:
1782 handle_fin_pkt(cm_node, skb, tcph);
1783 break;
1784 default: 1890 default:
1785 drop_packet(skb); 1891 drop_packet(skb);
1892 if (fin_set)
1893 handle_fin_pkt(cm_node);
1786 break; 1894 break;
1787 } 1895 }
1788} 1896}
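
FIN is no longer a packet type of its own: the classifier keys on SYN/SYN-ACK/RST/ACK and latches the FIN bit in fin_set, so a combined FIN+ACK segment (previously classified as a bare FIN) now has its ACK processed first and the FIN handled afterwards. The resulting shape, condensed:

	u32 fin_set = tcph->fin;	/* latched independently of pkt_type */

	switch (pkt_type) {
	case NES_PKT_TYPE_ACK:
		handle_ack_pkt(cm_node, skb, tcph);
		if (fin_set)
			handle_fin_pkt(cm_node);
		break;
	default:
		drop_packet(skb);
		if (fin_set)
			handle_fin_pkt(cm_node);
		break;
	}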
@@ -1925,7 +2033,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1925 loopbackremotenode->tcp_cntxt.rcv_wscale; 2033 loopbackremotenode->tcp_cntxt.rcv_wscale;
1926 loopbackremotenode->tcp_cntxt.snd_wscale = 2034 loopbackremotenode->tcp_cntxt.snd_wscale =
1927 cm_node->tcp_cntxt.rcv_wscale; 2035 cm_node->tcp_cntxt.rcv_wscale;
1928 2036 loopbackremotenode->state = NES_CM_STATE_MPAREQ_RCVD;
1929 create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ); 2037 create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
1930 } 2038 }
1931 return cm_node; 2039 return cm_node;
@@ -1980,7 +2088,11 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
1980 struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node) 2088 struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
1981{ 2089{
1982 int ret = 0; 2090 int ret = 0;
2091 int err = 0;
1983 int passive_state; 2092 int passive_state;
2093 struct nes_cm_event event;
2094 struct iw_cm_id *cm_id = cm_node->cm_id;
2095 struct nes_cm_node *loopback = cm_node->loopbackpartner;
1984 2096
1985 nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n", 2097 nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n",
1986 __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state); 2098 __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);
@@ -1989,12 +2101,38 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
1989 return ret; 2101 return ret;
1990 cleanup_retrans_entry(cm_node); 2102 cleanup_retrans_entry(cm_node);
1991 2103
1992 passive_state = atomic_add_return(1, &cm_node->passive_state); 2104 if (!loopback) {
1993 cm_node->state = NES_CM_STATE_CLOSED; 2105 passive_state = atomic_add_return(1, &cm_node->passive_state);
1994 if (passive_state == NES_SEND_RESET_EVENT) 2106 if (passive_state == NES_SEND_RESET_EVENT) {
2107 cm_node->state = NES_CM_STATE_CLOSED;
2108 rem_ref_cm_node(cm_core, cm_node);
2109 } else {
2110 ret = send_mpa_reject(cm_node);
2111 if (ret) {
2112 cm_node->state = NES_CM_STATE_CLOSED;
2113 err = send_reset(cm_node, NULL);
2114 if (err)
2115 WARN_ON(1);
2116 } else
2117 cm_id->add_ref(cm_id);
2118 }
2119 } else {
2120 cm_node->cm_id = NULL;
2121 event.cm_node = loopback;
2122 event.cm_info.rem_addr = loopback->rem_addr;
2123 event.cm_info.loc_addr = loopback->loc_addr;
2124 event.cm_info.rem_port = loopback->rem_port;
2125 event.cm_info.loc_port = loopback->loc_port;
2126 event.cm_info.cm_id = loopback->cm_id;
2127 cm_event_mpa_reject(&event);
1995 rem_ref_cm_node(cm_core, cm_node); 2128 rem_ref_cm_node(cm_core, cm_node);
1996 else 2129 loopback->state = NES_CM_STATE_CLOSING;
1997 ret = send_reset(cm_node, NULL); 2130
2131 cm_id = loopback->cm_id;
2132 rem_ref_cm_node(cm_core, loopback);
2133 cm_id->rem_ref(cm_id);
2134 }
2135
1998 return ret; 2136 return ret;
1999} 2137}
2000 2138
@@ -2031,6 +2169,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
2031 case NES_CM_STATE_CLOSING: 2169 case NES_CM_STATE_CLOSING:
2032 ret = -1; 2170 ret = -1;
2033 break; 2171 break;
2172 case NES_CM_STATE_MPAREJ_RCVD:
2034 case NES_CM_STATE_LISTENING: 2173 case NES_CM_STATE_LISTENING:
2035 case NES_CM_STATE_UNKNOWN: 2174 case NES_CM_STATE_UNKNOWN:
2036 case NES_CM_STATE_INITED: 2175 case NES_CM_STATE_INITED:
@@ -2227,15 +2366,15 @@ static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
2227 int ret = 0; 2366 int ret = 0;
2228 2367
2229 switch (type) { 2368 switch (type) {
2230 case NES_CM_SET_PKT_SIZE: 2369 case NES_CM_SET_PKT_SIZE:
2231 cm_core->mtu = value; 2370 cm_core->mtu = value;
2232 break; 2371 break;
2233 case NES_CM_SET_FREE_PKT_Q_SIZE: 2372 case NES_CM_SET_FREE_PKT_Q_SIZE:
2234 cm_core->free_tx_pkt_max = value; 2373 cm_core->free_tx_pkt_max = value;
2235 break; 2374 break;
2236 default: 2375 default:
2237 /* unknown set option */ 2376 /* unknown set option */
2238 ret = -EINVAL; 2377 ret = -EINVAL;
2239 } 2378 }
2240 2379
2241 return ret; 2380 return ret;
@@ -2625,9 +2764,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2625 NES_QPCONTEXT_ORDIRD_WRPDU); 2764 NES_QPCONTEXT_ORDIRD_WRPDU);
2626 } else { 2765 } else {
2627 nesqp->nesqp_context->ird_ord_sizes |= 2766 nesqp->nesqp_context->ird_ord_sizes |=
2628 cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | 2767 cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU);
2629 NES_QPCONTEXT_ORDIRD_WRPDU |
2630 NES_QPCONTEXT_ORDIRD_ALSMM));
2631 } 2768 }
2632 nesqp->skip_lsmm = 1; 2769 nesqp->skip_lsmm = 1;
2633 2770
@@ -2749,23 +2886,35 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2749int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2886int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2750{ 2887{
2751 struct nes_cm_node *cm_node; 2888 struct nes_cm_node *cm_node;
2889 struct nes_cm_node *loopback;
2890
2752 struct nes_cm_core *cm_core; 2891 struct nes_cm_core *cm_core;
2753 2892
2754 atomic_inc(&cm_rejects); 2893 atomic_inc(&cm_rejects);
2755 cm_node = (struct nes_cm_node *) cm_id->provider_data; 2894 cm_node = (struct nes_cm_node *) cm_id->provider_data;
2895 loopback = cm_node->loopbackpartner;
2756 cm_core = cm_node->cm_core; 2896 cm_core = cm_node->cm_core;
2897 cm_node->cm_id = cm_id;
2757 cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len; 2898 cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;
2758 2899
2900 if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
2901 return -EINVAL;
2902
2759 strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP); 2903 strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
2760 memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len); 2904 if (loopback) {
2905 memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
2906 loopback->mpa_frame.priv_data_len = pdata_len;
2907 loopback->mpa_frame_size = sizeof(struct ietf_mpa_frame) +
2908 pdata_len;
2909 } else {
2910 memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
2911 cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
2912 }
2761 2913
2762 cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
2763 cm_node->mpa_frame.rev = mpa_version; 2914 cm_node->mpa_frame.rev = mpa_version;
2764 cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT; 2915 cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;
2765 2916
2766 cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node); 2917 return cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
2767
2768 return 0;
2769} 2918}
2770 2919
2771 2920
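
nes_reject() now handles loopback pairs as well as wire peers: for loopback the private data is copied into the partner node with the length kept in host order, while for wire peers it lands in the local MPA frame with the length stored big-endian, and the result of the core's reject callback is propagated to the caller. Nothing changes for a ULP, which still rejects through the iw_cm wrapper; a minimal sketch, where my_conn_req_handler is a hypothetical IW_CM_EVENT_CONNECT_REQUEST handler:

	/* ULP sketch: refuse an incoming connect request with
	 * private data explaining why. */
	static int my_conn_req_handler(struct iw_cm_id *cm_id,
				       struct iw_cm_event *event)
	{
		static const char reason[] = "no capacity";

		return iw_cm_reject(cm_id, reason, sizeof(reason));
	}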
@@ -3274,13 +3423,56 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
3274 cm_event.remote_addr.sin_family = AF_INET; 3423 cm_event.remote_addr.sin_family = AF_INET;
3275 cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port); 3424 cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
3276 cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr); 3425 cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
3426 cm_event.private_data = cm_node->mpa_frame_buf;
3427 cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
3428
3429 ret = cm_id->event_handler(cm_id, &cm_event);
3430 if (ret)
3431 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
3432 __func__, __LINE__, ret);
3433 return;
3434}
3435
3436
3437static void cm_event_mpa_reject(struct nes_cm_event *event)
3438{
3439 struct iw_cm_id *cm_id;
3440 struct iw_cm_event cm_event;
3441 struct nes_cm_node *cm_node;
3442 int ret;
3443
3444 cm_node = event->cm_node;
3445 if (!cm_node)
3446 return;
3447 cm_id = cm_node->cm_id;
3448
3449 atomic_inc(&cm_connect_reqs);
3450 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
3451 cm_node, cm_id, jiffies);
3452
3453 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
3454 cm_event.status = -ECONNREFUSED;
3455 cm_event.provider_data = cm_id->provider_data;
3456
3457 cm_event.local_addr.sin_family = AF_INET;
3458 cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
3459 cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
3460
3461 cm_event.remote_addr.sin_family = AF_INET;
3462 cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
3463 cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
3277 3464
3278 cm_event.private_data = cm_node->mpa_frame_buf; 3465 cm_event.private_data = cm_node->mpa_frame_buf;
3279 cm_event.private_data_len = (u8) cm_node->mpa_frame_size; 3466 cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
3467
3468 nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, "
3469 "remove_addr=%08x\n",
3470 cm_event.local_addr.sin_addr.s_addr,
3471 cm_event.remote_addr.sin_addr.s_addr);
3280 3472
3281 ret = cm_id->event_handler(cm_id, &cm_event); 3473 ret = cm_id->event_handler(cm_id, &cm_event);
3282 if (ret) 3474 if (ret)
3283 printk("%s[%u] OFA CM event_handler returned, ret=%d\n", 3475 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
3284 __func__, __LINE__, ret); 3476 __func__, __LINE__, ret);
3285 3477
3286 return; 3478 return;
@@ -3345,6 +3537,14 @@ static void nes_cm_event_handler(struct work_struct *work)
3345 cm_event_connected(event); 3537 cm_event_connected(event);
3346 nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n"); 3538 nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
3347 break; 3539 break;
3540 case NES_CM_EVENT_MPA_REJECT:
3541 if ((!event->cm_node->cm_id) ||
3542 (event->cm_node->state == NES_CM_STATE_TSA))
3543 break;
3544 cm_event_mpa_reject(event);
3545 nes_debug(NES_DBG_CM, "CM Event: REJECT\n");
3546 break;
3547
3348 case NES_CM_EVENT_ABORTED: 3548 case NES_CM_EVENT_ABORTED:
3349 if ((!event->cm_node->cm_id) || 3549 if ((!event->cm_node->cm_id) ||
3350 (event->cm_node->state == NES_CM_STATE_TSA)) 3550 (event->cm_node->state == NES_CM_STATE_TSA))
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index fafa35042ebd..d5f778202eb7 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -39,6 +39,9 @@
39#define NES_MANAGE_APBVT_DEL 0 39#define NES_MANAGE_APBVT_DEL 0
40#define NES_MANAGE_APBVT_ADD 1 40#define NES_MANAGE_APBVT_ADD 1
41 41
42#define NES_MPA_REQUEST_ACCEPT 1
43#define NES_MPA_REQUEST_REJECT 2
44
42/* IETF MPA -- defines, enums, structs */ 45/* IETF MPA -- defines, enums, structs */
43#define IEFT_MPA_KEY_REQ "MPA ID Req Frame" 46#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
44#define IEFT_MPA_KEY_REP "MPA ID Rep Frame" 47#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
@@ -186,6 +189,7 @@ enum nes_cm_node_state {
186 NES_CM_STATE_ACCEPTING, 189 NES_CM_STATE_ACCEPTING,
187 NES_CM_STATE_MPAREQ_SENT, 190 NES_CM_STATE_MPAREQ_SENT,
188 NES_CM_STATE_MPAREQ_RCVD, 191 NES_CM_STATE_MPAREQ_RCVD,
192 NES_CM_STATE_MPAREJ_RCVD,
189 NES_CM_STATE_TSA, 193 NES_CM_STATE_TSA,
190 NES_CM_STATE_FIN_WAIT1, 194 NES_CM_STATE_FIN_WAIT1,
191 NES_CM_STATE_FIN_WAIT2, 195 NES_CM_STATE_FIN_WAIT2,
@@ -278,13 +282,12 @@ struct nes_cm_node {
278 struct nes_timer_entry *send_entry; 282 struct nes_timer_entry *send_entry;
279 283
280 spinlock_t retrans_list_lock; 284 spinlock_t retrans_list_lock;
281 struct list_head recv_list; 285 struct nes_timer_entry *recv_entry;
282 spinlock_t recv_list_lock;
283 286
284 int send_write0; 287 int send_write0;
285 union { 288 union {
286 struct ietf_mpa_frame mpa_frame; 289 struct ietf_mpa_frame mpa_frame;
287 u8 mpa_frame_buf[NES_CM_DEFAULT_MTU]; 290 u8 mpa_frame_buf[MAX_CM_BUFFER];
288 }; 291 };
289 u16 mpa_frame_size; 292 u16 mpa_frame_size;
290 struct iw_cm_id *cm_id; 293 struct iw_cm_id *cm_id;
@@ -326,6 +329,7 @@ enum nes_cm_event_type {
326 NES_CM_EVENT_MPA_REQ, 329 NES_CM_EVENT_MPA_REQ,
327 NES_CM_EVENT_MPA_CONNECT, 330 NES_CM_EVENT_MPA_CONNECT,
328 NES_CM_EVENT_MPA_ACCEPT, 331 NES_CM_EVENT_MPA_ACCEPT,
332 NES_CM_EVENT_MPA_REJECT,
329 NES_CM_EVENT_MPA_ESTABLISHED, 333 NES_CM_EVENT_MPA_ESTABLISHED,
330 NES_CM_EVENT_CONNECTED, 334 NES_CM_EVENT_CONNECTED,
331 NES_CM_EVENT_CLOSED, 335 NES_CM_EVENT_CLOSED,
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
index da9daba8e668..0fb8d81d9a62 100644
--- a/drivers/infiniband/hw/nes/nes_context.h
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 5d139db1b771..9a51f25c6cee 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -254,6 +254,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
254 u32 adapter_size; 254 u32 adapter_size;
255 u32 arp_table_size; 255 u32 arp_table_size;
256 u16 vendor_id; 256 u16 vendor_id;
257 u16 device_id;
257 u8 OneG_Mode; 258 u8 OneG_Mode;
258 u8 func_index; 259 u8 func_index;
259 260
@@ -356,6 +357,13 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
356 return NULL; 357 return NULL;
357 } 358 }
358 359
360 nesadapter->vendor_id = (((u32) nesadapter->mac_addr_high) << 8) |
361 (nesadapter->mac_addr_low >> 24);
362
363 pci_bus_read_config_word(nesdev->pcidev->bus, nesdev->pcidev->devfn,
364 PCI_DEVICE_ID, &device_id);
365 nesadapter->vendor_part_id = device_id;
366
359 if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter, 367 if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter,
360 OneG_Mode)) { 368 OneG_Mode)) {
361 kfree(nesadapter); 369 kfree(nesadapter);
@@ -1636,7 +1644,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
1636 nesvnic->post_cqp_request = nes_post_cqp_request; 1644 nesvnic->post_cqp_request = nes_post_cqp_request;
1637 nesvnic->mcrq_mcast_filter = NULL; 1645 nesvnic->mcrq_mcast_filter = NULL;
1638 1646
1639 spin_lock_init(&nesvnic->nic.sq_lock);
1640 spin_lock_init(&nesvnic->nic.rq_lock); 1647 spin_lock_init(&nesvnic->nic.rq_lock);
1641 1648
1642 /* setup the RQ */ 1649 /* setup the RQ */
@@ -2261,6 +2268,8 @@ static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
2261 2268
2262 if (++head >= aeq_size) 2269 if (++head >= aeq_size)
2263 head = 0; 2270 head = 0;
2271
2272 nes_write32(nesdev->regs + NES_AEQ_ALLOC, 1 << 16);
2264 } 2273 }
2265 while (1); 2274 while (1);
2266 aeq->aeq_head = head; 2275 aeq->aeq_head = head;
@@ -2622,9 +2631,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2622 } else 2631 } else
2623 break; 2632 break;
2624 } 2633 }
2625 if (skb)
2626 dev_kfree_skb_any(skb);
2627 } 2634 }
2635 if (skb)
2636 dev_kfree_skb_any(skb);
2628 nesnic->sq_tail++; 2637 nesnic->sq_tail++;
2629 nesnic->sq_tail &= nesnic->sq_size-1; 2638 nesnic->sq_tail &= nesnic->sq_size-1;
2630 if (sq_cqes > 128) { 2639 if (sq_cqes > 128) {
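
nes_process_aeq() now returns one credit to the hardware for every asynchronous event it consumes, via the NES_AEQ_ALLOC doorbell added in nes_hw.h below; writing 1 << 16 puts a count of one in the upper half-word. A small sketch of the loop shape, with the register write mocked for illustration:

    /* Sketch: per-entry AEQ crediting. nes_write32() is mocked here;
     * the 1 << 16 encoding (credit count in bits 31:16) follows the
     * hunk above. */
    #include <stdio.h>
    #include <stdint.h>

    #define NES_AEQ_ALLOC 0x0048

    static void nes_write32(uint32_t reg, uint32_t val)   /* mock */
    {
        printf("write 0x%04x = 0x%08x\n", reg, val);
    }

    int main(void)
    {
        unsigned head = 14, aeq_size = 16;

        for (int pending = 3; pending; pending--) {
            /* ... handle the AE at aeq[head] ... */
            if (++head >= aeq_size)
                head = 0;
            nes_write32(NES_AEQ_ALLOC, 1 << 16);  /* give back one entry */
        }
        return 0;
    }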
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index bc0b4de04450..f41a8710d2a8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2* Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3* 3*
4* This software is available to you under a choice of one of two 4* This software is available to you under a choice of one of two
5* licenses. You may choose to be licensed under the terms of the GNU 5* licenses. You may choose to be licensed under the terms of the GNU
@@ -61,6 +61,7 @@ enum pci_regs {
61 NES_CQ_ACK = 0x0034, 61 NES_CQ_ACK = 0x0034,
62 NES_WQE_ALLOC = 0x0040, 62 NES_WQE_ALLOC = 0x0040,
63 NES_CQE_ALLOC = 0x0044, 63 NES_CQE_ALLOC = 0x0044,
64 NES_AEQ_ALLOC = 0x0048
64}; 65};
65 66
66enum indexed_regs { 67enum indexed_regs {
@@ -875,7 +876,6 @@ struct nes_hw_nic {
875 u8 replenishing_rq; 876 u8 replenishing_rq;
876 u8 reserved; 877 u8 reserved;
877 878
878 spinlock_t sq_lock;
879 spinlock_t rq_lock; 879 spinlock_t rq_lock;
880}; 880};
881 881
@@ -1147,7 +1147,6 @@ struct nes_ib_device;
1147struct nes_vnic { 1147struct nes_vnic {
1148 struct nes_ib_device *nesibdev; 1148 struct nes_ib_device *nesibdev;
1149 u64 sq_full; 1149 u64 sq_full;
1150 u64 sq_locked;
1151 u64 tso_requests; 1150 u64 tso_requests;
1152 u64 segmented_tso_requests; 1151 u64 segmented_tso_requests;
1153 u64 linearized_skbs; 1152 u64 linearized_skbs;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 57a47cf7e513..025ed9f7d9c2 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -400,8 +400,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
400 if (skb_headlen(skb) == skb->len) { 400 if (skb_headlen(skb) == skb->len) {
401 if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) { 401 if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
402 nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0; 402 nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
403 nesnic->tx_skb[nesnic->sq_head] = NULL; 403 nesnic->tx_skb[nesnic->sq_head] = skb;
404 dev_kfree_skb(skb);
405 } 404 }
406 } else { 405 } else {
407 /* Deal with Fragments */ 406 /* Deal with Fragments */
@@ -453,7 +452,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
453 u32 wqe_count=1; 452 u32 wqe_count=1;
454 u32 send_rc; 453 u32 send_rc;
455 struct iphdr *iph; 454 struct iphdr *iph;
456 unsigned long flags;
457 __le16 *wqe_fragment_length; 455 __le16 *wqe_fragment_length;
458 u32 nr_frags; 456 u32 nr_frags;
459 u32 original_first_length; 457 u32 original_first_length;
@@ -480,13 +478,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
480 if (netif_queue_stopped(netdev)) 478 if (netif_queue_stopped(netdev))
481 return NETDEV_TX_BUSY; 479 return NETDEV_TX_BUSY;
482 480
483 local_irq_save(flags);
484 if (!spin_trylock(&nesnic->sq_lock)) {
485 local_irq_restore(flags);
486 nesvnic->sq_locked++;
487 return NETDEV_TX_LOCKED;
488 }
489
490 /* Check if SQ is full */ 481 /* Check if SQ is full */
491 if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) { 482 if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
492 if (!netif_queue_stopped(netdev)) { 483 if (!netif_queue_stopped(netdev)) {
@@ -498,7 +489,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
498 } 489 }
499 } 490 }
500 nesvnic->sq_full++; 491 nesvnic->sq_full++;
501 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
502 return NETDEV_TX_BUSY; 492 return NETDEV_TX_BUSY;
503 } 493 }
504 494
@@ -531,7 +521,6 @@ sq_no_longer_full:
531 } 521 }
532 } 522 }
533 nesvnic->sq_full++; 523 nesvnic->sq_full++;
534 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
535 nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n", 524 nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
536 netdev->name); 525 netdev->name);
537 return NETDEV_TX_BUSY; 526 return NETDEV_TX_BUSY;
@@ -656,17 +645,13 @@ tso_sq_no_longer_full:
656 skb_set_transport_header(skb, hoffset); 645 skb_set_transport_header(skb, hoffset);
657 skb_set_network_header(skb, nhoffset); 646 skb_set_network_header(skb, nhoffset);
658 send_rc = nes_nic_send(skb, netdev); 647 send_rc = nes_nic_send(skb, netdev);
659 if (send_rc != NETDEV_TX_OK) { 648 if (send_rc != NETDEV_TX_OK)
660 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
661 return NETDEV_TX_OK; 649 return NETDEV_TX_OK;
662 }
663 } 650 }
664 } else { 651 } else {
665 send_rc = nes_nic_send(skb, netdev); 652 send_rc = nes_nic_send(skb, netdev);
666 if (send_rc != NETDEV_TX_OK) { 653 if (send_rc != NETDEV_TX_OK)
667 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
668 return NETDEV_TX_OK; 654 return NETDEV_TX_OK;
669 }
670 } 655 }
671 656
672 barrier(); 657 barrier();
@@ -676,7 +661,6 @@ tso_sq_no_longer_full:
676 (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id); 661 (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
677 662
678 netdev->trans_start = jiffies; 663 netdev->trans_start = jiffies;
679 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
680 664
681 return NETDEV_TX_OK; 665 return NETDEV_TX_OK;
682} 666}
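
These hunks, together with the NETIF_F_LLTX removal at the end of this file, move nes_netdev_start_xmit() from lock-free (LLTX) to core-serialized transmit: the stack now holds the netdev tx lock around every xmit call, so the driver-private sq_lock, the sq_locked counter, and the NETDEV_TX_LOCKED early return all disappear, and the error paths no longer need unlock pairs. A userspace sketch of the resulting locking contract, using a pthread mutex as a stand-in for the tx lock:

    /* Sketch: with LLTX gone, the "core" (main here) serializes xmit,
     * so the xmit body may assume exclusive SQ access and only has to
     * report backpressure. Pure illustration, not driver code. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;  /* core-owned */
    static int sq_free = 2;

    static int start_xmit(void)          /* called with tx_lock held */
    {
        if (sq_free == 0)
            return 1;                    /* NETDEV_TX_BUSY: SQ full */
        sq_free--;                       /* post WQE; no driver lock */
        return 0;                        /* NETDEV_TX_OK */
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            pthread_mutex_lock(&tx_lock);
            printf("xmit -> %s\n", start_xmit() ? "BUSY" : "OK");
            pthread_mutex_unlock(&tx_lock);
        }
        return 0;
    }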
@@ -1012,7 +996,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
1012 "Pause Frames Received", 996 "Pause Frames Received",
1013 "Internal Routing Errors", 997 "Internal Routing Errors",
1014 "SQ SW Dropped SKBs", 998 "SQ SW Dropped SKBs",
1015 "SQ Locked",
1016 "SQ Full", 999 "SQ Full",
1017 "Segmented TSO Requests", 1000 "Segmented TSO Requests",
1018 "Rx Symbol Errors", 1001 "Rx Symbol Errors",
@@ -1129,16 +1112,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
1129 struct nes_device *nesdev = nesvnic->nesdev; 1112 struct nes_device *nesdev = nesvnic->nesdev;
1130 u32 nic_count; 1113 u32 nic_count;
1131 u32 u32temp; 1114 u32 u32temp;
1115 u32 index = 0;
1132 1116
1133 target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT; 1117 target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
1134 target_stat_values[0] = nesvnic->nesdev->link_status_interrupts; 1118 target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
1135 target_stat_values[1] = nesvnic->linearized_skbs; 1119 target_stat_values[++index] = nesvnic->linearized_skbs;
1136 target_stat_values[2] = nesvnic->tso_requests; 1120 target_stat_values[++index] = nesvnic->tso_requests;
1137 1121
1138 u32temp = nes_read_indexed(nesdev, 1122 u32temp = nes_read_indexed(nesdev,
1139 NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200)); 1123 NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
1140 nesvnic->nesdev->mac_pause_frames_sent += u32temp; 1124 nesvnic->nesdev->mac_pause_frames_sent += u32temp;
1141 target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent; 1125 target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;
1142 1126
1143 u32temp = nes_read_indexed(nesdev, 1127 u32temp = nes_read_indexed(nesdev,
1144 NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200)); 1128 NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
@@ -1209,60 +1193,59 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
1209 nesvnic->endnode_ipv4_tcp_retransmits += u32temp; 1193 nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
1210 } 1194 }
1211 1195
1212 target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received; 1196 target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
1213 target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err; 1197 target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
1214 target_stat_values[6] = nesvnic->tx_sw_dropped; 1198 target_stat_values[++index] = nesvnic->tx_sw_dropped;
1215 target_stat_values[7] = nesvnic->sq_locked; 1199 target_stat_values[++index] = nesvnic->sq_full;
1216 target_stat_values[8] = nesvnic->sq_full; 1200 target_stat_values[++index] = nesvnic->segmented_tso_requests;
1217 target_stat_values[9] = nesvnic->segmented_tso_requests; 1201 target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
1218 target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames; 1202 target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
1219 target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames; 1203 target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
1220 target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames; 1204 target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
1221 target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames; 1205 target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
1222 target_stat_values[14] = nesvnic->endnode_nstat_rx_discard; 1206 target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
1223 target_stat_values[15] = nesvnic->endnode_nstat_rx_octets; 1207 target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
1224 target_stat_values[16] = nesvnic->endnode_nstat_rx_frames; 1208 target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
1225 target_stat_values[17] = nesvnic->endnode_nstat_tx_octets; 1209 target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
1226 target_stat_values[18] = nesvnic->endnode_nstat_tx_frames; 1210 target_stat_values[++index] = mh_detected;
1227 target_stat_values[19] = mh_detected; 1211 target_stat_values[++index] = mh_pauses_sent;
1228 target_stat_values[20] = mh_pauses_sent; 1212 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
1229 target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits; 1213 target_stat_values[++index] = atomic_read(&cm_connects);
1230 target_stat_values[22] = atomic_read(&cm_connects); 1214 target_stat_values[++index] = atomic_read(&cm_accepts);
1231 target_stat_values[23] = atomic_read(&cm_accepts); 1215 target_stat_values[++index] = atomic_read(&cm_disconnects);
1232 target_stat_values[24] = atomic_read(&cm_disconnects); 1216 target_stat_values[++index] = atomic_read(&cm_connecteds);
1233 target_stat_values[25] = atomic_read(&cm_connecteds); 1217 target_stat_values[++index] = atomic_read(&cm_connect_reqs);
1234 target_stat_values[26] = atomic_read(&cm_connect_reqs); 1218 target_stat_values[++index] = atomic_read(&cm_rejects);
1235 target_stat_values[27] = atomic_read(&cm_rejects); 1219 target_stat_values[++index] = atomic_read(&mod_qp_timouts);
1236 target_stat_values[28] = atomic_read(&mod_qp_timouts); 1220 target_stat_values[++index] = atomic_read(&qps_created);
1237 target_stat_values[29] = atomic_read(&qps_created); 1221 target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
1238 target_stat_values[30] = atomic_read(&sw_qps_destroyed); 1222 target_stat_values[++index] = atomic_read(&qps_destroyed);
1239 target_stat_values[31] = atomic_read(&qps_destroyed); 1223 target_stat_values[++index] = atomic_read(&cm_closes);
1240 target_stat_values[32] = atomic_read(&cm_closes); 1224 target_stat_values[++index] = cm_packets_sent;
1241 target_stat_values[33] = cm_packets_sent; 1225 target_stat_values[++index] = cm_packets_bounced;
1242 target_stat_values[34] = cm_packets_bounced; 1226 target_stat_values[++index] = cm_packets_created;
1243 target_stat_values[35] = cm_packets_created; 1227 target_stat_values[++index] = cm_packets_received;
1244 target_stat_values[36] = cm_packets_received; 1228 target_stat_values[++index] = cm_packets_dropped;
1245 target_stat_values[37] = cm_packets_dropped; 1229 target_stat_values[++index] = cm_packets_retrans;
1246 target_stat_values[38] = cm_packets_retrans; 1230 target_stat_values[++index] = cm_listens_created;
1247 target_stat_values[39] = cm_listens_created; 1231 target_stat_values[++index] = cm_listens_destroyed;
1248 target_stat_values[40] = cm_listens_destroyed; 1232 target_stat_values[++index] = cm_backlog_drops;
1249 target_stat_values[41] = cm_backlog_drops; 1233 target_stat_values[++index] = atomic_read(&cm_loopbacks);
1250 target_stat_values[42] = atomic_read(&cm_loopbacks); 1234 target_stat_values[++index] = atomic_read(&cm_nodes_created);
1251 target_stat_values[43] = atomic_read(&cm_nodes_created); 1235 target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
1252 target_stat_values[44] = atomic_read(&cm_nodes_destroyed); 1236 target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
1253 target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts); 1237 target_stat_values[++index] = atomic_read(&cm_resets_recvd);
1254 target_stat_values[46] = atomic_read(&cm_resets_recvd); 1238 target_stat_values[++index] = int_mod_timer_init;
1255 target_stat_values[47] = int_mod_timer_init; 1239 target_stat_values[++index] = int_mod_cq_depth_1;
1256 target_stat_values[48] = int_mod_cq_depth_1; 1240 target_stat_values[++index] = int_mod_cq_depth_4;
1257 target_stat_values[49] = int_mod_cq_depth_4; 1241 target_stat_values[++index] = int_mod_cq_depth_16;
1258 target_stat_values[50] = int_mod_cq_depth_16; 1242 target_stat_values[++index] = int_mod_cq_depth_24;
1259 target_stat_values[51] = int_mod_cq_depth_24; 1243 target_stat_values[++index] = int_mod_cq_depth_32;
1260 target_stat_values[52] = int_mod_cq_depth_32; 1244 target_stat_values[++index] = int_mod_cq_depth_128;
1261 target_stat_values[53] = int_mod_cq_depth_128; 1245 target_stat_values[++index] = int_mod_cq_depth_256;
1262 target_stat_values[54] = int_mod_cq_depth_256; 1246 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
1263 target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated; 1247 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
1264 target_stat_values[56] = nesvnic->lro_mgr.stats.flushed; 1248 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
1265 target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
1266 1249
1267} 1250}
1268 1251
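
Replacing the hard-coded stat indices with a running ++index is what lets the "SQ Locked" counter vanish without renumbering the fifty-odd entries after it: the string table and the value writes only have to stay in the same relative order. A self-contained illustration of the pattern:

    /* Sketch of the ++index pattern: ordering is the only coupling
     * between the name table and the value table, so adding or
     * dropping a stat is a two-line change. */
    #include <stdio.h>
    #include <stdint.h>

    static const char *names[] = { "Link Ints", "Linearized SKBs", "TSO Reqs" };

    int main(void)
    {
        uint64_t val[3];
        uint32_t index = 0;

        val[index]   = 42;   /* link_status_interrupts */
        val[++index] = 7;    /* linearized_skbs */
        val[++index] = 99;   /* tso_requests */

        for (uint32_t i = 0; i <= index; i++)
            printf("%-16s %llu\n", names[i], (unsigned long long)val[i]);
        return 0;
    }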
@@ -1616,7 +1599,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1616 nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n"); 1599 nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
1617 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1600 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1618 netdev->vlan_rx_register = nes_netdev_vlan_rx_register; 1601 netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
1619 netdev->features |= NETIF_F_LLTX;
1620 1602
1621 /* Fill in the port structure */ 1603 /* Fill in the port structure */
1622 nesvnic->netdev = netdev; 1604 nesvnic->netdev = netdev;
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
index e64306bce80b..cc90c14b49eb 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Topspin Communications. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 6f3bc1b6bf22..a282031d15c7 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 4fdb72454f94..96d953540a2c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -551,6 +551,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
551 struct nes_device *nesdev = nesvnic->nesdev; 551 struct nes_device *nesdev = nesvnic->nesdev;
552 struct nes_adapter *nesadapter = nesdev->nesadapter; 552 struct nes_adapter *nesadapter = nesdev->nesadapter;
553 int i = 0; 553 int i = 0;
554 int rc;
554 555
555 /* free the resources */ 556 /* free the resources */
556 if (nesfmr->leaf_pbl_cnt == 0) { 557 if (nesfmr->leaf_pbl_cnt == 0) {
@@ -572,7 +573,9 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
572 nesmr->ibmw.rkey = ibfmr->rkey; 573 nesmr->ibmw.rkey = ibfmr->rkey;
573 nesmr->ibmw.uobject = NULL; 574 nesmr->ibmw.uobject = NULL;
574 575
575 if (nesfmr->nesmr.pbls_used != 0) { 576 rc = nes_dealloc_mw(&nesmr->ibmw);
577
578 if ((rc == 0) && (nesfmr->nesmr.pbls_used != 0)) {
576 spin_lock_irqsave(&nesadapter->pbl_lock, flags); 579 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
577 if (nesfmr->nesmr.pbl_4k) { 580 if (nesfmr->nesmr.pbl_4k) {
578 nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used; 581 nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
@@ -584,7 +587,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
584 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 587 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
585 } 588 }
586 589
587 return nes_dealloc_mw(&nesmr->ibmw); 590 return rc;
588} 591}
589 592
590 593
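
nes_dealloc_fmr() now destroys the underlying memory window first and returns the PBL credits only when nes_dealloc_mw() reports success; on failure the pool accounting is left alone, so a destroy that never completed cannot inflate free_4kpbl or free_256pbl. nes_dereg_mr() below gets the same ordering. A minimal sketch of the pattern, with destroy_obj() as a hypothetical stand-in for the CQP deallocate:

    /* Sketch: release shared pool credits only after the hardware
     * destroy succeeds. destroy_obj() is a hypothetical stand-in. */
    #include <stdio.h>

    static int free_4kpbl = 10;

    static int destroy_obj(int fail) { return fail ? -1 : 0; }

    static int dealloc(int pbls_used, int fail)
    {
        int rc = destroy_obj(fail);
        if (rc == 0 && pbls_used != 0)
            free_4kpbl += pbls_used;     /* safe: object really gone */
        return rc;
    }

    int main(void)
    {
        dealloc(3, 1);                   /* failure: pool untouched */
        dealloc(3, 0);                   /* success: credits returned */
        printf("free_4kpbl = %d\n", free_4kpbl);   /* prints 13 */
        return 0;
    }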
@@ -1884,21 +1887,75 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
1884 return ret; 1887 return ret;
1885} 1888}
1886 1889
1890/**
1891 * root_256
1892 */
1893static u32 root_256(struct nes_device *nesdev,
1894 struct nes_root_vpbl *root_vpbl,
1895 struct nes_root_vpbl *new_root,
1896 u16 pbl_count_4k,
1897 u16 pbl_count_256)
1898{
1899 u64 leaf_pbl;
1900 int i, j, k;
1901
1902 if (pbl_count_4k == 1) {
1903 new_root->pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
1904 512, &new_root->pbl_pbase);
1905
1906 if (new_root->pbl_vbase == NULL)
1907 return 0;
1908
1909 leaf_pbl = (u64)root_vpbl->pbl_pbase;
1910 for (i = 0; i < 16; i++) {
1911 new_root->pbl_vbase[i].pa_low =
1912 cpu_to_le32((u32)leaf_pbl);
1913 new_root->pbl_vbase[i].pa_high =
1914 cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
1915 leaf_pbl += 256;
1916 }
1917 } else {
1918 for (i = 3; i >= 0; i--) {
1919 j = i * 16;
1920 root_vpbl->pbl_vbase[j] = root_vpbl->pbl_vbase[i];
1921 leaf_pbl = le32_to_cpu(root_vpbl->pbl_vbase[j].pa_low) +
1922 (((u64)le32_to_cpu(root_vpbl->pbl_vbase[j].pa_high))
1923 << 32);
1924 for (k = 1; k < 16; k++) {
1925 leaf_pbl += 256;
1926 root_vpbl->pbl_vbase[j + k].pa_low =
1927 cpu_to_le32((u32)leaf_pbl);
1928 root_vpbl->pbl_vbase[j + k].pa_high =
1929 cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
1930 }
1931 }
1932 }
1933
1934 return 1;
1935}
1936
1887 1937
1888/** 1938/**
1889 * nes_reg_mr 1939 * nes_reg_mr
1890 */ 1940 */
1891static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, 1941static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
1892 u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl, 1942 u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl,
1893 dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count, 1943 dma_addr_t single_buffer, u16 pbl_count_4k,
1894 int acc, u64 *iova_start) 1944 u16 residual_page_count_4k, int acc, u64 *iova_start,
1945 u16 *actual_pbl_cnt, u8 *used_4k_pbls)
1895{ 1946{
1896 struct nes_hw_cqp_wqe *cqp_wqe; 1947 struct nes_hw_cqp_wqe *cqp_wqe;
1897 struct nes_cqp_request *cqp_request; 1948 struct nes_cqp_request *cqp_request;
1898 unsigned long flags; 1949 unsigned long flags;
1899 int ret; 1950 int ret;
1900 struct nes_adapter *nesadapter = nesdev->nesadapter; 1951 struct nes_adapter *nesadapter = nesdev->nesadapter;
1901 /* int count; */ 1952 uint pg_cnt = 0;
1953 u16 pbl_count_256;
1954 u16 pbl_count = 0;
1955 u8 use_256_pbls = 0;
1956 u8 use_4k_pbls = 0;
1957 u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;
1958 struct nes_root_vpbl new_root = {0, 0, 0};
1902 u32 opcode = 0; 1959 u32 opcode = 0;
1903 u16 major_code; 1960 u16 major_code;
1904 1961
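
The new root_256() helper rewrites a two-level PBL from 4K granularity to 256-byte granularity. With a single 4K leaf it allocates a fresh 512-byte root whose 16 entries address consecutive 256-byte slices of that leaf; with several leaves it fans each existing root entry out into 16 slice pointers in place, iterating from the highest entry down so nothing is overwritten before it is read. A sketch of the fan-out arithmetic for the single-leaf case:

    /* Sketch of the root_256 fan-out: one 4K leaf becomes sixteen
     * 256-byte slices referenced from a new root. Plain u64s stand in
     * for the pa_high/pa_low little-endian pairs. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t leaf_pbl = 0x100000;    /* sample leaf bus address */
        uint64_t root[16];

        for (int i = 0; i < 16; i++) {
            root[i] = leaf_pbl;          /* split as pa_low/pa_high in HW */
            leaf_pbl += 256;             /* next 256B slice of the page */
        }
        printf("first 0x%llx, last 0x%llx\n",
               (unsigned long long)root[0], (unsigned long long)root[15]);
        return 0;
    }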
@@ -1911,41 +1968,70 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
1911 cqp_request->waiting = 1; 1968 cqp_request->waiting = 1;
1912 cqp_wqe = &cqp_request->cqp_wqe; 1969 cqp_wqe = &cqp_request->cqp_wqe;
1913 1970
1914 spin_lock_irqsave(&nesadapter->pbl_lock, flags); 1971 if (pbl_count_4k) {
1915 /* track PBL resources */ 1972 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
1916 if (pbl_count != 0) { 1973
1917 if (pbl_count > 1) { 1974 pg_cnt = ((pbl_count_4k - 1) * 512) + residual_page_count_4k;
1918 /* Two level PBL */ 1975 pbl_count_256 = (pg_cnt + 31) / 32;
1919 if ((pbl_count+1) > nesadapter->free_4kpbl) { 1976 if (pg_cnt <= 32) {
1920 nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n"); 1977 if (pbl_count_256 <= nesadapter->free_256pbl)
1921 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 1978 use_256_pbls = 1;
1922 nes_free_cqp_request(nesdev, cqp_request); 1979 else if (pbl_count_4k <= nesadapter->free_4kpbl)
1923 return -ENOMEM; 1980 use_4k_pbls = 1;
1924 } else { 1981 } else if (pg_cnt <= 2048) {
1925 nesadapter->free_4kpbl -= pbl_count+1; 1982 if (((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) &&
1926 } 1983 (nesadapter->free_4kpbl > (nesadapter->max_4kpbl >> 1))) {
1927 } else if (residual_page_count > 32) { 1984 use_4k_pbls = 1;
1928 if (pbl_count > nesadapter->free_4kpbl) { 1985 } else if ((pbl_count_256 + 1) <= nesadapter->free_256pbl) {
1929 nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n"); 1986 use_256_pbls = 1;
1930 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 1987 use_two_level = 1;
1931 nes_free_cqp_request(nesdev, cqp_request); 1988 } else if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
1932 return -ENOMEM; 1989 use_4k_pbls = 1;
1933 } else {
1934 nesadapter->free_4kpbl -= pbl_count;
1935 } 1990 }
1936 } else { 1991 } else {
1937 if (pbl_count > nesadapter->free_256pbl) { 1992 if ((pbl_count_4k + 1) <= nesadapter->free_4kpbl)
1938 nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n"); 1993 use_4k_pbls = 1;
1939 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1940 nes_free_cqp_request(nesdev, cqp_request);
1941 return -ENOMEM;
1942 } else {
1943 nesadapter->free_256pbl -= pbl_count;
1944 }
1945 } 1994 }
1995
1996 if (use_256_pbls) {
1997 pbl_count = pbl_count_256;
1998 nesadapter->free_256pbl -= pbl_count + use_two_level;
1999 } else if (use_4k_pbls) {
2000 pbl_count = pbl_count_4k;
2001 nesadapter->free_4kpbl -= pbl_count + use_two_level;
2002 } else {
2003 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2004 nes_debug(NES_DBG_MR, "Out of Pbls\n");
2005 nes_free_cqp_request(nesdev, cqp_request);
2006 return -ENOMEM;
2007 }
2008
2009 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1946 } 2010 }
1947 2011
1948 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); 2012 if (use_256_pbls && use_two_level) {
2013 if (root_256(nesdev, root_vpbl, &new_root, pbl_count_4k, pbl_count_256) == 1) {
2014 if (new_root.pbl_pbase != 0)
2015 root_vpbl = &new_root;
2016 } else {
2017 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
2018 nesadapter->free_256pbl += pbl_count_256 + use_two_level;
2019 use_256_pbls = 0;
2020
2021 if (pbl_count_4k == 1)
2022 use_two_level = 0;
2023 pbl_count = pbl_count_4k;
2024
2025 if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
2026 nesadapter->free_4kpbl -= pbl_count + use_two_level;
2027 use_4k_pbls = 1;
2028 }
2029 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2030
2031 if (use_4k_pbls == 0)
2032 return -ENOMEM;
2033 }
2034 }
1949 2035
1950 opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ | 2036 opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ |
1951 NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR; 2037 NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
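
The reworked reservation logic first sizes the region in pages, pg_cnt = (pbl_count_4k - 1) * 512 + residual_page_count_4k (a 4K block holds 512 eight-byte entries), and derives the 256-byte equivalent as pbl_count_256 = ceil(pg_cnt / 32). Tiny regions prefer the 256B pool; mid-size regions (up to 2048 pages) draw from the 4K pool only while it is less than half drained, otherwise they take a two-level 256B layout; anything larger must use 4K blocks. If root_256() later fails, the 256B credits are rolled back and the 4K pool is retried under the same lock acquisition, so the counters never transiently over-commit. A self-contained check of the selection arithmetic, with invented pool numbers:

    /* Sketch of the pool-selection arithmetic; the thresholds follow
     * the hunk above, the pool counters are invented sample values. */
    #include <stdio.h>

    int main(void)
    {
        unsigned pbl_count_4k = 3, residual = 100;
        unsigned free_256pbl = 50, free_4kpbl = 40, max_4kpbl = 64;
        unsigned two_level = pbl_count_4k > 1;

        unsigned pg_cnt = (pbl_count_4k - 1) * 512 + residual;  /* 1124 */
        unsigned pbl_count_256 = (pg_cnt + 31) / 32;            /* 36 */

        const char *pool = "none";
        if (pg_cnt <= 32)
            pool = pbl_count_256 <= free_256pbl ? "256B" :
                   pbl_count_4k <= free_4kpbl   ? "4K" : "none";
        else if (pg_cnt <= 2048) {
            if (pbl_count_4k + two_level <= free_4kpbl &&
                free_4kpbl > (max_4kpbl >> 1))
                pool = "4K";                     /* 4K pool still roomy */
            else if (pbl_count_256 + 1 <= free_256pbl)
                pool = "256B two-level";
            else if (pbl_count_4k + two_level <= free_4kpbl)
                pool = "4K";
        } else if (pbl_count_4k + 1 <= free_4kpbl)
            pool = "4K";

        printf("pg_cnt=%u pbl_count_256=%u -> %s\n",
               pg_cnt, pbl_count_256, pool);
        return 0;
    }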
@@ -1974,10 +2060,9 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
1974 } else { 2060 } else {
1975 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase); 2061 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase);
1976 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count); 2062 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count);
1977 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, 2063 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (pg_cnt * 8));
1978 (((pbl_count - 1) * 4096) + (residual_page_count*8)));
1979 2064
1980 if ((pbl_count > 1) || (residual_page_count > 32)) 2065 if (use_4k_pbls)
1981 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE); 2066 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
1982 } 2067 }
1983 barrier(); 2068 barrier();
@@ -1994,13 +2079,25 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
1994 major_code = cqp_request->major_code; 2079 major_code = cqp_request->major_code;
1995 nes_put_cqp_request(nesdev, cqp_request); 2080 nes_put_cqp_request(nesdev, cqp_request);
1996 2081
2082 if ((!ret || major_code) && pbl_count != 0) {
2083 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
2084 if (use_256_pbls)
2085 nesadapter->free_256pbl += pbl_count + use_two_level;
2086 else if (use_4k_pbls)
2087 nesadapter->free_4kpbl += pbl_count + use_two_level;
2088 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2089 }
2090 if (new_root.pbl_pbase)
2091 pci_free_consistent(nesdev->pcidev, 512, new_root.pbl_vbase,
2092 new_root.pbl_pbase);
2093
1997 if (!ret) 2094 if (!ret)
1998 return -ETIME; 2095 return -ETIME;
1999 else if (major_code) 2096 else if (major_code)
2000 return -EIO; 2097 return -EIO;
2001 else
2002 return 0;
2003 2098
2099 *actual_pbl_cnt = pbl_count + use_two_level;
2100 *used_4k_pbls = use_4k_pbls;
2004 return 0; 2101 return 0;
2005} 2102}
2006 2103
@@ -2165,18 +2262,14 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
2165 pbl_count = root_pbl_index; 2262 pbl_count = root_pbl_index;
2166 } 2263 }
2167 ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl, 2264 ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
2168 buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start); 2265 buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start,
2266 &nesmr->pbls_used, &nesmr->pbl_4k);
2169 2267
2170 if (ret == 0) { 2268 if (ret == 0) {
2171 nesmr->ibmr.rkey = stag; 2269 nesmr->ibmr.rkey = stag;
2172 nesmr->ibmr.lkey = stag; 2270 nesmr->ibmr.lkey = stag;
2173 nesmr->mode = IWNES_MEMREG_TYPE_MEM; 2271 nesmr->mode = IWNES_MEMREG_TYPE_MEM;
2174 ibmr = &nesmr->ibmr; 2272 ibmr = &nesmr->ibmr;
2175 nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
2176 nesmr->pbls_used = pbl_count;
2177 if (pbl_count > 1) {
2178 nesmr->pbls_used++;
2179 }
2180 } else { 2273 } else {
2181 kfree(nesmr); 2274 kfree(nesmr);
2182 ibmr = ERR_PTR(-ENOMEM); 2275 ibmr = ERR_PTR(-ENOMEM);
@@ -2454,8 +2547,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2454 stag, (unsigned int)iova_start, 2547 stag, (unsigned int)iova_start,
2455 (unsigned int)region_length, stag_index, 2548 (unsigned int)region_length, stag_index,
2456 (unsigned long long)region->length, pbl_count); 2549 (unsigned long long)region->length, pbl_count);
2457 ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl, 2550 ret = nes_reg_mr(nesdev, nespd, stag, region->length, &root_vpbl,
2458 first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start); 2551 first_dma_addr, pbl_count, (u16)cur_pbl_index, acc,
2552 &iova_start, &nesmr->pbls_used, &nesmr->pbl_4k);
2459 2553
2460 nes_debug(NES_DBG_MR, "ret=%d\n", ret); 2554 nes_debug(NES_DBG_MR, "ret=%d\n", ret);
2461 2555
@@ -2464,11 +2558,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2464 nesmr->ibmr.lkey = stag; 2558 nesmr->ibmr.lkey = stag;
2465 nesmr->mode = IWNES_MEMREG_TYPE_MEM; 2559 nesmr->mode = IWNES_MEMREG_TYPE_MEM;
2466 ibmr = &nesmr->ibmr; 2560 ibmr = &nesmr->ibmr;
2467 nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
2468 nesmr->pbls_used = pbl_count;
2469 if (pbl_count > 1) {
2470 nesmr->pbls_used++;
2471 }
2472 } else { 2561 } else {
2473 ib_umem_release(region); 2562 ib_umem_release(region);
2474 kfree(nesmr); 2563 kfree(nesmr);
@@ -2607,24 +2696,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
2607 cqp_request->waiting = 1; 2696 cqp_request->waiting = 1;
2608 cqp_wqe = &cqp_request->cqp_wqe; 2697 cqp_wqe = &cqp_request->cqp_wqe;
2609 2698
2610 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
2611 if (nesmr->pbls_used != 0) {
2612 if (nesmr->pbl_4k) {
2613 nesadapter->free_4kpbl += nesmr->pbls_used;
2614 if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
2615 printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n",
2616 nesadapter->free_4kpbl, nesadapter->max_4kpbl);
2617 }
2618 } else {
2619 nesadapter->free_256pbl += nesmr->pbls_used;
2620 if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
2621 printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n",
2622 nesadapter->free_256pbl, nesadapter->max_256pbl);
2623 }
2624 }
2625 }
2626
2627 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2628 nes_fill_init_cqp_wqe(cqp_wqe, nesdev); 2699 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
2629 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, 2700 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
2630 NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO | 2701 NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
@@ -2642,11 +2713,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
2642 " CQP Major:Minor codes = 0x%04X:0x%04X\n", 2713 " CQP Major:Minor codes = 0x%04X:0x%04X\n",
2643 ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code); 2714 ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
2644 2715
2645 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
2646 (ib_mr->rkey & 0x0fffff00) >> 8);
2647
2648 kfree(nesmr);
2649
2650 major_code = cqp_request->major_code; 2716 major_code = cqp_request->major_code;
2651 minor_code = cqp_request->minor_code; 2717 minor_code = cqp_request->minor_code;
2652 2718
@@ -2662,8 +2728,33 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
2662 " to destroy STag, ib_mr=%p, rkey = 0x%08X\n", 2728 " to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
2663 major_code, minor_code, ib_mr, ib_mr->rkey); 2729 major_code, minor_code, ib_mr, ib_mr->rkey);
2664 return -EIO; 2730 return -EIO;
2665 } else 2731 }
2666 return 0; 2732
2733 if (nesmr->pbls_used != 0) {
2734 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
2735 if (nesmr->pbl_4k) {
2736 nesadapter->free_4kpbl += nesmr->pbls_used;
2737 if (nesadapter->free_4kpbl > nesadapter->max_4kpbl)
2738 printk(KERN_ERR PFX "free 4KB PBLs(%u) has "
2739 "exceeded the max(%u)\n",
2740 nesadapter->free_4kpbl,
2741 nesadapter->max_4kpbl);
2742 } else {
2743 nesadapter->free_256pbl += nesmr->pbls_used;
2744 if (nesadapter->free_256pbl > nesadapter->max_256pbl)
2745 printk(KERN_ERR PFX "free 256B PBLs(%u) has "
2746 "exceeded the max(%u)\n",
2747 nesadapter->free_256pbl,
2748 nesadapter->max_256pbl);
2749 }
2750 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2751 }
2752 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
2753 (ib_mr->rkey & 0x0fffff00) >> 8);
2754
2755 kfree(nesmr);
2756
2757 return 0;
2667} 2758}
2668 2759
2669 2760
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 6c6b4da5184f..da3c368f1ef8 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0bd2a4ff0842..353c13b91e8f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -660,8 +660,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
660 660
661 path = __path_find(dev, phdr->hwaddr + 4); 661 path = __path_find(dev, phdr->hwaddr + 4);
662 if (!path || !path->valid) { 662 if (!path || !path->valid) {
663 if (!path) 663 int new_path = 0;
664
665 if (!path) {
664 path = path_rec_create(dev, phdr->hwaddr + 4); 666 path = path_rec_create(dev, phdr->hwaddr + 4);
667 new_path = 1;
668 }
665 if (path) { 669 if (path) {
666 /* put pseudoheader back on for next time */ 670 /* put pseudoheader back on for next time */
667 skb_push(skb, sizeof *phdr); 671 skb_push(skb, sizeof *phdr);
@@ -669,7 +673,8 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
669 673
670 if (!path->query && path_rec_start(dev, path)) { 674 if (!path->query && path_rec_start(dev, path)) {
671 spin_unlock_irqrestore(&priv->lock, flags); 675 spin_unlock_irqrestore(&priv->lock, flags);
672 path_free(dev, path); 676 if (new_path)
677 path_free(dev, path);
673 return; 678 return;
674 } else 679 } else
675 __path_add(dev, path); 680 __path_add(dev, path);
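
The IPoIB fix records whether this call itself created the path (new_path) and frees it on the path_rec_start() error path only in that case; unconditionally calling path_free() on a path that __path_find() returned from the device list would leave other senders holding a dangling entry. The general ownership-flag pattern, sketched with hypothetical lookup()/create() helpers:

    /* Sketch: on error, free only what this function allocated.
     * lookup() and create() are hypothetical stand-ins for
     * __path_find() and path_rec_create(). */
    #include <stdio.h>
    #include <stdlib.h>

    static int *cached;                  /* shared table entry */

    static int *lookup(void) { return cached; }
    static int *create(void) { return malloc(sizeof(int)); }

    static void send_one(int fail)
    {
        int new_path = 0;
        int *path = lookup();

        if (!path) {
            path = create();
            new_path = 1;
        }
        if (!path)
            return;
        if (fail) {                      /* path_rec_start() failed */
            if (new_path)
                free(path);              /* ours, safe to free */
            return;                      /* shared entry stays valid */
        }
        cached = path;                   /* publish to the table */
    }

    int main(void)
    {
        send_one(0);                     /* creates and publishes */
        send_one(1);                     /* found: must NOT be freed */
        printf("cached entry %s\n", cached ? "intact" : "lost");
        return 0;
    }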
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 319b188145be..ea9e1556e0d6 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -401,13 +401,6 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
401 if (ret) 401 if (ret)
402 goto failure; 402 goto failure;
403 403
404 iser_dbg("path.mtu is %d setting it to %d\n",
405 cma_id->route.path_rec->mtu, IB_MTU_1024);
406
407 /* we must set the MTU to 1024 as this is what the target is assuming */
408 if (cma_id->route.path_rec->mtu > IB_MTU_1024)
409 cma_id->route.path_rec->mtu = IB_MTU_1024;
410
411 memset(&conn_param, 0, sizeof conn_param); 404 memset(&conn_param, 0, sizeof conn_param);
412 conn_param.responder_resources = 4; 405 conn_param.responder_resources = 4;
413 conn_param.initiator_depth = 1; 406 conn_param.initiator_depth = 1;
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index a7a97bf998f8..21040a0d81fe 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,7 +1,7 @@
1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o 1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
2 2
3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ 3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
4 mr.o pd.o port.o profile.o qp.o reset.o srq.o 4 mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
5 5
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o 6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index f094ee00c416..aa9674b7f19c 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -42,7 +42,6 @@ enum {
42static DEFINE_SPINLOCK(catas_lock); 42static DEFINE_SPINLOCK(catas_lock);
43 43
44static LIST_HEAD(catas_list); 44static LIST_HEAD(catas_list);
45static struct workqueue_struct *catas_wq;
46static struct work_struct catas_work; 45static struct work_struct catas_work;
47 46
48static int internal_err_reset = 1; 47static int internal_err_reset = 1;
@@ -77,7 +76,7 @@ static void poll_catas(unsigned long dev_ptr)
77 list_add(&priv->catas_err.list, &catas_list); 76 list_add(&priv->catas_err.list, &catas_list);
78 spin_unlock(&catas_lock); 77 spin_unlock(&catas_lock);
79 78
80 queue_work(catas_wq, &catas_work); 79 queue_work(mlx4_wq, &catas_work);
81 } 80 }
82 } else 81 } else
83 mod_timer(&priv->catas_err.timer, 82 mod_timer(&priv->catas_err.timer,
@@ -146,18 +145,7 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
146 spin_unlock_irq(&catas_lock); 145 spin_unlock_irq(&catas_lock);
147} 146}
148 147
149int __init mlx4_catas_init(void) 148void __init mlx4_catas_init(void)
150{ 149{
151 INIT_WORK(&catas_work, catas_reset); 150 INIT_WORK(&catas_work, catas_reset);
152
153 catas_wq = create_singlethread_workqueue("mlx4_err");
154 if (!catas_wq)
155 return -ENOMEM;
156
157 return 0;
158}
159
160void mlx4_catas_cleanup(void)
161{
162 destroy_workqueue(catas_wq);
163} 151}
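
The private catas_wq goes away in favor of a single driver-wide mlx4_wq, created in mlx4_init() and destroyed in mlx4_cleanup() (see main.c below); with no allocation left to fail, mlx4_catas_init() becomes void. A kernel-side sketch of the consolidated lifetime, assuming module context and using sketch-local names:

    /* Sketch (kernel module context): one module-wide singlethread
     * workqueue shared by all users instead of one per subsystem. */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *sketch_wq;
    static struct work_struct sketch_catas_work;

    static void sketch_catas_reset(struct work_struct *work)
    {
        /* ... reset logic runs in process context ... */
    }

    static int __init sketch_init(void)
    {
        INIT_WORK(&sketch_catas_work, sketch_catas_reset);
        sketch_wq = create_singlethread_workqueue("mlx4");
        return sketch_wq ? 0 : -ENOMEM;   /* only the wq can fail now */
    }

    static void __exit sketch_exit(void)
    {
        destroy_workqueue(sketch_wq);     /* flushes pending work first */
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");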
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 2c19bff7cbab..8830dcb92ec8 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -163,6 +163,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
163 int cqn; 163 int cqn;
164 int eqes_found = 0; 164 int eqes_found = 0;
165 int set_ci = 0; 165 int set_ci = 0;
166 int port;
166 167
167 while ((eqe = next_eqe_sw(eq))) { 168 while ((eqe = next_eqe_sw(eq))) {
168 /* 169 /*
@@ -203,11 +204,16 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
203 break; 204 break;
204 205
205 case MLX4_EVENT_TYPE_PORT_CHANGE: 206 case MLX4_EVENT_TYPE_PORT_CHANGE:
206 mlx4_dispatch_event(dev, 207 port = be32_to_cpu(eqe->event.port_change.port) >> 28;
207 eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ? 208 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
208 MLX4_DEV_EVENT_PORT_UP : 209 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
209 MLX4_DEV_EVENT_PORT_DOWN, 210 port);
210 be32_to_cpu(eqe->event.port_change.port) >> 28); 211 mlx4_priv(dev)->sense.do_sense_port[port] = 1;
212 } else {
213 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
214 port);
215 mlx4_priv(dev)->sense.do_sense_port[port] = 0;
216 }
211 break; 217 break;
212 218
213 case MLX4_EVENT_TYPE_CQ_ERROR: 219 case MLX4_EVENT_TYPE_CQ_ERROR:
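
The port-change branch now decodes the port number once (it sits in the top nibble of the big-endian port_change word) and couples link state to port sensing: a DOWN event arms do_sense_port[port] so the poller re-probes the link type, an UP event disarms it. A sketch of the decode, using ntohl() as a stand-in for be32_to_cpu():

    /* Sketch of the EQE port decode; the sample word carries port 2
     * in bits 31:28, matching the >> 28 in the hunk above. */
    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>               /* ntohl() ~ be32_to_cpu() */

    int main(void)
    {
        uint32_t eqe_word = htonl(2u << 28);   /* big-endian in memory */
        int port = ntohl(eqe_word) >> 28;
        int subtype_down = 1;                  /* PORT_CHANGE_SUBTYPE_DOWN */
        int do_sense_port[3] = { 0 };

        do_sense_port[port] = subtype_down ? 1 : 0;
        printf("port %d, do_sense=%d\n", port, do_sense_port[port]);
        return 0;
    }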
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 6ef2490d5c3e..a66f5b2fd288 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -51,6 +51,8 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
51MODULE_LICENSE("Dual BSD/GPL"); 51MODULE_LICENSE("Dual BSD/GPL");
52MODULE_VERSION(DRV_VERSION); 52MODULE_VERSION(DRV_VERSION);
53 53
54struct workqueue_struct *mlx4_wq;
55
54#ifdef CONFIG_MLX4_DEBUG 56#ifdef CONFIG_MLX4_DEBUG
55 57
56int mlx4_debug_level = 0; 58int mlx4_debug_level = 0;
@@ -98,24 +100,23 @@ module_param_named(use_prio, use_prio, bool, 0444);
98MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 100MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
99 "(0/1, default 0)"); 101 "(0/1, default 0)");
100 102
101static int mlx4_check_port_params(struct mlx4_dev *dev, 103int mlx4_check_port_params(struct mlx4_dev *dev,
102 enum mlx4_port_type *port_type) 104 enum mlx4_port_type *port_type)
103{ 105{
104 int i; 106 int i;
105 107
106 for (i = 0; i < dev->caps.num_ports - 1; i++) { 108 for (i = 0; i < dev->caps.num_ports - 1; i++) {
107 if (port_type[i] != port_type[i+1] && 109 if (port_type[i] != port_type[i + 1]) {
108 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 110 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
109 mlx4_err(dev, "Only same port types supported " 111 mlx4_err(dev, "Only same port types supported "
110 "on this HCA, aborting.\n"); 112 "on this HCA, aborting.\n");
111 return -EINVAL; 113 return -EINVAL;
114 }
115 if (port_type[i] == MLX4_PORT_TYPE_ETH &&
116 port_type[i + 1] == MLX4_PORT_TYPE_IB)
117 return -EINVAL;
112 } 118 }
113 } 119 }
114 if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
115 (port_type[1] == MLX4_PORT_TYPE_IB)) {
116 mlx4_err(dev, "eth-ib configuration is not supported.\n");
117 return -EINVAL;
118 }
119 120
120 for (i = 0; i < dev->caps.num_ports; i++) { 121 for (i = 0; i < dev->caps.num_ports; i++) {
121 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 122 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
@@ -225,6 +226,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
225 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 226 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
226 else 227 else
227 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 228 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
229 dev->caps.possible_type[i] = dev->caps.port_type[i];
230 mlx4_priv(dev)->sense.sense_allowed[i] =
231 dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
228 232
229 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 233 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
230 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 234 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -263,14 +267,16 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
263 * Change the port configuration of the device. 267 * Change the port configuration of the device.
264 * Every user of this function must hold the port mutex. 268 * Every user of this function must hold the port mutex.
265 */ 269 */
266static int mlx4_change_port_types(struct mlx4_dev *dev, 270int mlx4_change_port_types(struct mlx4_dev *dev,
267 enum mlx4_port_type *port_types) 271 enum mlx4_port_type *port_types)
268{ 272{
269 int err = 0; 273 int err = 0;
270 int change = 0; 274 int change = 0;
271 int port; 275 int port;
272 276
273 for (port = 0; port < dev->caps.num_ports; port++) { 277 for (port = 0; port < dev->caps.num_ports; port++) {
278 /* Change the port type only if the new type is different
279 * from the current, and not set to Auto */
274 if (port_types[port] != dev->caps.port_type[port + 1]) { 280 if (port_types[port] != dev->caps.port_type[port + 1]) {
275 change = 1; 281 change = 1;
276 dev->caps.port_type[port + 1] = port_types[port]; 282 dev->caps.port_type[port + 1] = port_types[port];
@@ -302,10 +308,17 @@ static ssize_t show_port_type(struct device *dev,
302 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 308 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
303 port_attr); 309 port_attr);
304 struct mlx4_dev *mdev = info->dev; 310 struct mlx4_dev *mdev = info->dev;
311 char type[8];
312
313 sprintf(type, "%s",
314 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
315 "ib" : "eth");
316 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
317 sprintf(buf, "auto (%s)\n", type);
318 else
319 sprintf(buf, "%s\n", type);
305 320
306 return sprintf(buf, "%s\n", 321 return strlen(buf);
307 mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
308 "ib" : "eth");
309} 322}
310 323
311static ssize_t set_port_type(struct device *dev, 324static ssize_t set_port_type(struct device *dev,
@@ -317,6 +330,7 @@ static ssize_t set_port_type(struct device *dev,
317 struct mlx4_dev *mdev = info->dev; 330 struct mlx4_dev *mdev = info->dev;
318 struct mlx4_priv *priv = mlx4_priv(mdev); 331 struct mlx4_priv *priv = mlx4_priv(mdev);
319 enum mlx4_port_type types[MLX4_MAX_PORTS]; 332 enum mlx4_port_type types[MLX4_MAX_PORTS];
333 enum mlx4_port_type new_types[MLX4_MAX_PORTS];
320 int i; 334 int i;
321 int err = 0; 335 int err = 0;
322 336
@@ -324,26 +338,56 @@ static ssize_t set_port_type(struct device *dev,
324 info->tmp_type = MLX4_PORT_TYPE_IB; 338 info->tmp_type = MLX4_PORT_TYPE_IB;
325 else if (!strcmp(buf, "eth\n")) 339 else if (!strcmp(buf, "eth\n"))
326 info->tmp_type = MLX4_PORT_TYPE_ETH; 340 info->tmp_type = MLX4_PORT_TYPE_ETH;
341 else if (!strcmp(buf, "auto\n"))
342 info->tmp_type = MLX4_PORT_TYPE_AUTO;
327 else { 343 else {
328 mlx4_err(mdev, "%s is not supported port type\n", buf); 344 mlx4_err(mdev, "%s is not supported port type\n", buf);
329 return -EINVAL; 345 return -EINVAL;
330 } 346 }
331 347
348 mlx4_stop_sense(mdev);
332 mutex_lock(&priv->port_mutex); 349 mutex_lock(&priv->port_mutex);
333 for (i = 0; i < mdev->caps.num_ports; i++) 350 /* Possible type is always the one that was delivered */
351 mdev->caps.possible_type[info->port] = info->tmp_type;
352
353 for (i = 0; i < mdev->caps.num_ports; i++) {
334 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 354 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
335 mdev->caps.port_type[i+1]; 355 mdev->caps.possible_type[i+1];
356 if (types[i] == MLX4_PORT_TYPE_AUTO)
357 types[i] = mdev->caps.port_type[i+1];
358 }
336 359
337 err = mlx4_check_port_params(mdev, types); 360 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
361 for (i = 1; i <= mdev->caps.num_ports; i++) {
362 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
363 mdev->caps.possible_type[i] = mdev->caps.port_type[i];
364 err = -EINVAL;
365 }
366 }
367 }
368 if (err) {
369 mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
370 "Set only 'eth' or 'ib' for both ports "
371 "(should be the same)\n");
372 goto out;
373 }
374
375 mlx4_do_sense_ports(mdev, new_types, types);
376
377 err = mlx4_check_port_params(mdev, new_types);
338 if (err) 378 if (err)
339 goto out; 379 goto out;
340 380
341 for (i = 1; i <= mdev->caps.num_ports; i++) 381 /* We are about to apply the changes after the configuration
342 priv->port[i].tmp_type = 0; 382 * was verified, no need to remember the temporary types
383 * any more */
384 for (i = 0; i < mdev->caps.num_ports; i++)
385 priv->port[i + 1].tmp_type = 0;
343 386
344 err = mlx4_change_port_types(mdev, types); 387 err = mlx4_change_port_types(mdev, new_types);
345 388
346out: 389out:
390 mlx4_start_sense(mdev);
347 mutex_unlock(&priv->port_mutex); 391 mutex_unlock(&priv->port_mutex);
348 return err ? err : count; 392 return err ? err : count;
349} 393}
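
The sysfs write path now runs a fixed sequence: stop the sense poller, record the request (possibly "auto") as the port's possible_type, resolve every port's effective type, reject "auto" outright on HCAs without the DPDP capability, pass the candidate set through mlx4_do_sense_ports() and mlx4_check_port_params(), apply it with mlx4_change_port_types(), and restart sensing whether or not anything failed. A condensed control-flow sketch with stand-in helpers:

    /* Sketch of the set_port_type sequence; every helper is a
     * stand-in that just reports success. */
    #include <stdio.h>

    static void stop_sense(void)   { puts("stop sense");  }
    static void start_sense(void)  { puts("start sense"); }
    static int  sense_ports(void)  { puts("resolve auto"); return 0; }
    static int  check_params(void) { puts("check types");  return 0; }
    static int  change_types(void) { puts("apply types");  return 0; }

    int main(void)
    {
        int err;

        stop_sense();                 /* no races with the poller */
        /* mutex_lock(&port_mutex); */
        sense_ports();                /* map "auto" to a concrete type */
        err = check_params();         /* e.g. eth-ib mix without DPDP */
        if (!err)
            err = change_types();     /* may reset the HCA ports */
        start_sense();                /* restarted even on error */
        /* mutex_unlock(&port_mutex); */
        return err;
    }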
@@ -1117,6 +1161,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1117 if (err) 1161 if (err)
1118 goto err_port; 1162 goto err_port;
1119 1163
1164 mlx4_sense_init(dev);
1165 mlx4_start_sense(dev);
1166
1120 pci_set_drvdata(pdev, dev); 1167 pci_set_drvdata(pdev, dev);
1121 1168
1122 return 0; 1169 return 0;
@@ -1182,6 +1229,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1182 int p; 1229 int p;
1183 1230
1184 if (dev) { 1231 if (dev) {
1232 mlx4_stop_sense(dev);
1185 mlx4_unregister_device(dev); 1233 mlx4_unregister_device(dev);
1186 1234
1187 for (p = 1; p <= dev->caps.num_ports; p++) { 1235 for (p = 1; p <= dev->caps.num_ports; p++) {
@@ -1230,6 +1278,8 @@ static struct pci_device_id mlx4_pci_table[] = {
1230 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ 1278 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
1231 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ 1279 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
1232 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 1280 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
1281 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1282 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1233 { 0, } 1283 { 0, }
1234}; 1284};
1235 1285
@@ -1264,9 +1314,11 @@ static int __init mlx4_init(void)
1264 if (mlx4_verify_params()) 1314 if (mlx4_verify_params())
1265 return -EINVAL; 1315 return -EINVAL;
1266 1316
1267 ret = mlx4_catas_init(); 1317 mlx4_catas_init();
1268 if (ret) 1318
1269 return ret; 1319 mlx4_wq = create_singlethread_workqueue("mlx4");
1320 if (!mlx4_wq)
1321 return -ENOMEM;
1270 1322
1271 ret = pci_register_driver(&mlx4_driver); 1323 ret = pci_register_driver(&mlx4_driver);
1272 return ret < 0 ? ret : 0; 1324 return ret < 0 ? ret : 0;
@@ -1275,7 +1327,7 @@ static int __init mlx4_init(void)
1275static void __exit mlx4_cleanup(void) 1327static void __exit mlx4_cleanup(void)
1276{ 1328{
1277 pci_unregister_driver(&mlx4_driver); 1329 pci_unregister_driver(&mlx4_driver);
1278 mlx4_catas_cleanup(); 1330 destroy_workqueue(mlx4_wq);
1279} 1331}
1280 1332
1281module_init(mlx4_init); 1333module_init(mlx4_init);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index e0213bad61c7..5bd79c2b184f 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -40,6 +40,7 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/radix-tree.h> 41#include <linux/radix-tree.h>
42#include <linux/timer.h> 42#include <linux/timer.h>
43#include <linux/workqueue.h>
43 44
44#include <linux/mlx4/device.h> 45#include <linux/mlx4/device.h>
45#include <linux/mlx4/driver.h> 46#include <linux/mlx4/driver.h>
@@ -276,6 +277,13 @@ struct mlx4_port_info {
276 struct mlx4_vlan_table vlan_table; 277 struct mlx4_vlan_table vlan_table;
277}; 278};
278 279
280struct mlx4_sense {
281 struct mlx4_dev *dev;
282 u8 do_sense_port[MLX4_MAX_PORTS + 1];
283 u8 sense_allowed[MLX4_MAX_PORTS + 1];
284 struct delayed_work sense_poll;
285};
286
279struct mlx4_priv { 287struct mlx4_priv {
280 struct mlx4_dev dev; 288 struct mlx4_dev dev;
281 289
@@ -305,6 +313,7 @@ struct mlx4_priv {
305 struct mlx4_uar driver_uar; 313 struct mlx4_uar driver_uar;
306 void __iomem *kar; 314 void __iomem *kar;
307 struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; 315 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
316 struct mlx4_sense sense;
308 struct mutex port_mutex; 317 struct mutex port_mutex;
309}; 318};
310 319
@@ -313,6 +322,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
313 return container_of(dev, struct mlx4_priv, dev); 322 return container_of(dev, struct mlx4_priv, dev);
314} 323}
315 324
325#define MLX4_SENSE_RANGE (HZ * 3)
326
327extern struct workqueue_struct *mlx4_wq;
328
316u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); 329u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
317void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 330void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
318u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 331u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
@@ -346,8 +359,7 @@ void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
346 359
347void mlx4_start_catas_poll(struct mlx4_dev *dev); 360void mlx4_start_catas_poll(struct mlx4_dev *dev);
348void mlx4_stop_catas_poll(struct mlx4_dev *dev); 361void mlx4_stop_catas_poll(struct mlx4_dev *dev);
349int mlx4_catas_init(void); 362void mlx4_catas_init(void);
350void mlx4_catas_cleanup(void);
351int mlx4_restart_one(struct pci_dev *pdev); 363int mlx4_restart_one(struct pci_dev *pdev);
352int mlx4_register_device(struct mlx4_dev *dev); 364int mlx4_register_device(struct mlx4_dev *dev);
353void mlx4_unregister_device(struct mlx4_dev *dev); 365void mlx4_unregister_device(struct mlx4_dev *dev);
@@ -379,6 +391,17 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
379 391
380void mlx4_handle_catas_err(struct mlx4_dev *dev); 392void mlx4_handle_catas_err(struct mlx4_dev *dev);
381 393
394void mlx4_do_sense_ports(struct mlx4_dev *dev,
395 enum mlx4_port_type *stype,
396 enum mlx4_port_type *defaults);
397void mlx4_start_sense(struct mlx4_dev *dev);
398void mlx4_stop_sense(struct mlx4_dev *dev);
399void mlx4_sense_init(struct mlx4_dev *dev);
400int mlx4_check_port_params(struct mlx4_dev *dev,
401 enum mlx4_port_type *port_type);
402int mlx4_change_port_types(struct mlx4_dev *dev,
403 enum mlx4_port_type *port_types);
404
382void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 405void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
383void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 406void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
384 407
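
struct mlx4_sense bundles the poller state: a per-port do_sense_port trigger (set from the EQ handler), a per-port sense_allowed capability bit (set when firmware reports MLX4_PORT_TYPE_AUTO support), and a delayed_work re-armed every MLX4_SENSE_RANGE (three seconds). A kernel-side sketch of that re-arming cadence; for brevity it uses the system workqueue rather than mlx4_wq:

    /* Sketch (kernel module context): a self-re-arming delayed work,
     * the cadence used by the sense poller. */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    #define SKETCH_SENSE_RANGE (HZ * 3)

    static struct delayed_work sense_poll;

    static void sense_fn(struct work_struct *work)
    {
        /* ... probe ports, maybe dispatch events ... */
        schedule_delayed_work(&sense_poll, SKETCH_SENSE_RANGE);  /* re-arm */
    }

    static int __init sketch_init(void)
    {
        INIT_DELAYED_WORK(&sense_poll, sense_fn);
        schedule_delayed_work(&sense_poll, SKETCH_SENSE_RANGE);
        return 0;
    }

    static void __exit sketch_exit(void)
    {
        cancel_delayed_work_sync(&sense_poll);   /* wait out in-flight poll */
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");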
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 0a057e5dc63b..7cce3342ef8c 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -298,20 +298,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
298{ 298{
299 struct mlx4_cmd_mailbox *mailbox; 299 struct mlx4_cmd_mailbox *mailbox;
300 int err; 300 int err;
301 u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
302 301
303 mailbox = mlx4_alloc_cmd_mailbox(dev); 302 mailbox = mlx4_alloc_cmd_mailbox(dev);
304 if (IS_ERR(mailbox)) 303 if (IS_ERR(mailbox))
305 return PTR_ERR(mailbox); 304 return PTR_ERR(mailbox);
306 305
307 memset(mailbox->buf, 0, 256); 306 memset(mailbox->buf, 0, 256);
308 if (is_eth) { 307 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
309 ((u8 *) mailbox->buf)[3] = 6; 308 return 0;
310 ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15); 309
311 ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15); 310 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
312 } else 311 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
313 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
314 err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
315 MLX4_CMD_TIME_CLASS_B); 312 MLX4_CMD_TIME_CLASS_B);
316 313
317 mlx4_free_cmd_mailbox(dev, mailbox); 314 mlx4_free_cmd_mailbox(dev, mailbox);
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
new file mode 100644
index 000000000000..6d5089ecb5af
--- /dev/null
+++ b/drivers/net/mlx4/sense.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+			   enum mlx4_port_type *type)
+{
+	u64 out_param;
+	int err = 0;
+
+	err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
+			   MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+	if (err) {
+		mlx4_err(dev, "Sense command failed for port: %d\n", port);
+		return err;
+	}
+
+	if (out_param > 2) {
+		mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
+		return -EINVAL;
+	}
+
+	*type = out_param;
+	return 0;
+}
+
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+			 enum mlx4_port_type *stype,
+			 enum mlx4_port_type *defaults)
+{
+	struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
+	int err;
+	int i;
+
+	for (i = 1; i <= dev->caps.num_ports; i++) {
+		stype[i - 1] = 0;
+		if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
+		    dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+			err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
+			if (err)
+				stype[i - 1] = defaults[i - 1];
+		} else
+			stype[i - 1] = defaults[i - 1];
+	}
+
+	/*
+	 * Adjust port configuration:
+	 * If port 1 sensed nothing and port 2 is IB, set both as IB
+	 * If port 2 sensed nothing and port 1 is Eth, set both as Eth
+	 */
+	if (stype[0] == MLX4_PORT_TYPE_ETH) {
+		for (i = 1; i < dev->caps.num_ports; i++)
+			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
+	}
+	if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
+		for (i = 0; i < dev->caps.num_ports - 1; i++)
+			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
+	}
+
+	/*
+	 * If sensed nothing, remain in current configuration.
+	 */
+	for (i = 0; i < dev->caps.num_ports; i++)
+		stype[i] = stype[i] ? stype[i] : defaults[i];
+
+}
+
+static void mlx4_sense_port(struct work_struct *work)
+{
+	struct delayed_work *delay = container_of(work, struct delayed_work, work);
+	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
+						sense_poll);
+	struct mlx4_dev *dev = sense->dev;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	enum mlx4_port_type stype[MLX4_MAX_PORTS];
+
+	mutex_lock(&priv->port_mutex);
+	mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
+
+	if (mlx4_check_port_params(dev, stype))
+		goto sense_again;
+
+	if (mlx4_change_port_types(dev, stype))
+		mlx4_err(dev, "Failed to change port_types\n");
+
+sense_again:
+	mutex_unlock(&priv->port_mutex);
+	queue_delayed_work(mlx4_wq, &sense->sense_poll,
+			   round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_start_sense(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_sense *sense = &priv->sense;
+
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
+		return;
+
+	queue_delayed_work(mlx4_wq, &sense->sense_poll,
+			   round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_stop_sense(struct mlx4_dev *dev)
+{
+	cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
+}
+
+void mlx4_sense_init(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_sense *sense = &priv->sense;
+	int port;
+
+	sense->dev = dev;
+	for (port = 1; port <= dev->caps.num_ports; port++)
+		sense->do_sense_port[port] = 1;
+
+	INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+}
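The adjustment rules in mlx4_do_sense_ports() are easiest to see on a two-port HCA: a port that sensed nothing (0) inherits Eth from a lower port or IB from a higher port, and otherwise falls back to its default. A standalone userspace sketch of just that logic, with the kernel types replaced by a local enum:

/* Userspace illustration of the sense adjustment rules; not kernel code. */
#include <assert.h>

enum mlx4_port_type {
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

static void adjust(enum mlx4_port_type stype[2],
		   const enum mlx4_port_type defaults[2])
{
	int i;

	/* Port 1 is Eth: unsensed higher ports become Eth. */
	if (stype[0] == MLX4_PORT_TYPE_ETH)
		for (i = 1; i < 2; i++)
			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
	/* Last port is IB: unsensed lower ports become IB. */
	if (stype[1] == MLX4_PORT_TYPE_IB)
		for (i = 0; i < 1; i++)
			stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
	/* Anything still unsensed keeps its current/default type. */
	for (i = 0; i < 2; i++)
		stype[i] = stype[i] ? stype[i] : defaults[i];
}

int main(void)
{
	enum mlx4_port_type defaults[2] = { MLX4_PORT_TYPE_IB, MLX4_PORT_TYPE_ETH };
	enum mlx4_port_type stype[2];

	/* Port 1 sensed Eth, port 2 sensed nothing: both become Eth. */
	stype[0] = MLX4_PORT_TYPE_ETH;
	stype[1] = 0;
	adjust(stype, defaults);
	assert(stype[0] == MLX4_PORT_TYPE_ETH && stype[1] == MLX4_PORT_TYPE_ETH);

	/* Port 2 sensed IB, port 1 sensed nothing: both become IB. */
	stype[0] = 0;
	stype[1] = MLX4_PORT_TYPE_IB;
	adjust(stype, defaults);
	assert(stype[0] == MLX4_PORT_TYPE_IB && stype[1] == MLX4_PORT_TYPE_IB);

	/* Nothing sensed: stay in the current configuration. */
	stype[0] = 0;
	stype[1] = 0;
	adjust(stype, defaults);
	assert(stype[0] == defaults[0] && stype[1] == defaults[1]);
	return 0;
}
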
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index cf9c679ab38b..0f82293a82ed 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -55,6 +55,7 @@ enum {
 	MLX4_CMD_CLOSE_PORT	 = 0xa,
 	MLX4_CMD_QUERY_HCA	 = 0xb,
 	MLX4_CMD_QUERY_PORT	 = 0x43,
+	MLX4_CMD_SENSE_PORT	 = 0x4d,
 	MLX4_CMD_SET_PORT	 = 0xc,
 	MLX4_CMD_ACCESS_DDR	 = 0x2e,
 	MLX4_CMD_MAP_ICM	 = 0xffa,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8f659cc29960..3aff8a6a389e 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -155,8 +155,9 @@ enum mlx4_qp_region {
 };
 
 enum mlx4_port_type {
-	MLX4_PORT_TYPE_IB	= 1 << 0,
-	MLX4_PORT_TYPE_ETH	= 1 << 1,
+	MLX4_PORT_TYPE_IB	= 1,
+	MLX4_PORT_TYPE_ETH	= 2,
+	MLX4_PORT_TYPE_AUTO	= 3
 };
 
 enum mlx4_special_vlan_idx {
@@ -237,6 +238,7 @@ struct mlx4_caps {
 	enum mlx4_port_type	port_type[MLX4_MAX_PORTS + 1];
 	u8			supported_type[MLX4_MAX_PORTS + 1];
 	u32			port_mask;
+	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_buf_list {
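One detail worth noting in the enum change: IB and ETH keep their old single-bit values (1 << 0 and 1 << 1 are 1 and 2), so the new AUTO value 3 is exactly their bitwise union. The patch does not state that this union reading is intended, but the arithmetic is checkable in isolation:

/* Standalone check that AUTO is the union of the IB and ETH bits. */
#include <assert.h>

enum mlx4_port_type {
	MLX4_PORT_TYPE_IB	= 1,
	MLX4_PORT_TYPE_ETH	= 2,
	MLX4_PORT_TYPE_AUTO	= 3
};

int main(void)
{
	assert(MLX4_PORT_TYPE_AUTO ==
	       (MLX4_PORT_TYPE_IB | MLX4_PORT_TYPE_ETH));
	/* Single-bit tests against AUTO still work as before. */
	assert(MLX4_PORT_TYPE_AUTO & MLX4_PORT_TYPE_IB);
	assert(MLX4_PORT_TYPE_AUTO & MLX4_PORT_TYPE_ETH);
	return 0;
}
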
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index ec7c6d99ed3f..938858304300 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -314,12 +314,12 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
  */
 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
 
-#define IB_SERVICE_ID_AGN_MASK	__constant_cpu_to_be64(0xFF00000000000000ULL)
-#define IB_CM_ASSIGN_SERVICE_ID	__constant_cpu_to_be64(0x0200000000000000ULL)
-#define IB_CMA_SERVICE_ID	__constant_cpu_to_be64(0x0000000001000000ULL)
-#define IB_CMA_SERVICE_ID_MASK	__constant_cpu_to_be64(0xFFFFFFFFFF000000ULL)
-#define IB_SDP_SERVICE_ID	__constant_cpu_to_be64(0x0000000000010000ULL)
-#define IB_SDP_SERVICE_ID_MASK	__constant_cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
+#define IB_SERVICE_ID_AGN_MASK	cpu_to_be64(0xFF00000000000000ULL)
+#define IB_CM_ASSIGN_SERVICE_ID	cpu_to_be64(0x0200000000000000ULL)
+#define IB_CMA_SERVICE_ID	cpu_to_be64(0x0000000001000000ULL)
+#define IB_CMA_SERVICE_ID_MASK	cpu_to_be64(0xFFFFFFFFFF000000ULL)
+#define IB_SDP_SERVICE_ID	cpu_to_be64(0x0000000000010000ULL)
+#define IB_SDP_SERVICE_ID_MASK	cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
 
 struct ib_cm_compare_data {
 	u8  data[IB_CM_COMPARE_SIZE];
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 5f6c40fffcf4..d3b9401b77b0 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -107,7 +107,7 @@
 #define IB_MGMT_RMPP_STATUS_ABORT_MAX	127
 
 #define IB_QP0		0
-#define IB_QP1		__constant_htonl(1)
+#define IB_QP1		cpu_to_be32(1)
 #define IB_QP1_QKEY	0x80010000
 #define IB_QP_SET_QKEY	0x80000000
 
@@ -290,7 +290,7 @@ static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
  */
 static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
 {
-	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) |
+	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
 				     (flags & 0x7);
 }
 
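The 0xF1 -> 0xF8 change in ib_set_rmpp_flags() is a genuine mask fix: rmpp_rtime_flags packs the 5-bit response time in bits 7:3 and the three RMPP flags in bits 2:0, so clearing the flags requires & 0xF8 (keep bits 7:3 only). The old & 0xF1 both preserved stale flag bit 0 and clobbered response-time bit 3. A standalone userspace check of the two masks:

/* Userspace demonstration of the old vs. fixed rmpp_rtime_flags mask. */
#include <assert.h>
#include <stdint.h>

static uint8_t set_flags_old(uint8_t rtime_flags, uint8_t flags)
{
	return (rtime_flags & 0xF1) | (flags & 0x7);	/* buggy mask */
}

static uint8_t set_flags_new(uint8_t rtime_flags, uint8_t flags)
{
	return (rtime_flags & 0xF8) | (flags & 0x7);	/* fixed mask */
}

int main(void)
{
	uint8_t hdr = 0xA9;	/* resptime 0x15 in bits 7:3, flag bit 0 set */

	/* Fixed mask: clearing the flags leaves only the response time. */
	assert(set_flags_new(hdr, 0) == 0xA8);
	/* Old mask: stale flag bit 0 survives, resptime bit 3 is lost. */
	assert(set_flags_old(hdr, 0) == 0xA1);
	return 0;
}
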
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index aaca0878668f..98b9086d769a 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -63,25 +63,25 @@ struct ib_smp {
 	u8	return_path[IB_SMP_MAX_PATH_HOPS];
 } __attribute__ ((packed));
 
-#define IB_SMP_DIRECTION			__constant_htons(0x8000)
+#define IB_SMP_DIRECTION			cpu_to_be16(0x8000)
 
 /* Subnet management attributes */
-#define IB_SMP_ATTR_NOTICE			__constant_htons(0x0002)
-#define IB_SMP_ATTR_NODE_DESC			__constant_htons(0x0010)
-#define IB_SMP_ATTR_NODE_INFO			__constant_htons(0x0011)
-#define IB_SMP_ATTR_SWITCH_INFO			__constant_htons(0x0012)
-#define IB_SMP_ATTR_GUID_INFO			__constant_htons(0x0014)
-#define IB_SMP_ATTR_PORT_INFO			__constant_htons(0x0015)
-#define IB_SMP_ATTR_PKEY_TABLE			__constant_htons(0x0016)
-#define IB_SMP_ATTR_SL_TO_VL_TABLE		__constant_htons(0x0017)
-#define IB_SMP_ATTR_VL_ARB_TABLE		__constant_htons(0x0018)
-#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE	__constant_htons(0x0019)
-#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE	__constant_htons(0x001A)
-#define IB_SMP_ATTR_MCAST_FORWARD_TABLE		__constant_htons(0x001B)
-#define IB_SMP_ATTR_SM_INFO			__constant_htons(0x0020)
-#define IB_SMP_ATTR_VENDOR_DIAG			__constant_htons(0x0030)
-#define IB_SMP_ATTR_LED_INFO			__constant_htons(0x0031)
-#define IB_SMP_ATTR_VENDOR_MASK			__constant_htons(0xFF00)
+#define IB_SMP_ATTR_NOTICE			cpu_to_be16(0x0002)
+#define IB_SMP_ATTR_NODE_DESC			cpu_to_be16(0x0010)
+#define IB_SMP_ATTR_NODE_INFO			cpu_to_be16(0x0011)
+#define IB_SMP_ATTR_SWITCH_INFO			cpu_to_be16(0x0012)
+#define IB_SMP_ATTR_GUID_INFO			cpu_to_be16(0x0014)
+#define IB_SMP_ATTR_PORT_INFO			cpu_to_be16(0x0015)
+#define IB_SMP_ATTR_PKEY_TABLE			cpu_to_be16(0x0016)
+#define IB_SMP_ATTR_SL_TO_VL_TABLE		cpu_to_be16(0x0017)
+#define IB_SMP_ATTR_VL_ARB_TABLE		cpu_to_be16(0x0018)
+#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE	cpu_to_be16(0x0019)
+#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE	cpu_to_be16(0x001A)
+#define IB_SMP_ATTR_MCAST_FORWARD_TABLE		cpu_to_be16(0x001B)
+#define IB_SMP_ATTR_SM_INFO			cpu_to_be16(0x0020)
+#define IB_SMP_ATTR_VENDOR_DIAG			cpu_to_be16(0x0030)
+#define IB_SMP_ATTR_LED_INFO			cpu_to_be16(0x0031)
+#define IB_SMP_ATTR_VENDOR_MASK			cpu_to_be16(0xFF00)
 
 struct ib_port_info {
 	__be64 mkey;