author     Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 18:47:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 18:47:08 -0400
commit     39b566eedbe9e35d38502cc5e62ef7abf1aff9c9 (patch)
tree       114d963f3eb4e8aff401ed74ae0429aefc55c9fd /drivers/infiniband
parent     39f15003c7b268e4199d5ddce60a6944a74a14b7 (diff)
parent     09f98bafea792644f2dea39eb080aa57d854f5b3 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (30 commits)
  RDMA/cxgb3: Enforce required firmware
  IB/mlx4: Unregister IB device prior to CLOSE PORT command
  mlx4_core: Add link type autosensing
  mlx4_core: Don't perform SET_PORT command for Ethernet ports
  RDMA/nes: Handle MPA Reject message properly
  RDMA/nes: Improve use of PBLs
  RDMA/nes: Remove LLTX
  RDMA/nes: Inform hardware that asynchronous event has been handled
  RDMA/nes: Fix tmp_addr compilation warning
  RDMA/nes: Report correct vendor_id and vendor_part_id
  RDMA/nes: Update copyright to new legal entity and year
  RDMA/nes: Account for freed PBL after HW operation
  IB: Remove useless ibdev_is_alive() tests from sysfs code
  IB/sa_query: Fix AH leak due to update_sm_ah() race
  IB/mad: Fix ib_post_send_mad() returning 0 with no generate send comp
  IB/mad: initialize mad_agent_priv before putting on lists
  IB/mad: Fix null pointer dereference in local_completions()
  IB/mad: Fix RMPP header RRespTime manipulation
  IB/iser: Remove hard setting of path MTU
  mlx4_core: Add device IDs for MT25458 10GigE devices
  ...
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c                    |  15
-rw-r--r--  drivers/infiniband/core/cm_msgs.h               |  22
-rw-r--r--  drivers/infiniband/core/device.c                |   4
-rw-r--r--  drivers/infiniband/core/mad.c                   |  40
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c              |   2
-rw-r--r--  drivers/infiniband/core/sa_query.c              |   2
-rw-r--r--  drivers/infiniband/core/sysfs.c                 |  19
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c          |  30
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h          |   3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h           |   6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c           |   3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c           |   5
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c           |  17
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c           |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c      |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c   |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c         |  95
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c          |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c        |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c          |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c          |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c  |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.c   |   6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c       |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h       |  10
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c                |  27
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c               |   5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                 |  22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c         |  25
-rw-r--r--  drivers/infiniband/hw/nes/nes.c                 |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes.h                 |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c              | 586
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h              |  12
-rw-r--r--  drivers/infiniband/hw/nes/nes_context.h         |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c              |  17
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h              |   5
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c             | 142
-rw-r--r--  drivers/infiniband/hw/nes/nes_user.h            |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c           |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c           | 249
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.h           |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       |   9
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c        |   7
43 files changed, 881 insertions(+), 552 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f1e82a92e61e..5130fc55b8e2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -927,8 +927,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	unsigned long flags;
 	int ret = 0;
 
-	service_mask = service_mask ? service_mask :
-		       __constant_cpu_to_be64(~0ULL);
+	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
 	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -954,7 +953,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	spin_lock_irqsave(&cm.lock, flags);
 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
 		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+		cm_id->service_mask = ~cpu_to_be64(0);
 	} else {
 		cm_id->service_id = service_id;
 		cm_id->service_mask = service_mask;
@@ -1134,7 +1133,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto error1;
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 				param->primary_path->packet_life_time) * 2 +
 				 cm_convert_to_ms(
@@ -1545,7 +1544,7 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
@@ -2898,7 +2897,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		goto out;
 
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2992,7 +2991,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
@@ -3789,7 +3788,7 @@ static int __init ib_cm_init(void)
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
 	cm.listen_service_table = RB_ROOT;
-	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
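The hunks above all follow one pattern: the __constant_* byte-order macros are dropped because the plain helpers fold to constants too, and an all-ones mask can be written ~cpu_to_be64(0). A minimal standalone sketch (userspace C, not part of the patch) using glibc's htobe64() as a stand-in for the kernel's cpu_to_be64():

#include <assert.h>
#include <endian.h>	/* htobe64(): stand-in for the kernel's cpu_to_be64() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t new_style = ~htobe64(0);	/* ~cpu_to_be64(0) */
	uint64_t old_style = htobe64(~0ULL);	/* __constant_cpu_to_be64(~0ULL) */

	/* An all-ones pattern is invariant under byte swapping, so both agree. */
	assert(new_style == old_style);
	printf("mask = 0x%016llx\n", (unsigned long long)new_style);
	return 0;
}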
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index aec9c7af825d..7e63c08f697c 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -44,17 +44,17 @@
 
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
 
-#define CM_REQ_ATTR_ID	    __constant_htons(0x0010)
-#define CM_MRA_ATTR_ID	    __constant_htons(0x0011)
-#define CM_REJ_ATTR_ID	    __constant_htons(0x0012)
-#define CM_REP_ATTR_ID	    __constant_htons(0x0013)
-#define CM_RTU_ATTR_ID	    __constant_htons(0x0014)
-#define CM_DREQ_ATTR_ID	    __constant_htons(0x0015)
-#define CM_DREP_ATTR_ID	    __constant_htons(0x0016)
-#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
-#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
-#define CM_LAP_ATTR_ID	    __constant_htons(0x0019)
-#define CM_APR_ATTR_ID	    __constant_htons(0x001A)
+#define CM_REQ_ATTR_ID	    cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID	    cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID	    cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID	    cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID	    cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID	    cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID	    cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID	    cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID	    cpu_to_be16(0x001A)
 
 enum cm_msg_sequence {
 	CM_MSG_SEQUENCE_REQ,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7913b804311e..d1fba4153332 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -193,7 +193,7 @@ void ib_dealloc_device(struct ib_device *device)
 
 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
 
-	ib_device_unregister_sysfs(device);
+	kobject_put(&device->dev.kobj);
 }
 EXPORT_SYMBOL(ib_dealloc_device);
 
@@ -348,6 +348,8 @@ void ib_unregister_device(struct ib_device *device)
 
 	mutex_unlock(&device_mutex);
 
+	ib_device_unregister_sysfs(device);
+
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
 		kfree(context);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5c54fc2350be..de922a04ca2d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -301,6 +301,16 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	mad_agent_priv->agent.context = context;
 	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 	mad_agent_priv->agent.port_num = port_num;
+	spin_lock_init(&mad_agent_priv->lock);
+	INIT_LIST_HEAD(&mad_agent_priv->send_list);
+	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
+	INIT_LIST_HEAD(&mad_agent_priv->done_list);
+	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
+	INIT_LIST_HEAD(&mad_agent_priv->local_list);
+	INIT_WORK(&mad_agent_priv->local_work, local_completions);
+	atomic_set(&mad_agent_priv->refcount, 1);
+	init_completion(&mad_agent_priv->comp);
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
@@ -350,17 +360,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
-	spin_lock_init(&mad_agent_priv->lock);
-	INIT_LIST_HEAD(&mad_agent_priv->send_list);
-	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
-	INIT_LIST_HEAD(&mad_agent_priv->done_list);
-	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
-	INIT_LIST_HEAD(&mad_agent_priv->local_list);
-	INIT_WORK(&mad_agent_priv->local_work, local_completions);
-	atomic_set(&mad_agent_priv->refcount, 1);
-	init_completion(&mad_agent_priv->comp);
-
 	return &mad_agent_priv->agent;
 
 error4:
@@ -743,9 +742,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		break;
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 		kmem_cache_free(ib_mad_cache, mad_priv);
-		kfree(local);
-		ret = 1;
-		goto out;
+		break;
 	case IB_MAD_RESULT_SUCCESS:
 		/* Treat like an incoming receive MAD */
 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
@@ -756,10 +753,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 					    &mad_priv->mad.mad);
 		}
 		if (!port_priv || !recv_mad_agent) {
+			/*
+			 * No receiving agent so drop packet and
+			 * generate send completion.
+			 */
 			kmem_cache_free(ib_mad_cache, mad_priv);
-			kfree(local);
-			ret = 0;
-			goto out;
+			break;
 		}
 		local->mad_priv = mad_priv;
 		local->recv_mad_agent = recv_mad_agent;
@@ -2356,7 +2355,7 @@ static void local_completions(struct work_struct *work)
 	struct ib_mad_local_private *local;
 	struct ib_mad_agent_private *recv_mad_agent;
 	unsigned long flags;
-	int recv = 0;
+	int free_mad;
 	struct ib_wc wc;
 	struct ib_mad_send_wc mad_send_wc;
 
@@ -2370,14 +2369,15 @@ static void local_completions(struct work_struct *work)
 				       completion_list);
 		list_del(&local->completion_list);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+		free_mad = 0;
 		if (local->mad_priv) {
 			recv_mad_agent = local->recv_mad_agent;
 			if (!recv_mad_agent) {
 				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+				free_mad = 1;
 				goto local_send_completion;
 			}
 
-			recv = 1;
 			/*
 			 * Defined behavior is to complete response
 			 * before request
@@ -2422,7 +2422,7 @@ local_send_completion:
 
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		atomic_dec(&mad_agent_priv->refcount);
-		if (!recv)
+		if (free_mad)
 			kmem_cache_free(ib_mad_cache, local->mad_priv);
 		kfree(local);
 	}
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 3af2b84cd838..57a3c6f947b2 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -735,7 +735,7 @@ process_rmpp_data(struct ib_mad_agent_private *agent,
 		goto bad;
 	}
 
-	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
 		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
 			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
 			goto bad;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7863a50d56f2..1865049e80f7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -395,6 +395,8 @@ static void update_sm_ah(struct work_struct *work)
 	}
 
 	spin_lock_irq(&port->ah_lock);
+	if (port->sm_ah)
+		kref_put(&port->sm_ah->ref, free_sm_ah);
 	port->sm_ah = new_ah;
 	spin_unlock_irq(&port->ah_lock);
 
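The two added lines are the whole AH-leak fix: the previous address handle's reference must be dropped before the pointer is overwritten, or every SM change leaks one AH. A minimal userspace sketch of the swap-under-lock pattern, with a pthread mutex and a toy refcount standing in for the kernel's spinlock and kref (names are illustrative):

#include <pthread.h>
#include <stdlib.h>

/* Toy refcounted object standing in for the kernel's sm_ah + kref. */
struct toy_ah {
	int ref;	/* protected by ah_lock in this sketch */
};

static pthread_mutex_t ah_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_ah *sm_ah;

static void toy_ah_put(struct toy_ah *ah)
{
	if (--ah->ref == 0)
		free(ah);
}

/* Mirrors the patched update_sm_ah(): drop the old reference
 * before installing the new handle, or the old one leaks. */
static void install_sm_ah(struct toy_ah *new_ah)
{
	pthread_mutex_lock(&ah_lock);
	if (sm_ah)
		toy_ah_put(sm_ah);	/* the added kref_put() */
	sm_ah = new_ah;
	pthread_mutex_unlock(&ah_lock);
}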
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b43f7d3682d3..5c04cfb54cb9 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -66,11 +66,6 @@ struct port_table_attribute {
 	int			index;
 };
 
-static inline int ibdev_is_alive(const struct ib_device *dev)
-{
-	return dev->reg_state == IB_DEV_REGISTERED;
-}
-
 static ssize_t port_attr_show(struct kobject *kobj,
 			      struct attribute *attr, char *buf)
 {
@@ -80,8 +75,6 @@ static ssize_t port_attr_show(struct kobject *kobj,
 
 	if (!port_attr->show)
 		return -EIO;
-	if (!ibdev_is_alive(p->ibdev))
-		return -ENODEV;
 
 	return port_attr->show(p, port_attr, buf);
 }
@@ -562,9 +555,6 @@ static ssize_t show_node_type(struct device *device,
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	switch (dev->node_type) {
 	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
 	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
@@ -581,9 +571,6 @@ static ssize_t show_sys_image_guid(struct device *device,
 	struct ib_device_attr attr;
 	ssize_t ret;
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
@@ -600,9 +587,6 @@ static ssize_t show_node_guid(struct device *device,
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
 		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
 		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
@@ -848,6 +832,9 @@ void ib_device_unregister_sysfs(struct ib_device *device)
 	struct kobject *p, *t;
 	struct ib_port *port;
 
+	/* Hold kobject until ib_dealloc_device() */
+	kobject_get(&device->dev.kobj);
+
 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
 		list_del(&p->entry);
 		port = container_of(p, struct ib_port, kobj);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 4dcf08b3fd83..d4d7204c11ed 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -450,7 +450,7 @@ static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
 	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
 		return 0;
 
-	if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
+	if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
 	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
 		return 0;
 
@@ -938,6 +938,23 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	if (!rdev_p->t3cdev_p)
 		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
 	rdev_p->t3cdev_p->ulp = (void *) rdev_p;
+
+	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
+				    &(rdev_p->fw_info));
+	if (err) {
+		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
+		       __func__, rdev_p->t3cdev_p, err);
+		goto err1;
+	}
+	if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
+		printk(KERN_ERR MOD "fatal firmware version mismatch: "
+		       "need version %u but adapter has version %u\n",
+		       CXIO_FW_MAJ,
+		       G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
+		err = -EINVAL;
+		goto err1;
+	}
+
 	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
 				    &(rdev_p->rnic_info));
 	if (err) {
@@ -1204,11 +1221,12 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 	}
 
 	/* incoming SEND with no receive posted failures */
-	if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
+	if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
 	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
 		ret = -1;
 		goto skip_cqe;
 	}
+	BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
 	goto proc_cqe;
 }
 
@@ -1223,6 +1241,13 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 	 * then we complete this with TPT_ERR_MSN and mark the wq in
 	 * error.
 	 */
+
+	if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
+		wq->error = 1;
+		ret = -1;
+		goto skip_cqe;
+	}
+
 	if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
 		wq->error = 1;
 		hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
@@ -1277,6 +1302,7 @@ proc_cqe:
 			cxio_hal_pblpool_free(wq->rdev,
 				wq->rq[Q_PTR2IDX(wq->rq_rptr,
 					wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
+		BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
 		wq->rq_rptr++;
 	}
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 656fe47bc84f..e44dc2289471 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -61,6 +61,8 @@
 
 #define T3_MAX_DEV_NAME_LEN 32
 
+#define CXIO_FW_MAJ 7
+
 struct cxio_hal_ctrl_qp {
 	u32 wptr;
 	u32 rptr;
@@ -108,6 +110,7 @@ struct cxio_rdev {
 	struct gen_pool *pbl_pool;
 	struct gen_pool *rqt_pool;
 	struct list_head entry;
+	struct ch_embedded_info fw_info;
 };
 
 static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 04618f7bfbb3..ff9be1a13106 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -604,6 +604,12 @@ struct t3_cqe {
 #define CQE_STATUS(x)	  (G_CQE_STATUS(be32_to_cpu((x).header)))
 #define CQE_OPCODE(x)	  (G_CQE_OPCODE(be32_to_cpu((x).header)))
 
+#define CQE_SEND_OPCODE(x)( \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
+
 #define CQE_LEN(x)	  (be32_to_cpu((x).len))
 
 /* used for RQ completion processing */
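CQE_SEND_OPCODE() exists because an incoming send can complete with any of the SEND opcode variants, so testing CQE_OPCODE() == T3_SEND alone misses the SE/INV forms. A switch-based sketch of the same predicate; the enum values below are illustrative placeholders, not the real hardware encoding:

enum t3_opcode {
	T3_SEND,		/* values here are illustrative, */
	T3_SEND_WITH_SE,	/* not the adapter's actual encoding */
	T3_SEND_WITH_INV,
	T3_SEND_WITH_SE_INV,
	T3_READ_RESP,
};

/* Equivalent of CQE_SEND_OPCODE(): every SEND variant must be
 * treated as a send completion, not just plain T3_SEND. */
static int is_send_opcode(enum t3_opcode op)
{
	switch (op) {
	case T3_SEND:
	case T3_SEND_WITH_SE:
	case T3_SEND_WITH_INV:
	case T3_SEND_WITH_SE_INV:
		return 1;
	default:
		return 0;
	}
}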
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 44e936e48a31..8699947aaf6c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1678,6 +1678,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep *ep = ctx;
 
+	if (state_read(&ep->com) != FPDU_MODE)
+		return CPL_RET_BUF_DONE;
+
 	PDBG("%s ep %p\n", __func__, ep);
 	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
 	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 7b67a6771720..743c5d8b8806 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -179,11 +179,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	case TPT_ERR_BOUND:
 	case TPT_ERR_INVALIDATE_SHARED_MR:
 	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
-		printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
-		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
-		       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
-		       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
-		       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
 		break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 19661b2f0406..c758fbd58478 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -99,8 +99,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
 		wqe->write.sgl[0].stag = wr->ex.imm_data;
-		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
-		wqe->write.num_sgle = __constant_cpu_to_be32(0);
+		wqe->write.sgl[0].len = cpu_to_be32(0);
+		wqe->write.num_sgle = cpu_to_be32(0);
 		*flit_cnt = 6;
 	} else {
 		plen = 0;
@@ -195,15 +195,12 @@ static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
 
-/*
- * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
- */
 static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 			    u32 num_sgle, u32 * pbl_addr, u8 * page_size)
 {
 	int i;
 	struct iwch_mr *mhp;
-	u32 offset;
+	u64 offset;
 	for (i = 0; i < num_sgle; i++) {
 
 		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
@@ -235,8 +232,8 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 			return -EINVAL;
 		}
 		offset = sg_list[i].addr - mhp->attr.va_fbo;
-		offset += ((u32) mhp->attr.va_fbo) %
-			  (1UL << (12 + mhp->attr.page_size));
+		offset += mhp->attr.va_fbo &
+			  ((1UL << (12 + mhp->attr.page_size)) - 1);
 		pbl_addr[i] = ((mhp->attr.pbl_addr -
 				rhp->rdev.rnic_info.pbl_base) >> 3) +
 			      (offset >> (12 + mhp->attr.page_size));
@@ -266,8 +263,8 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
 		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
 
 		/* to in the WQE == the offset into the page */
-		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
-				(1UL << (12 + page_size[i])));
+		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
+				((1UL << (12 + page_size[i])) - 1));
 
 		/* pbl_addr is the adapters address in the PBL */
 		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
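Two things change in iwch_sgl2pbl_map(): the page-offset modulo becomes a mask (the two are identical for power-of-two page sizes) and offset widens to u64 so that large registered regions are not silently truncated. A runnable standalone sketch of both points; the addresses and page-size shift are made up for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int shift = 12 + 2;		/* 12 + page_size, as in the driver */
	uint64_t va_fbo = 0x200000000ULL;	/* made-up region start (8 GB) */
	uint64_t addr = va_fbo + 0x180001234ULL;	/* sge ~6 GB into it */

	/* Mask and modulo agree whenever the divisor is a power of two. */
	assert((addr & ((1ULL << shift) - 1)) == (addr % (1ULL << shift)));

	/* The old u32 offset lost everything past 4 GB; u64 does not. */
	uint32_t off32 = (uint32_t)(addr - va_fbo);	/* old: u32 offset */
	uint64_t off64 = addr - va_fbo;			/* fix: u64 offset */
	printf("pbl index old=%llu new=%llu\n",
	       (unsigned long long)(off32 >> shift),
	       (unsigned long long)(off64 >> shift));
	return 0;
}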
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 44447aaa5501..c568b28f4e20 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -46,11 +46,11 @@
46#include "ehca_iverbs.h" 46#include "ehca_iverbs.h"
47#include "hcp_if.h" 47#include "hcp_if.h"
48 48
49#define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002) 49#define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002)
50#define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004) 50#define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004)
51#define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008) 51#define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008)
52 52
53#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001) 53#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
54 54
55/** 55/**
56 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue 56 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index dc37277f1c80..fc7181985e8e 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -772,8 +772,8 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
772 "0x%x, not 0x%x\n", csum, ifp->if_csum); 772 "0x%x, not 0x%x\n", csum, ifp->if_csum);
773 goto done; 773 goto done;
774 } 774 }
775 if (*(__be64 *) ifp->if_guid == 0ULL || 775 if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
776 *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) { 776 *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
777 ipath_dev_err(dd, "Invalid GUID %llx from flash; " 777 ipath_dev_err(dd, "Invalid GUID %llx from flash; "
778 "ignoring\n", 778 "ignoring\n",
779 *(unsigned long long *) ifp->if_guid); 779 *(unsigned long long *) ifp->if_guid);
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 64aeefbd2a5d..077879c0bdb5 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -455,7 +455,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 	if (!addrs) {
 		ipath_dev_err(dd, "failed to allocate shadow dma handle "
 			      "array, no expected sends!\n");
-		vfree(dd->ipath_pageshadow);
+		vfree(pages);
 		dd->ipath_pageshadow = NULL;
 		return;
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 17a123197477..16a702d46018 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -37,10 +37,10 @@
37#include "ipath_verbs.h" 37#include "ipath_verbs.h"
38#include "ipath_common.h" 38#include "ipath_common.h"
39 39
40#define IB_SMP_UNSUP_VERSION __constant_htons(0x0004) 40#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
41#define IB_SMP_UNSUP_METHOD __constant_htons(0x0008) 41#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
42#define IB_SMP_UNSUP_METH_ATTR __constant_htons(0x000C) 42#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
43#define IB_SMP_INVALID_FIELD __constant_htons(0x001C) 43#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
44 44
45static int reply(struct ib_smp *smp) 45static int reply(struct ib_smp *smp)
46{ 46{
@@ -789,12 +789,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
 	return recv_subn_get_pkeytable(smp, ibdev);
 }
 
-#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL	__constant_htons(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT	__constant_htons(0x0011)
-#define IB_PMA_PORT_COUNTERS		__constant_htons(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT	__constant_htons(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT	__constant_htons(0x001E)
+#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL	cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT	cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS		cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT	cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT	cpu_to_be16(0x001E)
 
 struct ib_perf {
 	u8 base_version;
@@ -884,19 +884,19 @@ struct ib_pma_portcounters {
 	__be32 port_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SEL_SYMBOL_ERROR			__constant_htons(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY		__constant_htons(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED			__constant_htons(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS		__constant_htons(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	__constant_htons(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS		__constant_htons(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	__constant_htons(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	__constant_htons(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED		__constant_htons(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA		__constant_htons(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA		__constant_htons(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS		__constant_htons(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS		__constant_htons(0x8000)
+#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)
 
 struct ib_pma_portcounters_ext {
 	u8 reserved;
@@ -913,14 +913,14 @@ struct ib_pma_portcounters_ext {
 	__be64 port_multicast_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SELX_PORT_XMIT_DATA		__constant_htons(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA		__constant_htons(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS		__constant_htons(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS		__constant_htons(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	__constant_htons(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	__constant_htons(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	__constant_htons(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	__constant_htons(0x0080)
+#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)
 
 static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 {
@@ -933,7 +933,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 		pmp->status |= IB_SMP_INVALID_FIELD;
 
 	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->cap_mask = __constant_cpu_to_be16(1 << 8);
+	p->cap_mask = cpu_to_be16(1 << 8);
 	p->base_version = 1;
 	p->class_version = 1;
 	/*
@@ -951,12 +951,11 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
  * We support 5 counters which only count the mandatory quantities.
  */
 #define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
-	__constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
-			       COUNTER_MASK(1, 1) | \
-			       COUNTER_MASK(1, 2) | \
-			       COUNTER_MASK(1, 3) | \
-			       COUNTER_MASK(1, 4))
+#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
+				    COUNTER_MASK(1, 1) | \
+				    COUNTER_MASK(1, 2) | \
+				    COUNTER_MASK(1, 3) | \
+				    COUNTER_MASK(1, 4))
 
 static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 					   struct ib_device *ibdev, u8 port)
@@ -1137,7 +1136,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 	status = dev->pma_sample_status;
 	p->sample_status = cpu_to_be16(status);
 	/* 64 bits */
-	p->extended_width = __constant_cpu_to_be32(0x80000000);
+	p->extended_width = cpu_to_be32(0x80000000);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
 		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
 			cpu_to_be64(
@@ -1185,7 +1184,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		pmp->status |= IB_SMP_INVALID_FIELD;
 
 	if (cntrs.symbol_error_counter > 0xFFFFUL)
-		p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
+		p->symbol_error_counter = cpu_to_be16(0xFFFF);
 	else
 		p->symbol_error_counter =
 			cpu_to_be16((u16)cntrs.symbol_error_counter);
@@ -1199,17 +1198,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	else
 		p->link_downed_counter = (u8)cntrs.link_downed_counter;
 	if (cntrs.port_rcv_errors > 0xFFFFUL)
-		p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_errors =
 			cpu_to_be16((u16) cntrs.port_rcv_errors);
 	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-		p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_remphys_errors =
 			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
 	if (cntrs.port_xmit_discards > 0xFFFFUL)
-		p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
+		p->port_xmit_discards = cpu_to_be16(0xFFFF);
 	else
 		p->port_xmit_discards =
 			cpu_to_be16((u16)cntrs.port_xmit_discards);
@@ -1220,24 +1219,24 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
-		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+		p->vl15_dropped = cpu_to_be16(0xFFFF);
 	else
 		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
 	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-		p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
 	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-		p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_packets =
 			cpu_to_be32((u32)cntrs.port_xmit_packets);
 	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-		p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_packets =
 			cpu_to_be32((u32) cntrs.port_rcv_packets);
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 9170710b950d..79b3dbc97179 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1744,7 +1744,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;
 
 	case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 8e255adf5d9b..4b0698590850 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -781,10 +781,10 @@ retry:
 	descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
 	descqp -= 2;
 	/* SDmaLastDesc */
-	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
+	descqp[0] |= cpu_to_le64(1ULL << 11);
 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
 		/* SDmaIntReq */
-		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
+		descqp[0] |= cpu_to_le64(1ULL << 15);
 	}
 
 	/* Commit writes to memory and advance the tail on the chip */
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 82cc588b8bf2..22e60998f1a7 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -419,7 +419,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;
 
 	case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 91c74cc797ae..6076cb61bf6a 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -370,7 +370,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 	 */
 	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
 		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
-		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
+		cpu_to_be32(IPATH_MULTICAST_QPN) :
 		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
 	/*
@@ -573,7 +573,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	/* Signal completion event if the solicited bit is set. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		       (ohdr->bth[0] &
-			__constant_cpu_to_be32(1 << 23)) != 0);
+			cpu_to_be32(1 << 23)) != 0);
 
 bail:;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 0190edc8044e..855911e7396d 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -209,20 +209,20 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
 
 	mm = get_task_mm(current);
 	if (!mm)
-		goto bail;
+		return;
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (!work)
 		goto bail_mm;
 
-	goto bail;
-
 	INIT_WORK(&work->work, user_pages_account);
 	work->mm = mm;
 	work->num_pages = num_pages;
 
+	schedule_work(&work->work);
+	return;
+
 bail_mm:
 	mmput(mm);
-bail:
 	return;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 82d9a0b5ca2f..7bff4b9baa0a 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -667,13 +667,13 @@ static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
 
 static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
 {
-	return descq | __constant_cpu_to_le64(1ULL << 12);
+	return descq | cpu_to_le64(1ULL << 12);
 }
 
 static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
 {
 					      /* last */ /* dma head */
-	return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
+	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
 }
 
 static inline __le64 ipath_sdma_make_desc1(u64 addr)
@@ -763,7 +763,7 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
 		if (ofs >= IPATH_SMALLBUF_DWORDS) {
 			for (i = 0; i < pkt->naddr; i++) {
 				dd->ipath_sdma_descq[dtail].qw[0] |=
-					__constant_cpu_to_le64(1ULL << 14);
+					cpu_to_le64(1ULL << 14);
 				if (++dtail == dd->ipath_sdma_descq_cnt)
 					dtail = 0;
 			}
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index cdf0e6abd34d..9289ab4b0ae8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1585,7 +1585,7 @@ static int ipath_query_port(struct ib_device *ibdev,
 	u64 ibcstat;
 
 	memset(props, 0, sizeof(*props));
-	props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
 	props->lmc = dd->ipath_lmc;
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 11e3f613df93..ae6cff4abffc 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -86,11 +86,11 @@
 #define IB_PMA_SAMPLE_STATUS_RUNNING	0x02
 
 /* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA	__constant_htons(0x0001)
-#define IB_PMA_PORT_RCV_DATA	__constant_htons(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS	__constant_htons(0x0003)
-#define IB_PMA_PORT_RCV_PKTS	__constant_htons(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT	__constant_htons(0x0005)
+#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)
 
 struct ib_reth {
 	__be64 vaddr;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 606f1e2ef284..19e68ab66168 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -147,7 +147,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
  * Snoop SM MADs for port info and P_Key table sets, so we can
  * synthesize LID change and P_Key change events.
  */
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
+static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
+		      u16 prev_lid)
 {
 	struct ib_event event;
 
@@ -157,6 +158,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 	if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
 		struct ib_port_info *pinfo =
 			(struct ib_port_info *) ((struct ib_smp *) mad)->data;
+		u16 lid = be16_to_cpu(pinfo->lid);
 
 		update_sm_ah(to_mdev(ibdev), port_num,
 			     be16_to_cpu(pinfo->sm_lid),
@@ -165,12 +167,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 		event.device	       = ibdev;
 		event.element.port_num = port_num;
 
-		if (pinfo->clientrereg_resv_subnetto & 0x80)
+		if (pinfo->clientrereg_resv_subnetto & 0x80) {
 			event.event = IB_EVENT_CLIENT_REREGISTER;
-		else
-			event.event = IB_EVENT_LID_CHANGE;
+			ib_dispatch_event(&event);
+		}
 
-		ib_dispatch_event(&event);
+		if (prev_lid != lid) {
+			event.event = IB_EVENT_LID_CHANGE;
+			ib_dispatch_event(&event);
+		}
 	}
 
 	if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -228,8 +233,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			struct ib_wc *in_wc, struct ib_grh *in_grh,
 			struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	u16 slid;
+	u16 slid, prev_lid = 0;
 	int err;
+	struct ib_port_attr pattr;
 
 	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
@@ -263,6 +269,13 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	} else
 		return IB_MAD_RESULT_SUCCESS;
 
+	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
+	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+	    !ib_query_port(ibdev, port_num, &pattr))
+		prev_lid = pattr.lid;
+
 	err = mlx4_MAD_IFC(to_mdev(ibdev),
 			   mad_flags & IB_MAD_IGNORE_MKEY,
 			   mad_flags & IB_MAD_IGNORE_BKEY,
@@ -271,7 +284,7 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
271 return IB_MAD_RESULT_FAILURE; 284 return IB_MAD_RESULT_FAILURE;
272 285
273 if (!out_mad->mad_hdr.status) { 286 if (!out_mad->mad_hdr.status) {
274 smp_snoop(ibdev, port_num, in_mad); 287 smp_snoop(ibdev, port_num, in_mad, prev_lid);
275 node_desc_override(ibdev, out_mad); 288 node_desc_override(ibdev, out_mad);
276 } 289 }
277 290
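
This hunk, and the matching mthca change further down, follow the same recipe: before the firmware executes a PortInfo SET, the driver snapshots the current LID with ib_query_port(), and afterwards it fires IB_EVENT_LID_CHANGE only if the LID actually changed, while client-reregister requests are now dispatched unconditionally. A minimal sketch of the snapshot step, condensed from the hunk above (the helper name snapshot_prev_lid is ours, not the driver's):

static u16 snapshot_prev_lid(struct ib_device *ibdev, u8 port_num,
			     struct ib_mad *in_mad)
{
	struct ib_port_attr pattr;

	/* Only a SET of PortInfo via an SMP can change the LID. */
	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		return pattr.lid;	/* LID as it was before the SET */
	return 0;			/* 0 means "no previous LID known" */
}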
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 61588bd273bd..2ccb9d31771f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -699,11 +699,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	mlx4_ib_mad_cleanup(ibdev);
+	ib_unregister_device(&ibdev->ib_dev);
+
 	for (p = 1; p <= ibdev->num_ports; ++p)
 		mlx4_CLOSE_PORT(dev, p);
 
-	mlx4_ib_mad_cleanup(ibdev);
-	ib_unregister_device(&ibdev->ib_dev);
 	iounmap(ibdev->uar_map);
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);
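
The reordering matters because ib_unregister_device() is what detaches the ULPs; closing the ports first would leave a window where consumers could still post work against a port the firmware has already closed. The safe ordering in a condensed sketch (function name ours, comments ours):

static void remove_sketch(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int p;

	mlx4_ib_mad_cleanup(ibdev);		/* quiesce MAD handling     */
	ib_unregister_device(&ibdev->ib_dev);	/* detach ULPs: no new work */

	for (p = 1; p <= ibdev->num_ports; ++p)
		mlx4_CLOSE_PORT(dev, p);	/* only now shut the ports  */
}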
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a91cb4c3fa5c..f385a24d31d2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,17 +71,17 @@ enum {
 };
 
 static const __be32 mlx4_ib_opcode[] = {
-	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
-	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
-	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
-	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
-	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
-	[IB_WR_RDMA_READ]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
-	[IB_WR_ATOMIC_CMP_AND_SWP]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
-	[IB_WR_ATOMIC_FETCH_AND_ADD]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
-	[IB_WR_SEND_WITH_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
-	[IB_WR_LOCAL_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
-	[IB_WR_FAST_REG_MR]		= __constant_cpu_to_be32(MLX4_OPCODE_FMR),
+	[IB_WR_SEND]			= cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= cpu_to_be32(MLX4_OPCODE_LSO),
+	[IB_WR_SEND_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+	[IB_WR_RDMA_WRITE]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+	[IB_WR_RDMA_WRITE_WITH_IMM]	= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+	[IB_WR_RDMA_READ]		= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+	[IB_WR_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+	[IB_WR_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+	[IB_WR_SEND_WITH_INV]		= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+	[IB_WR_LOCAL_INV]		= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+	[IB_WR_FAST_REG_MR]		= cpu_to_be32(MLX4_OPCODE_FMR),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
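
The __constant_* byte-order helpers are no longer needed here: after the byteorder header consolidation, cpu_to_be32() itself folds to a compile-time constant whenever its argument is constant, so it is legal in static initializers. A two-entry sketch (opcode values invented for illustration):

/* cpu_to_be32() is usable directly in a static initializer when the
 * argument is a compile-time constant; no __constant_ prefix required. */
static const __be32 example_opcode[] = {
	[0] = cpu_to_be32(0x0a),	/* invented opcode values */
	[1] = cpu_to_be32(0x0b),
};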
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 640449582aba..5648659ff0b0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -104,7 +104,8 @@ static void update_sm_ah(struct mthca_dev *dev,
  */
 static void smp_snoop(struct ib_device *ibdev,
 		      u8 port_num,
-		      struct ib_mad *mad)
+		      struct ib_mad *mad,
+		      u16 prev_lid)
 {
 	struct ib_event event;
 
@@ -114,6 +115,7 @@ static void smp_snoop(struct ib_device *ibdev,
 	if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
 		struct ib_port_info *pinfo =
 			(struct ib_port_info *) ((struct ib_smp *) mad)->data;
+		u16 lid = be16_to_cpu(pinfo->lid);
 
 		mthca_update_rate(to_mdev(ibdev), port_num);
 		update_sm_ah(to_mdev(ibdev), port_num,
@@ -123,12 +125,15 @@ static void smp_snoop(struct ib_device *ibdev,
 		event.device	       = ibdev;
 		event.element.port_num = port_num;
 
-		if (pinfo->clientrereg_resv_subnetto & 0x80)
+		if (pinfo->clientrereg_resv_subnetto & 0x80) {
 			event.event = IB_EVENT_CLIENT_REREGISTER;
-		else
-			event.event = IB_EVENT_LID_CHANGE;
+			ib_dispatch_event(&event);
+		}
 
-		ib_dispatch_event(&event);
+		if (prev_lid != lid) {
+			event.event = IB_EVENT_LID_CHANGE;
+			ib_dispatch_event(&event);
+		}
 	}
 
 	if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -196,6 +201,8 @@ int mthca_process_mad(struct ib_device *ibdev,
 	int err;
 	u8 status;
 	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+	u16 prev_lid = 0;
+	struct ib_port_attr pattr;
 
 	/* Forward locally generated traps to the SM */
 	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
@@ -233,6 +240,12 @@ int mthca_process_mad(struct ib_device *ibdev,
 			return IB_MAD_RESULT_SUCCESS;
 	} else
 		return IB_MAD_RESULT_SUCCESS;
+	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
+	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+	    !ib_query_port(ibdev, port_num, &pattr))
+		prev_lid = pattr.lid;
 
 	err = mthca_MAD_IFC(to_mdev(ibdev),
 			    mad_flags & IB_MAD_IGNORE_MKEY,
@@ -252,7 +265,7 @@ int mthca_process_mad(struct ib_device *ibdev,
 	}
 
 	if (!out_mad->mad_hdr.status) {
-		smp_snoop(ibdev, port_num, in_mad);
+		smp_snoop(ibdev, port_num, in_mad, prev_lid);
 		node_desc_override(ibdev, out_mad);
 	}
 
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b9611ade9eab..ca599767ffbd 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 13a5bb1a7bcf..04b12ad23390 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 4a65b96db2c8..52425154acd4 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -103,6 +103,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
 static void nes_disconnect_worker(struct work_struct *work);
 
 static int send_mpa_request(struct nes_cm_node *, struct sk_buff *);
+static int send_mpa_reject(struct nes_cm_node *);
 static int send_syn(struct nes_cm_node *, u32, struct sk_buff *);
 static int send_reset(struct nes_cm_node *, struct sk_buff *);
 static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb);
@@ -113,8 +114,7 @@ static void process_packet(struct nes_cm_node *, struct sk_buff *,
 static void active_open_err(struct nes_cm_node *, struct sk_buff *, int);
 static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int);
 static void cleanup_retrans_entry(struct nes_cm_node *);
-static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *,
-		enum nes_cm_event_type);
+static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *);
 static void free_retrans_entry(struct nes_cm_node *cm_node);
 static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
 		struct sk_buff *skb, int optionsize, int passive);
@@ -124,6 +124,8 @@ static void cm_event_connected(struct nes_cm_event *);
 static void cm_event_connect_error(struct nes_cm_event *);
 static void cm_event_reset(struct nes_cm_event *);
 static void cm_event_mpa_req(struct nes_cm_event *);
+static void cm_event_mpa_reject(struct nes_cm_event *);
+static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node);
 
 static void print_core(struct nes_cm_core *core);
 
@@ -196,7 +198,6 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
  */
 static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
 {
-	int ret;
 	if (!skb) {
 		nes_debug(NES_DBG_CM, "skb set to NULL\n");
 		return -1;
@@ -206,11 +207,27 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
 	form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
 			cm_node->mpa_frame_size, SET_ACK);
 
-	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
-	if (ret < 0)
-		return ret;
-
-	return 0;
+	return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+}
+
+
+static int send_mpa_reject(struct nes_cm_node *cm_node)
+{
+	struct sk_buff *skb = NULL;
+
+	skb = dev_alloc_skb(MAX_CM_BUFFER);
+	if (!skb) {
+		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+		return -ENOMEM;
+	}
+
+	/* send an MPA reject frame */
+	form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
+			cm_node->mpa_frame_size, SET_ACK | SET_FIN);
+
+	cm_node->state = NES_CM_STATE_FIN_WAIT1;
+	return schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
 }
 
 
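
In MPA, a reject is not a TCP-level abort: the responder answers with a normal MPA reply frame whose reject flag is set, carrying any private data, and then closes gracefully (hence SET_FIN and the move to FIN_WAIT1 above). A rough standalone sketch of the reply header; the layout and the 0x20 flag value mirror the driver's definitions and RFC 5044 but are assumptions here, not an authoritative wire spec:

#include <stdint.h>

#define MPA_FLAG_REJECT 0x20	/* "R" bit; value assumed from the driver */

struct mpa_reply_hdr {		/* illustrative layout only */
	char     key[16];	/* "MPA ID Rep Frame"               */
	uint8_t  flags;		/* M | C | R | reserved             */
	uint8_t  rev;
	uint16_t priv_data_len;	/* big-endian on the wire           */
};

static void mpa_mark_reject(struct mpa_reply_hdr *hdr)
{
	hdr->flags |= MPA_FLAG_REJECT;	/* peer reports -ECONNREFUSED */
}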
@@ -218,14 +235,17 @@ static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
  * recv_mpa - process a received TCP pkt, we are expecting an
  * IETF MPA frame
  */
-static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
+static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
+		u32 len)
 {
 	struct ietf_mpa_frame *mpa_frame;
 
+	*type = NES_MPA_REQUEST_ACCEPT;
+
 	/* assume req frame is in tcp data payload */
 	if (len < sizeof(struct ietf_mpa_frame)) {
 		nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
-		return -1;
+		return -EINVAL;
 	}
 
 	mpa_frame = (struct ietf_mpa_frame *)buffer;
@@ -234,14 +254,25 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
 	if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
 		nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
 			" complete (%x + %x != %x)\n",
-			cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len);
-		return -1;
+			cm_node->mpa_frame_size,
+			(u32)sizeof(struct ietf_mpa_frame), len);
+		return -EINVAL;
+	}
+	/* make sure it does not exceed the max size */
+	if (len > MAX_CM_BUFFER) {
+		nes_debug(NES_DBG_CM, "The received ietf buffer was too large"
+			" (%x + %x != %x)\n",
+			cm_node->mpa_frame_size,
+			(u32)sizeof(struct ietf_mpa_frame), len);
+		return -EINVAL;
 	}
 
 	/* copy entire MPA frame to our cm_node's frame */
 	memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
 			cm_node->mpa_frame_size);
 
+	if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
+		*type = NES_MPA_REQUEST_REJECT;
 	return 0;
 }
 
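
parse_mpa() now enforces both a lower and an upper bound before the memcpy() into the fixed-size mpa_frame_buf; the upper bound is what makes that copy safe. The discipline, reduced to a standalone sketch (HDR and MAX_BUF are stand-ins for sizeof(struct ietf_mpa_frame) and MAX_CM_BUFFER):

#include <errno.h>
#include <stddef.h>

enum { HDR = 24, MAX_BUF = 1024 };	/* stand-in sizes */

static int check_frame_len(size_t claimed_payload, size_t wire_len)
{
	if (wire_len < HDR)
		return -EINVAL;		/* too short to hold a header     */
	if (claimed_payload + HDR != wire_len)
		return -EINVAL;		/* header disagrees with the wire */
	if (wire_len > MAX_BUF)
		return -EINVAL;		/* would overflow the copy target */
	return 0;
}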
@@ -380,7 +411,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
 	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
 	if (!new_send)
-		return -1;
+		return -ENOMEM;
 
 	/* new_send->timetosend = currenttime */
 	new_send->retrycount = NES_DEFAULT_RETRYS;
@@ -394,9 +425,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
 	if (type == NES_TIMER_TYPE_CLOSE) {
 		new_send->timetosend += (HZ/10);
-		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
-		list_add_tail(&new_send->list, &cm_node->recv_list);
-		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+		if (cm_node->recv_entry) {
+			WARN_ON(1);
+			return -EINVAL;
+		}
+		cm_node->recv_entry = new_send;
 	}
 
 	if (type == NES_TIMER_TYPE_SEND) {
@@ -435,24 +468,78 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		return ret;
 }
 
+static void nes_retrans_expired(struct nes_cm_node *cm_node)
+{
+	switch (cm_node->state) {
+	case NES_CM_STATE_SYN_RCVD:
+	case NES_CM_STATE_CLOSING:
+		rem_ref_cm_node(cm_node->cm_core, cm_node);
+		break;
+	case NES_CM_STATE_LAST_ACK:
+	case NES_CM_STATE_FIN_WAIT1:
+	case NES_CM_STATE_MPAREJ_RCVD:
+		send_reset(cm_node, NULL);
+		break;
+	default:
+		create_event(cm_node, NES_CM_EVENT_ABORTED);
+	}
+}
+
+static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
+{
+	struct nes_timer_entry *recv_entry = cm_node->recv_entry;
+	struct iw_cm_id *cm_id = cm_node->cm_id;
+	struct nes_qp *nesqp;
+	unsigned long qplockflags;
+
+	if (!recv_entry)
+		return;
+	nesqp = (struct nes_qp *)recv_entry->skb;
+	if (nesqp) {
+		spin_lock_irqsave(&nesqp->lock, qplockflags);
+		if (nesqp->cm_id) {
+			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
+				"refcount = %d: HIT A "
+				"NES_TIMER_TYPE_CLOSE with something "
+				"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+				atomic_read(&nesqp->refcount));
+			nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+			nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+			nesqp->ibqp_state = IB_QPS_ERR;
+			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+			nes_cm_disconn(nesqp);
+		} else {
+			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
+				"refcount = %d: HIT A "
+				"NES_TIMER_TYPE_CLOSE with nothing "
+				"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+				atomic_read(&nesqp->refcount));
+		}
+	} else if (rem_node) {
+		/* TIME_WAIT state */
+		rem_ref_cm_node(cm_node->cm_core, cm_node);
+	}
+	if (cm_node->cm_id)
+		cm_id->rem_ref(cm_id);
+	kfree(recv_entry);
+	cm_node->recv_entry = NULL;
+}
 
 /**
  * nes_cm_timer_tick
  */
 static void nes_cm_timer_tick(unsigned long pass)
 {
-	unsigned long flags, qplockflags;
+	unsigned long flags;
 	unsigned long nexttimeout = jiffies + NES_LONG_TIME;
-	struct iw_cm_id *cm_id;
 	struct nes_cm_node *cm_node;
 	struct nes_timer_entry *send_entry, *recv_entry;
-	struct list_head *list_core, *list_core_temp;
-	struct list_head *list_node, *list_node_temp;
+	struct list_head *list_core_temp;
+	struct list_head *list_node;
 	struct nes_cm_core *cm_core = g_cm_core;
-	struct nes_qp *nesqp;
 	u32 settimer = 0;
 	int ret = NETDEV_TX_OK;
-	enum nes_cm_node_state last_state;
 
 	struct list_head timer_list;
 	INIT_LIST_HEAD(&timer_list);
@@ -461,7 +548,7 @@ static void nes_cm_timer_tick(unsigned long pass)
 	list_for_each_safe(list_node, list_core_temp,
 		&cm_core->connected_nodes) {
 		cm_node = container_of(list_node, struct nes_cm_node, list);
-		if (!list_empty(&cm_node->recv_list) || (cm_node->send_entry)) {
+		if ((cm_node->recv_entry) || (cm_node->send_entry)) {
 			add_ref_cm_node(cm_node);
 			list_add(&cm_node->timer_entry, &timer_list);
 		}
@@ -471,54 +558,18 @@ static void nes_cm_timer_tick(unsigned long pass)
 	list_for_each_safe(list_node, list_core_temp, &timer_list) {
 		cm_node = container_of(list_node, struct nes_cm_node,
 				timer_entry);
-		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
-		list_for_each_safe(list_core, list_node_temp,
-			&cm_node->recv_list) {
-			recv_entry = container_of(list_core,
-				struct nes_timer_entry, list);
-			if (!recv_entry)
-				break;
+		recv_entry = cm_node->recv_entry;
+
+		if (recv_entry) {
 			if (time_after(recv_entry->timetosend, jiffies)) {
 				if (nexttimeout > recv_entry->timetosend ||
 					!settimer) {
 					nexttimeout = recv_entry->timetosend;
 					settimer = 1;
 				}
-				continue;
-			}
-			list_del(&recv_entry->list);
-			cm_id = cm_node->cm_id;
-			spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
-			nesqp = (struct nes_qp *)recv_entry->skb;
-			spin_lock_irqsave(&nesqp->lock, qplockflags);
-			if (nesqp->cm_id) {
-				nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
-					"refcount = %d: HIT A "
-					"NES_TIMER_TYPE_CLOSE with something "
-					"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
-					atomic_read(&nesqp->refcount));
-				nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-				nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
-				nesqp->ibqp_state = IB_QPS_ERR;
-				spin_unlock_irqrestore(&nesqp->lock,
-					qplockflags);
-				nes_cm_disconn(nesqp);
-			} else {
-				spin_unlock_irqrestore(&nesqp->lock,
-					qplockflags);
-				nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
-					"refcount = %d: HIT A "
-					"NES_TIMER_TYPE_CLOSE with nothing "
-					"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
-					atomic_read(&nesqp->refcount));
-			}
-			if (cm_id)
-				cm_id->rem_ref(cm_id);
-
-			kfree(recv_entry);
-			spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+			} else
+				handle_recv_entry(cm_node, 1);
 		}
-		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
 
 		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 		do {
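
The structural change running through these hunks: a connection can have at most one pending NES_TIMER_TYPE_CLOSE entry, so the per-node recv_list and its spinlock are replaced by a single recv_entry pointer, with a WARN_ON() guarding double arming. The idea in miniature (simplified stand-in types):

#include <errno.h>
#include <stddef.h>

struct timer_entry { unsigned long timetosend; };

struct conn {
	struct timer_entry *recv_entry;	/* NULL means nothing pending */
};

static int arm_close_timer(struct conn *c, struct timer_entry *e)
{
	if (c->recv_entry)		/* double arm is a caller bug;  */
		return -EINVAL;		/* the driver WARN_ON()s here   */
	c->recv_entry = e;
	return 0;
}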
@@ -533,12 +584,11 @@ static void nes_cm_timer_tick(unsigned long pass)
 					nexttimeout =
 						send_entry->timetosend;
 					settimer = 1;
-					break;
 				}
 			} else {
 				free_retrans_entry(cm_node);
-				break;
 			}
+			break;
 		}
 
 		if ((cm_node->state == NES_CM_STATE_TSA) ||
@@ -550,16 +600,12 @@ static void nes_cm_timer_tick(unsigned long pass)
 			if (!send_entry->retranscount ||
 				!send_entry->retrycount) {
 				cm_packets_dropped++;
-				last_state = cm_node->state;
-				cm_node->state = NES_CM_STATE_CLOSED;
 				free_retrans_entry(cm_node);
+
 				spin_unlock_irqrestore(
 					&cm_node->retrans_list_lock, flags);
-				if (last_state == NES_CM_STATE_SYN_RCVD)
-					rem_ref_cm_node(cm_core, cm_node);
-				else
-					create_event(cm_node,
-						NES_CM_EVENT_ABORTED);
+				nes_retrans_expired(cm_node);
+				cm_node->state = NES_CM_STATE_CLOSED;
 				spin_lock_irqsave(&cm_node->retrans_list_lock,
 					flags);
 				break;
@@ -714,7 +760,7 @@ static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb)
 	skb = dev_alloc_skb(MAX_CM_BUFFER);
 	if (!skb) {
 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
@@ -778,14 +824,10 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
 	unsigned long flags;
 	struct list_head *hte;
 	struct nes_cm_node *cm_node;
-	__be32 tmp_addr = cpu_to_be32(loc_addr);
 
 	/* get a handle on the hte */
 	hte = &cm_core->connected_nodes;
 
-	nes_debug(NES_DBG_CM, "Searching for an owner node: %pI4:%x from core %p->%p\n",
-		  &tmp_addr, loc_port, cm_core, hte);
-
 	/* walk list and find cm_node associated with this session ID */
 	spin_lock_irqsave(&cm_core->ht_lock, flags);
 	list_for_each_entry(cm_node, hte, list) {
@@ -875,7 +917,8 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
 static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
 	struct nes_cm_listener *listener, int free_hanging_nodes)
 {
-	int ret = 1;
+	int ret = -EINVAL;
+	int err = 0;
 	unsigned long flags;
 	struct list_head *list_pos = NULL;
 	struct list_head *list_temp = NULL;
@@ -904,10 +947,60 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
 
 	list_for_each_safe(list_pos, list_temp, &reset_list) {
 		cm_node = container_of(list_pos, struct nes_cm_node,
 				reset_entry);
-		cleanup_retrans_entry(cm_node);
-		send_reset(cm_node, NULL);
-		rem_ref_cm_node(cm_node->cm_core, cm_node);
+		{
+			struct nes_cm_node *loopback = cm_node->loopbackpartner;
+			if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
+				rem_ref_cm_node(cm_node->cm_core, cm_node);
+			} else {
+				if (!loopback) {
+					cleanup_retrans_entry(cm_node);
+					err = send_reset(cm_node, NULL);
+					if (err) {
+						cm_node->state =
+							NES_CM_STATE_CLOSED;
+						WARN_ON(1);
+					} else {
+						cm_node->state =
+							NES_CM_STATE_CLOSED;
+						rem_ref_cm_node(
+							cm_node->cm_core,
+							cm_node);
+					}
+				} else {
+					struct nes_cm_event event;
+
+					event.cm_node = loopback;
+					event.cm_info.rem_addr =
+							loopback->rem_addr;
+					event.cm_info.loc_addr =
+							loopback->loc_addr;
+					event.cm_info.rem_port =
+							loopback->rem_port;
+					event.cm_info.loc_port =
+							loopback->loc_port;
+					event.cm_info.cm_id = loopback->cm_id;
+					cm_event_connect_error(&event);
+					loopback->state = NES_CM_STATE_CLOSED;
+
+					event.cm_node = cm_node;
+					event.cm_info.rem_addr =
+							cm_node->rem_addr;
+					event.cm_info.loc_addr =
+							cm_node->loc_addr;
+					event.cm_info.rem_port =
+							cm_node->rem_port;
+					event.cm_info.loc_port =
+							cm_node->loc_port;
+					event.cm_info.cm_id = cm_node->cm_id;
+					cm_event_reset(&event);
+
+					rem_ref_cm_node(cm_node->cm_core,
+							cm_node);
+
+				}
+			}
+		}
 	}
 
 	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
@@ -968,6 +1061,7 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
 	if (cm_node->accept_pend) {
 		BUG_ON(!cm_node->listener);
 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
+		cm_node->accept_pend = 0;
 		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
 	}
 
@@ -994,7 +1088,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
 	memset(&fl, 0, sizeof fl);
 	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
 	if (ip_route_output_key(&init_net, &rt, &fl)) {
-		printk("%s: ip_route_output_key failed for 0x%08X\n",
+		printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
 				__func__, dst_ip);
 		return rc;
 	}
@@ -1057,8 +1151,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
 			cm_node->cm_id);
 
 	spin_lock_init(&cm_node->retrans_list_lock);
-	INIT_LIST_HEAD(&cm_node->recv_list);
-	spin_lock_init(&cm_node->recv_list_lock);
 
 	cm_node->loopbackpartner = NULL;
 	atomic_set(&cm_node->ref_count, 1);
@@ -1126,10 +1218,7 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node)
 static int rem_ref_cm_node(struct nes_cm_core *cm_core,
 	struct nes_cm_node *cm_node)
 {
-	unsigned long flags, qplockflags;
-	struct nes_timer_entry *recv_entry;
-	struct iw_cm_id *cm_id;
-	struct list_head *list_core, *list_node_temp;
+	unsigned long flags;
 	struct nes_qp *nesqp;
 
 	if (!cm_node)
@@ -1150,38 +1239,9 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
 		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
 	}
-	BUG_ON(cm_node->send_entry);
-	spin_lock_irqsave(&cm_node->recv_list_lock, flags);
-	list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
-		recv_entry = container_of(list_core, struct nes_timer_entry,
-				list);
-		list_del(&recv_entry->list);
-		cm_id = cm_node->cm_id;
-		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
-		nesqp = (struct nes_qp *)recv_entry->skb;
-		spin_lock_irqsave(&nesqp->lock, qplockflags);
-		if (nesqp->cm_id) {
-			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
-				"NES_TIMER_TYPE_CLOSE with something to do!\n",
-				nesqp->hwqp.qp_id, cm_id);
-			nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-			nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
-			nesqp->ibqp_state = IB_QPS_ERR;
-			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
-			nes_cm_disconn(nesqp);
-		} else {
-			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
-			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
-				"NES_TIMER_TYPE_CLOSE with nothing to do!\n",
-				nesqp->hwqp.qp_id, cm_id);
-		}
-		cm_id->rem_ref(cm_id);
-
-		kfree(recv_entry);
-		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
-	}
-	spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
-
+	WARN_ON(cm_node->send_entry);
+	if (cm_node->recv_entry)
+		handle_recv_entry(cm_node, 0);
 	if (cm_node->listener) {
 		mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
 	} else {
@@ -1266,8 +1326,7 @@ static void drop_packet(struct sk_buff *skb)
 	dev_kfree_skb_any(skb);
 }
 
-static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
-	struct tcphdr *tcph)
+static void handle_fin_pkt(struct nes_cm_node *cm_node)
 {
 	nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
 		"refcnt=%d\n", cm_node, cm_node->state,
@@ -1279,23 +1338,30 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	case NES_CM_STATE_SYN_SENT:
 	case NES_CM_STATE_ESTABLISHED:
 	case NES_CM_STATE_MPAREQ_SENT:
+	case NES_CM_STATE_MPAREJ_RCVD:
 		cm_node->state = NES_CM_STATE_LAST_ACK;
-		send_fin(cm_node, skb);
+		send_fin(cm_node, NULL);
 		break;
 	case NES_CM_STATE_FIN_WAIT1:
 		cm_node->state = NES_CM_STATE_CLOSING;
-		send_ack(cm_node, skb);
+		send_ack(cm_node, NULL);
+		/* Wait for the ACK, as this is a simultaneous close.
+		 * After we receive the ACK, do not send anything;
+		 * just remove the node. Done. */
 		break;
 	case NES_CM_STATE_FIN_WAIT2:
 		cm_node->state = NES_CM_STATE_TIME_WAIT;
-		send_ack(cm_node, skb);
+		send_ack(cm_node, NULL);
+		schedule_nes_timer(cm_node, NULL, NES_TIMER_TYPE_CLOSE, 1, 0);
+		break;
+	case NES_CM_STATE_TIME_WAIT:
 		cm_node->state = NES_CM_STATE_CLOSED;
+		rem_ref_cm_node(cm_node->cm_core, cm_node);
 		break;
 	case NES_CM_STATE_TSA:
 	default:
 		nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n",
 			cm_node, cm_node->state);
-		drop_packet(skb);
 		break;
 	}
 }
@@ -1341,23 +1407,35 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		cleanup_retrans_entry(cm_node);
 		drop_packet(skb);
 		break;
+	case NES_CM_STATE_TIME_WAIT:
+		cleanup_retrans_entry(cm_node);
+		cm_node->state = NES_CM_STATE_CLOSED;
+		rem_ref_cm_node(cm_node->cm_core, cm_node);
+		drop_packet(skb);
+		break;
+	case NES_CM_STATE_FIN_WAIT1:
+		cleanup_retrans_entry(cm_node);
+		nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
 	default:
 		drop_packet(skb);
 		break;
 	}
 }
 
-static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb,
-		enum nes_cm_event_type type)
+
+static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
 {
 
-	int ret;
+	int ret = 0;
 	int datasize = skb->len;
 	u8 *dataloc = skb->data;
-	ret = parse_mpa(cm_node, dataloc, datasize);
-	if (ret < 0) {
+
+	enum nes_cm_event_type type = NES_CM_EVENT_UNKNOWN;
+	u32 res_type;
+	ret = parse_mpa(cm_node, dataloc, &res_type, datasize);
+	if (ret) {
 		nes_debug(NES_DBG_CM, "didn't like MPA Request\n");
-		if (type == NES_CM_EVENT_CONNECTED) {
+		if (cm_node->state == NES_CM_STATE_MPAREQ_SENT) {
 			nes_debug(NES_DBG_CM, "%s[%u] create abort for "
 				"cm_node=%p listener=%p state=%d\n", __func__,
 				__LINE__, cm_node, cm_node->listener,
@@ -1366,18 +1444,38 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		} else {
 			passive_open_err(cm_node, skb, 1);
 		}
-	} else {
-		cleanup_retrans_entry(cm_node);
-		dev_kfree_skb_any(skb);
-		if (type == NES_CM_EVENT_CONNECTED)
+		return;
+	}
+
+	switch (cm_node->state) {
+	case NES_CM_STATE_ESTABLISHED:
+		if (res_type == NES_MPA_REQUEST_REJECT) {
+			/* Big problem: we are receiving the MPA, so this
+			 * is a passive open and should not be a REJECT;
+			 * a reject can only arrive on an active open. */
+			WARN_ON(1);
+		}
+		cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
+		type = NES_CM_EVENT_MPA_REQ;
+		atomic_set(&cm_node->passive_state,
+				NES_PASSIVE_STATE_INDICATED);
+		break;
+	case NES_CM_STATE_MPAREQ_SENT:
+		if (res_type == NES_MPA_REQUEST_REJECT) {
+			type = NES_CM_EVENT_MPA_REJECT;
+			cm_node->state = NES_CM_STATE_MPAREJ_RCVD;
+		} else {
+			type = NES_CM_EVENT_CONNECTED;
 			cm_node->state = NES_CM_STATE_TSA;
-		else
-			atomic_set(&cm_node->passive_state,
-					NES_PASSIVE_STATE_INDICATED);
-		create_event(cm_node, type);
+		}
 
+		break;
+	default:
+		WARN_ON(1);
+		break;
 	}
-	return ;
+	dev_kfree_skb_any(skb);
+	create_event(cm_node, type);
 }
 
 static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
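
With the event type no longer passed in by the caller, handle_rcv_mpa() derives the meaning of the frame from the connection state: on the passive side (ESTABLISHED) any MPA frame is a request, while on the active side (MPAREQ_SENT) the reply either rejects or completes the connection. The dispatch, boiled down to a standalone sketch with stand-in names:

enum cm_state { ST_ESTABLISHED, ST_MPAREQ_SENT };
enum cm_event { EV_MPA_REQ, EV_MPA_REJECT, EV_CONNECTED };

static enum cm_event classify_mpa(enum cm_state s, int reject_bit)
{
	if (s == ST_ESTABLISHED)	/* passive side: always a request; */
		return EV_MPA_REQ;	/* a reject bit here is a peer bug */
	return reject_bit ? EV_MPA_REJECT	/* active side: reply */
			  : EV_CONNECTED;
}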
@@ -1465,8 +1563,6 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		break;
 	case NES_CM_STATE_LISTENING:
 		/* Passive OPEN */
-		cm_node->accept_pend = 1;
-		atomic_inc(&cm_node->listener->pend_accepts_cnt);
 		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
 				cm_node->listener->backlog) {
 			nes_debug(NES_DBG_CM, "drop syn due to backlog "
@@ -1484,6 +1580,9 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		}
 		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
 		BUG_ON(cm_node->send_entry);
+		cm_node->accept_pend = 1;
+		atomic_inc(&cm_node->listener->pend_accepts_cnt);
+
 		cm_node->state = NES_CM_STATE_SYN_RCVD;
 		send_syn(cm_node, 1, skb);
 		break;
@@ -1518,6 +1617,7 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	inc_sequence = ntohl(tcph->seq);
 	switch (cm_node->state) {
 	case NES_CM_STATE_SYN_SENT:
+		cleanup_retrans_entry(cm_node);
 		/* active open */
 		if (check_syn(cm_node, tcph, skb))
 			return;
@@ -1567,10 +1667,7 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	u32 rem_seq;
 	int ret;
 	int optionsize;
-	u32 temp_seq = cm_node->tcp_cntxt.loc_seq_num;
-
 	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
-	cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
 
 	if (check_seq(cm_node, tcph, skb))
 		return;
@@ -1580,7 +1677,7 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	rem_seq = ntohl(tcph->seq);
 	rem_seq_ack =  ntohl(tcph->ack_seq);
 	datasize = skb->len;
-
+	cleanup_retrans_entry(cm_node);
 	switch (cm_node->state) {
 	case NES_CM_STATE_SYN_RCVD:
 		/* Passive OPEN */
@@ -1588,7 +1685,6 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		if (ret)
 			break;
 		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
-		cm_node->tcp_cntxt.loc_seq_num = temp_seq;
 		if (cm_node->tcp_cntxt.rem_ack_num !=
 		    cm_node->tcp_cntxt.loc_seq_num) {
 			nes_debug(NES_DBG_CM, "rem_ack_num != loc_seq_num\n");
@@ -1597,31 +1693,30 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 			return;
 		}
 		cm_node->state = NES_CM_STATE_ESTABLISHED;
+		cleanup_retrans_entry(cm_node);
 		if (datasize) {
 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
-			cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
-			handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_MPA_REQ);
+			handle_rcv_mpa(cm_node, skb);
 		} else { /* rcvd ACK only */
 			dev_kfree_skb_any(skb);
 			cleanup_retrans_entry(cm_node);
 		}
 		break;
 	case NES_CM_STATE_ESTABLISHED:
 		/* Passive OPEN */
-		/* We expect mpa frame to be received only */
+		cleanup_retrans_entry(cm_node);
 		if (datasize) {
 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
-			cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
-			handle_rcv_mpa(cm_node, skb,
-				NES_CM_EVENT_MPA_REQ);
+			handle_rcv_mpa(cm_node, skb);
 		} else
 			drop_packet(skb);
 		break;
 	case NES_CM_STATE_MPAREQ_SENT:
+		cleanup_retrans_entry(cm_node);
 		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
 		if (datasize) {
 			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
-			handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_CONNECTED);
+			handle_rcv_mpa(cm_node, skb);
 		} else { /* Could be just an ack pkt.. */
 			cleanup_retrans_entry(cm_node);
 			dev_kfree_skb_any(skb);
@@ -1632,13 +1727,24 @@ static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		cleanup_retrans_entry(cm_node);
 		send_reset(cm_node, skb);
 		break;
+	case NES_CM_STATE_LAST_ACK:
+		cleanup_retrans_entry(cm_node);
+		cm_node->state = NES_CM_STATE_CLOSED;
+		cm_node->cm_id->rem_ref(cm_node->cm_id);
+	case NES_CM_STATE_CLOSING:
+		cleanup_retrans_entry(cm_node);
+		rem_ref_cm_node(cm_node->cm_core, cm_node);
+		drop_packet(skb);
+		break;
 	case NES_CM_STATE_FIN_WAIT1:
+		cleanup_retrans_entry(cm_node);
+		drop_packet(skb);
+		cm_node->state = NES_CM_STATE_FIN_WAIT2;
+		break;
 	case NES_CM_STATE_SYN_SENT:
 	case NES_CM_STATE_FIN_WAIT2:
 	case NES_CM_STATE_TSA:
 	case NES_CM_STATE_MPAREQ_RCVD:
-	case NES_CM_STATE_LAST_ACK:
-	case NES_CM_STATE_CLOSING:
 	case NES_CM_STATE_UNKNOWN:
 	default:
 		drop_packet(skb);
@@ -1748,6 +1854,7 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
 {
 	enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN;
 	struct tcphdr *tcph = tcp_hdr(skb);
+	u32 fin_set = 0;
 	skb_pull(skb, ip_hdr(skb)->ihl << 2);
 
 	nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
@@ -1760,10 +1867,10 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		pkt_type = NES_PKT_TYPE_SYN;
 		if (tcph->ack)
 			pkt_type = NES_PKT_TYPE_SYNACK;
-	} else if (tcph->fin)
-		pkt_type = NES_PKT_TYPE_FIN;
-	else if (tcph->ack)
+	} else if (tcph->ack)
 		pkt_type = NES_PKT_TYPE_ACK;
+	if (tcph->fin)
+		fin_set = 1;
 
 	switch (pkt_type) {
 	case NES_PKT_TYPE_SYN:
@@ -1774,15 +1881,16 @@ static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		break;
 	case NES_PKT_TYPE_ACK:
 		handle_ack_pkt(cm_node, skb, tcph);
+		if (fin_set)
+			handle_fin_pkt(cm_node);
 		break;
 	case NES_PKT_TYPE_RST:
 		handle_rst_pkt(cm_node, skb, tcph);
 		break;
-	case NES_PKT_TYPE_FIN:
-		handle_fin_pkt(cm_node, skb, tcph);
-		break;
 	default:
 		drop_packet(skb);
+		if (fin_set)
+			handle_fin_pkt(cm_node);
 		break;
 	}
 }
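
FIN stops being a packet type of its own and becomes a flag noted alongside the primary classification, so a segment carrying both ACK and FIN now gets its ACK processed before the FIN is handled, instead of losing one of the two. Roughly (stand-in types; the real code tests RST separately):

struct tcp_bits { unsigned syn:1, ack:1, fin:1; };
enum pkt { PKT_SYN, PKT_SYNACK, PKT_ACK, PKT_OTHER };

static enum pkt classify(struct tcp_bits f, int *fin_set)
{
	*fin_set = f.fin;	/* remembered; handled after the ACK work */
	if (f.syn)
		return f.ack ? PKT_SYNACK : PKT_SYN;
	return f.ack ? PKT_ACK : PKT_OTHER;
}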
@@ -1925,7 +2033,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
 			loopbackremotenode->tcp_cntxt.rcv_wscale;
 		loopbackremotenode->tcp_cntxt.snd_wscale =
 			cm_node->tcp_cntxt.rcv_wscale;
-
+		loopbackremotenode->state = NES_CM_STATE_MPAREQ_RCVD;
 		create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
 	}
 	return cm_node;
@@ -1980,7 +2088,11 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
 	struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
 {
 	int ret = 0;
+	int err = 0;
 	int passive_state;
+	struct nes_cm_event event;
+	struct iw_cm_id *cm_id = cm_node->cm_id;
+	struct nes_cm_node *loopback = cm_node->loopbackpartner;
 
 	nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n",
 		__func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);
@@ -1989,12 +2101,38 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
 		return ret;
 	cleanup_retrans_entry(cm_node);
 
-	passive_state = atomic_add_return(1, &cm_node->passive_state);
-	cm_node->state = NES_CM_STATE_CLOSED;
-	if (passive_state == NES_SEND_RESET_EVENT)
+	if (!loopback) {
+		passive_state = atomic_add_return(1, &cm_node->passive_state);
+		if (passive_state == NES_SEND_RESET_EVENT) {
+			cm_node->state = NES_CM_STATE_CLOSED;
+			rem_ref_cm_node(cm_core, cm_node);
+		} else {
+			ret = send_mpa_reject(cm_node);
+			if (ret) {
+				cm_node->state = NES_CM_STATE_CLOSED;
+				err = send_reset(cm_node, NULL);
+				if (err)
+					WARN_ON(1);
+			} else
+				cm_id->add_ref(cm_id);
+		}
+	} else {
+		cm_node->cm_id = NULL;
+		event.cm_node = loopback;
+		event.cm_info.rem_addr = loopback->rem_addr;
+		event.cm_info.loc_addr = loopback->loc_addr;
+		event.cm_info.rem_port = loopback->rem_port;
+		event.cm_info.loc_port = loopback->loc_port;
+		event.cm_info.cm_id = loopback->cm_id;
+		cm_event_mpa_reject(&event);
 		rem_ref_cm_node(cm_core, cm_node);
-	else
-		ret = send_reset(cm_node, NULL);
+		loopback->state = NES_CM_STATE_CLOSING;
+
+		cm_id = loopback->cm_id;
+		rem_ref_cm_node(cm_core, loopback);
+		cm_id->rem_ref(cm_id);
+	}
 
 	return ret;
 }
 
@@ -2031,6 +2169,7 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
 	case NES_CM_STATE_CLOSING:
 		ret = -1;
 		break;
+	case NES_CM_STATE_MPAREJ_RCVD:
 	case NES_CM_STATE_LISTENING:
 	case NES_CM_STATE_UNKNOWN:
 	case NES_CM_STATE_INITED:
@@ -2227,15 +2366,15 @@ static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
 	int ret = 0;
 
 	switch (type) {
-		case NES_CM_SET_PKT_SIZE:
-			cm_core->mtu = value;
-			break;
-		case NES_CM_SET_FREE_PKT_Q_SIZE:
-			cm_core->free_tx_pkt_max = value;
-			break;
-		default:
-			/* unknown set option */
-			ret = -EINVAL;
-		}
+	case NES_CM_SET_PKT_SIZE:
+		cm_core->mtu = value;
+		break;
+	case NES_CM_SET_FREE_PKT_Q_SIZE:
+		cm_core->free_tx_pkt_max = value;
+		break;
+	default:
+		/* unknown set option */
+		ret = -EINVAL;
+	}
 
 	return ret;
@@ -2654,9 +2793,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 			NES_QPCONTEXT_ORDIRD_WRPDU);
 	} else {
 		nesqp->nesqp_context->ird_ord_sizes |=
-			cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
-			NES_QPCONTEXT_ORDIRD_WRPDU |
-			NES_QPCONTEXT_ORDIRD_ALSMM));
+			cpu_to_le32(NES_QPCONTEXT_ORDIRD_WRPDU);
 	}
 	nesqp->skip_lsmm = 1;
 
@@ -2778,23 +2915,35 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
 	struct nes_cm_node *cm_node;
+	struct nes_cm_node *loopback;
+
 	struct nes_cm_core *cm_core;
 
 	atomic_inc(&cm_rejects);
 	cm_node = (struct nes_cm_node *) cm_id->provider_data;
+	loopback = cm_node->loopbackpartner;
 	cm_core = cm_node->cm_core;
+	cm_node->cm_id = cm_id;
 	cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;
 
+	if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
+		return -EINVAL;
+
 	strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
-	memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
+	if (loopback) {
+		memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
+		loopback->mpa_frame.priv_data_len = pdata_len;
+		loopback->mpa_frame_size = sizeof(struct ietf_mpa_frame) +
+				pdata_len;
+	} else {
+		memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
+		cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
+	}
 
-	cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
 	cm_node->mpa_frame.rev = mpa_version;
 	cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;
 
-	cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
-
-	return 0;
+	return cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
 }
 
 
@@ -3303,13 +3452,56 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
 	cm_event.remote_addr.sin_family = AF_INET;
 	cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
 	cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+	cm_event.private_data = cm_node->mpa_frame_buf;
+	cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
+
+	ret = cm_id->event_handler(cm_id, &cm_event);
+	if (ret)
+		printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
+			__func__, __LINE__, ret);
+	return;
+}
+
+
+static void cm_event_mpa_reject(struct nes_cm_event *event)
+{
+	struct iw_cm_id *cm_id;
+	struct iw_cm_event cm_event;
+	struct nes_cm_node *cm_node;
+	int ret;
+
+	cm_node = event->cm_node;
+	if (!cm_node)
+		return;
+	cm_id = cm_node->cm_id;
+
+	atomic_inc(&cm_connect_reqs);
+	nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+			cm_node, cm_id, jiffies);
+
+	cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+	cm_event.status = -ECONNREFUSED;
+	cm_event.provider_data = cm_id->provider_data;
+
+	cm_event.local_addr.sin_family = AF_INET;
+	cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
+	cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
+
+	cm_event.remote_addr.sin_family = AF_INET;
+	cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
+	cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
 
 	cm_event.private_data = cm_node->mpa_frame_buf;
 	cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
+
+	nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, "
+			"remote_addr=%08x\n",
+			cm_event.local_addr.sin_addr.s_addr,
+			cm_event.remote_addr.sin_addr.s_addr);
 
 	ret = cm_id->event_handler(cm_id, &cm_event);
 	if (ret)
-		printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+		printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
 			__func__, __LINE__, ret);
 
 	return;
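
To the ULP, an MPA reject surfaces as an ordinary IW_CM_EVENT_CONNECT_REPLY whose status is -ECONNREFUSED, with the peer's private data attached; it is not reported as an abort. A condensed sketch of that shape (address fields and refcounting elided; report_reject is our name, not the driver's):

static void report_reject(struct iw_cm_id *cm_id, void *pdata, u8 pdata_len)
{
	struct iw_cm_event cm_event = {
		.event		  = IW_CM_EVENT_CONNECT_REPLY,
		.status		  = -ECONNREFUSED,	/* "peer said no" */
		.private_data	  = pdata,
		.private_data_len = pdata_len,
	};

	cm_id->event_handler(cm_id, &cm_event);
}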
@@ -3374,6 +3566,14 @@ static void nes_cm_event_handler(struct work_struct *work)
 		cm_event_connected(event);
 		nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
 		break;
+	case NES_CM_EVENT_MPA_REJECT:
+		if ((!event->cm_node->cm_id) ||
+			(event->cm_node->state == NES_CM_STATE_TSA))
+			break;
+		cm_event_mpa_reject(event);
+		nes_debug(NES_DBG_CM, "CM Event: REJECT\n");
+		break;
+
 	case NES_CM_EVENT_ABORTED:
 		if ((!event->cm_node->cm_id) ||
 			(event->cm_node->state == NES_CM_STATE_TSA))
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index fafa35042ebd..d5f778202eb7 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved. 2 * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -39,6 +39,9 @@
 #define NES_MANAGE_APBVT_DEL 0
 #define NES_MANAGE_APBVT_ADD 1
 
+#define NES_MPA_REQUEST_ACCEPT	1
+#define NES_MPA_REQUEST_REJECT	2
+
 /* IETF MPA -- defines, enums, structs */
 #define IEFT_MPA_KEY_REQ  "MPA ID Req Frame"
 #define IEFT_MPA_KEY_REP  "MPA ID Rep Frame"
@@ -186,6 +189,7 @@ enum nes_cm_node_state {
 	NES_CM_STATE_ACCEPTING,
 	NES_CM_STATE_MPAREQ_SENT,
 	NES_CM_STATE_MPAREQ_RCVD,
+	NES_CM_STATE_MPAREJ_RCVD,
 	NES_CM_STATE_TSA,
 	NES_CM_STATE_FIN_WAIT1,
 	NES_CM_STATE_FIN_WAIT2,
@@ -278,13 +282,12 @@ struct nes_cm_node {
 	struct nes_timer_entry	*send_entry;
 
 	spinlock_t                retrans_list_lock;
-	struct list_head          recv_list;
-	spinlock_t                recv_list_lock;
+	struct nes_timer_entry  *recv_entry;
 
 	int                       send_write0;
 	union {
 		struct ietf_mpa_frame mpa_frame;
-		u8                    mpa_frame_buf[NES_CM_DEFAULT_MTU];
+		u8                    mpa_frame_buf[MAX_CM_BUFFER];
 	};
 	u16                       mpa_frame_size;
 	struct iw_cm_id           *cm_id;
@@ -326,6 +329,7 @@ enum nes_cm_event_type {
 	NES_CM_EVENT_MPA_REQ,
 	NES_CM_EVENT_MPA_CONNECT,
 	NES_CM_EVENT_MPA_ACCEPT,
+	NES_CM_EVENT_MPA_REJECT,
 	NES_CM_EVENT_MPA_ESTABLISHED,
 	NES_CM_EVENT_CONNECTED,
 	NES_CM_EVENT_CLOSED,
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
index da9daba8e668..0fb8d81d9a62 100644
--- a/drivers/infiniband/hw/nes/nes_context.h
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 5d139db1b771..9a51f25c6cee 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -254,6 +254,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 	u32 adapter_size;
 	u32 arp_table_size;
 	u16 vendor_id;
+	u16 device_id;
 	u8  OneG_Mode;
 	u8  func_index;
 
@@ -356,6 +357,13 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 		return NULL;
 	}
 
+	nesadapter->vendor_id = (((u32) nesadapter->mac_addr_high) << 8) |
+				(nesadapter->mac_addr_low >> 24);
+
+	pci_bus_read_config_word(nesdev->pcidev->bus, nesdev->pcidev->devfn,
+				 PCI_DEVICE_ID, &device_id);
+	nesadapter->vendor_part_id = device_id;
+
 	if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter,
 							OneG_Mode)) {
 		kfree(nesadapter);
@@ -1636,7 +1644,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
 	nesvnic->post_cqp_request = nes_post_cqp_request;
 	nesvnic->mcrq_mcast_filter = NULL;
 
-	spin_lock_init(&nesvnic->nic.sq_lock);
 	spin_lock_init(&nesvnic->nic.rq_lock);
 
 	/* setup the RQ */
@@ -2261,6 +2268,8 @@ static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
 
 		if (++head >= aeq_size)
 			head = 0;
+
+		nes_write32(nesdev->regs + NES_AEQ_ALLOC, 1 << 16);
 	}
 	while (1);
 	aeq->aeq_head = head;
@@ -2622,9 +2631,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 				} else
 					break;
 			}
-			if (skb)
-				dev_kfree_skb_any(skb);
 		}
+		if (skb)
+			dev_kfree_skb_any(skb);
 		nesnic->sq_tail++;
 		nesnic->sq_tail &= nesnic->sq_size-1;
 		if (sq_cqes > 128) {
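Two behavioral fixes above: nes_process_aeq() now acknowledges each consumed asynchronous event by writing the new NES_AEQ_ALLOC doorbell, and nes_nic_ce_handler() frees the transmitted skb once per completed WQE rather than inside the fragment loop. A sketch of the drain-and-acknowledge pattern (dequeue_one_ae() is a placeholder; the register write matches the patch):

	do {
		if (!dequeue_one_ae(nesdev, aeq, &head))	/* placeholder */
			break;
		if (++head >= aeq_size)
			head = 0;
		/* credit the handled AE back to the adapter */
		nes_write32(nesdev->regs + NES_AEQ_ALLOC, 1 << 16);
	} while (1);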
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index bc0b4de04450..f41a8710d2a8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+* Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
@@ -61,6 +61,7 @@ enum pci_regs {
 	NES_CQ_ACK = 0x0034,
 	NES_WQE_ALLOC = 0x0040,
 	NES_CQE_ALLOC = 0x0044,
+	NES_AEQ_ALLOC = 0x0048
 };
 
 enum indexed_regs {
@@ -875,7 +876,6 @@ struct nes_hw_nic {
 	u8 replenishing_rq;
 	u8 reserved;
 
-	spinlock_t sq_lock;
 	spinlock_t rq_lock;
 };
 
@@ -1147,7 +1147,6 @@ struct nes_ib_device;
 struct nes_vnic {
 	struct nes_ib_device *nesibdev;
 	u64 sq_full;
-	u64 sq_locked;
 	u64 tso_requests;
 	u64 segmented_tso_requests;
 	u64 linearized_skbs;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 57a47cf7e513..025ed9f7d9c2 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -400,8 +400,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 	if (skb_headlen(skb) == skb->len) {
 		if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
 			nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
-			nesnic->tx_skb[nesnic->sq_head] = NULL;
-			dev_kfree_skb(skb);
+			nesnic->tx_skb[nesnic->sq_head] = skb;
 		}
 	} else {
 		/* Deal with Fragments */
@@ -453,7 +452,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	u32 wqe_count=1;
 	u32 send_rc;
 	struct iphdr *iph;
-	unsigned long flags;
 	__le16 *wqe_fragment_length;
 	u32 nr_frags;
 	u32 original_first_length;
@@ -480,13 +478,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (netif_queue_stopped(netdev))
 		return NETDEV_TX_BUSY;
 
-	local_irq_save(flags);
-	if (!spin_trylock(&nesnic->sq_lock)) {
-		local_irq_restore(flags);
-		nesvnic->sq_locked++;
-		return NETDEV_TX_LOCKED;
-	}
-
 	/* Check if SQ is full */
 	if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
 		if (!netif_queue_stopped(netdev)) {
@@ -498,7 +489,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			}
 		}
 		nesvnic->sq_full++;
-		spin_unlock_irqrestore(&nesnic->sq_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -531,7 +521,6 @@ sq_no_longer_full:
 			}
 		}
 		nesvnic->sq_full++;
-		spin_unlock_irqrestore(&nesnic->sq_lock, flags);
 		nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
 				netdev->name);
 		return NETDEV_TX_BUSY;
@@ -656,17 +645,13 @@ tso_sq_no_longer_full:
 			skb_set_transport_header(skb, hoffset);
 			skb_set_network_header(skb, nhoffset);
 			send_rc = nes_nic_send(skb, netdev);
-			if (send_rc != NETDEV_TX_OK) {
-				spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+			if (send_rc != NETDEV_TX_OK)
 				return NETDEV_TX_OK;
-			}
 		}
 	} else {
 		send_rc = nes_nic_send(skb, netdev);
-		if (send_rc != NETDEV_TX_OK) {
-			spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+		if (send_rc != NETDEV_TX_OK)
 			return NETDEV_TX_OK;
-		}
 	}
 
 	barrier();
@@ -676,7 +661,6 @@ tso_sq_no_longer_full:
 			(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
 
 	netdev->trans_start = jiffies;
-	spin_unlock_irqrestore(&nesnic->sq_lock, flags);
 
 	return NETDEV_TX_OK;
 }
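The hunks above are the transmit half of the LLTX removal (the feature flag itself is dropped in the nes_netdev_init hunk below): once the core serializes ndo_start_xmit with the device transmit lock, the driver-private sq_lock, the NETDEV_TX_LOCKED return, and the unlocks on each early exit all become dead weight. The surviving control flow, as a simplified sketch (helper names are placeholders):

	static int example_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
	{
		if (sq_is_full(netdev))		/* placeholder check */
			return NETDEV_TX_BUSY;	/* core requeues */
		post_to_sq(skb, netdev);	/* placeholder send */
		return NETDEV_TX_OK;
	}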
@@ -1012,7 +996,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
1012 "Pause Frames Received", 996 "Pause Frames Received",
1013 "Internal Routing Errors", 997 "Internal Routing Errors",
1014 "SQ SW Dropped SKBs", 998 "SQ SW Dropped SKBs",
1015 "SQ Locked",
1016 "SQ Full", 999 "SQ Full",
1017 "Segmented TSO Requests", 1000 "Segmented TSO Requests",
1018 "Rx Symbol Errors", 1001 "Rx Symbol Errors",
@@ -1129,16 +1112,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 	struct nes_device *nesdev = nesvnic->nesdev;
 	u32 nic_count;
 	u32 u32temp;
+	u32 index = 0;
 
 	target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
-	target_stat_values[0] = nesvnic->nesdev->link_status_interrupts;
-	target_stat_values[1] = nesvnic->linearized_skbs;
-	target_stat_values[2] = nesvnic->tso_requests;
+	target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
+	target_stat_values[++index] = nesvnic->linearized_skbs;
+	target_stat_values[++index] = nesvnic->tso_requests;
 
 	u32temp = nes_read_indexed(nesdev,
 			NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
 	nesvnic->nesdev->mac_pause_frames_sent += u32temp;
-	target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent;
+	target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;
 
 	u32temp = nes_read_indexed(nesdev,
 			NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
@@ -1209,60 +1193,59 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 		nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
 	}
 
-	target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received;
-	target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err;
-	target_stat_values[6] = nesvnic->tx_sw_dropped;
-	target_stat_values[7] = nesvnic->sq_locked;
-	target_stat_values[8] = nesvnic->sq_full;
-	target_stat_values[9] = nesvnic->segmented_tso_requests;
-	target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames;
-	target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames;
-	target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames;
-	target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames;
-	target_stat_values[14] = nesvnic->endnode_nstat_rx_discard;
-	target_stat_values[15] = nesvnic->endnode_nstat_rx_octets;
-	target_stat_values[16] = nesvnic->endnode_nstat_rx_frames;
-	target_stat_values[17] = nesvnic->endnode_nstat_tx_octets;
-	target_stat_values[18] = nesvnic->endnode_nstat_tx_frames;
-	target_stat_values[19] = mh_detected;
-	target_stat_values[20] = mh_pauses_sent;
-	target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits;
-	target_stat_values[22] = atomic_read(&cm_connects);
-	target_stat_values[23] = atomic_read(&cm_accepts);
-	target_stat_values[24] = atomic_read(&cm_disconnects);
-	target_stat_values[25] = atomic_read(&cm_connecteds);
-	target_stat_values[26] = atomic_read(&cm_connect_reqs);
-	target_stat_values[27] = atomic_read(&cm_rejects);
-	target_stat_values[28] = atomic_read(&mod_qp_timouts);
-	target_stat_values[29] = atomic_read(&qps_created);
-	target_stat_values[30] = atomic_read(&sw_qps_destroyed);
-	target_stat_values[31] = atomic_read(&qps_destroyed);
-	target_stat_values[32] = atomic_read(&cm_closes);
-	target_stat_values[33] = cm_packets_sent;
-	target_stat_values[34] = cm_packets_bounced;
-	target_stat_values[35] = cm_packets_created;
-	target_stat_values[36] = cm_packets_received;
-	target_stat_values[37] = cm_packets_dropped;
-	target_stat_values[38] = cm_packets_retrans;
-	target_stat_values[39] = cm_listens_created;
-	target_stat_values[40] = cm_listens_destroyed;
-	target_stat_values[41] = cm_backlog_drops;
-	target_stat_values[42] = atomic_read(&cm_loopbacks);
-	target_stat_values[43] = atomic_read(&cm_nodes_created);
-	target_stat_values[44] = atomic_read(&cm_nodes_destroyed);
-	target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts);
-	target_stat_values[46] = atomic_read(&cm_resets_recvd);
-	target_stat_values[47] = int_mod_timer_init;
-	target_stat_values[48] = int_mod_cq_depth_1;
-	target_stat_values[49] = int_mod_cq_depth_4;
-	target_stat_values[50] = int_mod_cq_depth_16;
-	target_stat_values[51] = int_mod_cq_depth_24;
-	target_stat_values[52] = int_mod_cq_depth_32;
-	target_stat_values[53] = int_mod_cq_depth_128;
-	target_stat_values[54] = int_mod_cq_depth_256;
-	target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
-	target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
-	target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
+	target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
+	target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
+	target_stat_values[++index] = nesvnic->tx_sw_dropped;
+	target_stat_values[++index] = nesvnic->sq_full;
+	target_stat_values[++index] = nesvnic->segmented_tso_requests;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
+	target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
+	target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
+	target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
+	target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
+	target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
+	target_stat_values[++index] = mh_detected;
+	target_stat_values[++index] = mh_pauses_sent;
+	target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
+	target_stat_values[++index] = atomic_read(&cm_connects);
+	target_stat_values[++index] = atomic_read(&cm_accepts);
+	target_stat_values[++index] = atomic_read(&cm_disconnects);
+	target_stat_values[++index] = atomic_read(&cm_connecteds);
+	target_stat_values[++index] = atomic_read(&cm_connect_reqs);
+	target_stat_values[++index] = atomic_read(&cm_rejects);
+	target_stat_values[++index] = atomic_read(&mod_qp_timouts);
+	target_stat_values[++index] = atomic_read(&qps_created);
+	target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
+	target_stat_values[++index] = atomic_read(&qps_destroyed);
+	target_stat_values[++index] = atomic_read(&cm_closes);
+	target_stat_values[++index] = cm_packets_sent;
+	target_stat_values[++index] = cm_packets_bounced;
+	target_stat_values[++index] = cm_packets_created;
+	target_stat_values[++index] = cm_packets_received;
+	target_stat_values[++index] = cm_packets_dropped;
+	target_stat_values[++index] = cm_packets_retrans;
+	target_stat_values[++index] = cm_listens_created;
+	target_stat_values[++index] = cm_listens_destroyed;
+	target_stat_values[++index] = cm_backlog_drops;
+	target_stat_values[++index] = atomic_read(&cm_loopbacks);
+	target_stat_values[++index] = atomic_read(&cm_nodes_created);
+	target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
+	target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
+	target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+	target_stat_values[++index] = int_mod_timer_init;
+	target_stat_values[++index] = int_mod_cq_depth_1;
+	target_stat_values[++index] = int_mod_cq_depth_4;
+	target_stat_values[++index] = int_mod_cq_depth_16;
+	target_stat_values[++index] = int_mod_cq_depth_24;
+	target_stat_values[++index] = int_mod_cq_depth_32;
+	target_stat_values[++index] = int_mod_cq_depth_128;
+	target_stat_values[++index] = int_mod_cq_depth_256;
+	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
+	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
+	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
 
 }
 
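Switching from literal indices to a pre-incremented counter is what lets "SQ Locked" drop out without renumbering the fifty-odd assignments that follow; the values stay aligned with nes_ethtool_stringset purely by ordering. The pattern in isolation (stat names are illustrative):

	u32 index = 0;

	target_stat_values[index]   = stat_a;	/* slot 0 */
	target_stat_values[++index] = stat_b;	/* slot 1 */
	target_stat_values[++index] = stat_c;	/* slot 2, and so on */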
@@ -1616,7 +1599,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
 		nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
 		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 		netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
-		netdev->features |= NETIF_F_LLTX;
 
 	/* Fill in the port structure */
 	nesvnic->netdev = netdev;
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
index e64306bce80b..cc90c14b49eb 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  * Copyright (c) 2005 Topspin Communications. All rights reserved.
  * Copyright (c) 2005 Cisco Systems. All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 6f3bc1b6bf22..a282031d15c7 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index d93a6562817c..7e5b5ba13a74 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -551,6 +551,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 	struct nes_device *nesdev = nesvnic->nesdev;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	int i = 0;
+	int rc;
 
 	/* free the resources */
 	if (nesfmr->leaf_pbl_cnt == 0) {
@@ -572,7 +573,9 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 	nesmr->ibmw.rkey = ibfmr->rkey;
 	nesmr->ibmw.uobject = NULL;
 
-	if (nesfmr->nesmr.pbls_used != 0) {
+	rc = nes_dealloc_mw(&nesmr->ibmw);
+
+	if ((rc == 0) && (nesfmr->nesmr.pbls_used != 0)) {
 		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
 		if (nesfmr->nesmr.pbl_4k) {
 			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
@@ -584,7 +587,7 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 	}
 
-	return nes_dealloc_mw(&nesmr->ibmw);
+	return rc;
 }
 
 
@@ -1886,21 +1889,75 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
 	return ret;
 }
 
+/**
+ * root_256
+ */
+static u32 root_256(struct nes_device *nesdev,
+		    struct nes_root_vpbl *root_vpbl,
+		    struct nes_root_vpbl *new_root,
+		    u16 pbl_count_4k,
+		    u16 pbl_count_256)
+{
+	u64 leaf_pbl;
+	int i, j, k;
+
+	if (pbl_count_4k == 1) {
+		new_root->pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
+						512, &new_root->pbl_pbase);
+
+		if (new_root->pbl_vbase == NULL)
+			return 0;
+
+		leaf_pbl = (u64)root_vpbl->pbl_pbase;
+		for (i = 0; i < 16; i++) {
+			new_root->pbl_vbase[i].pa_low =
+				cpu_to_le32((u32)leaf_pbl);
+			new_root->pbl_vbase[i].pa_high =
+				cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
+			leaf_pbl += 256;
+		}
+	} else {
+		for (i = 3; i >= 0; i--) {
+			j = i * 16;
+			root_vpbl->pbl_vbase[j] = root_vpbl->pbl_vbase[i];
+			leaf_pbl = le32_to_cpu(root_vpbl->pbl_vbase[j].pa_low) +
+				(((u64)le32_to_cpu(root_vpbl->pbl_vbase[j].pa_high))
+					<< 32);
+			for (k = 1; k < 16; k++) {
+				leaf_pbl += 256;
+				root_vpbl->pbl_vbase[j + k].pa_low =
+					cpu_to_le32((u32)leaf_pbl);
+				root_vpbl->pbl_vbase[j + k].pa_high =
+					cpu_to_le32((u32)((((u64)leaf_pbl) >> 32)));
+			}
+		}
+	}
+
+	return 1;
+}
+
 
 /**
  * nes_reg_mr
  */
 static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 		u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl,
-		dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count,
-		int acc, u64 *iova_start)
+		dma_addr_t single_buffer, u16 pbl_count_4k,
+		u16 residual_page_count_4k, int acc, u64 *iova_start,
+		u16 *actual_pbl_cnt, u8 *used_4k_pbls)
 {
 	struct nes_hw_cqp_wqe *cqp_wqe;
 	struct nes_cqp_request *cqp_request;
 	unsigned long flags;
 	int ret;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
-	/* int count; */
+	uint pg_cnt = 0;
+	u16 pbl_count_256;
+	u16 pbl_count = 0;
+	u8 use_256_pbls = 0;
+	u8 use_4k_pbls = 0;
+	u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0;
+	struct nes_root_vpbl new_root = {0, 0, 0};
 	u32 opcode = 0;
 	u16 major_code;
 
@@ -1913,41 +1970,70 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 	cqp_request->waiting = 1;
 	cqp_wqe = &cqp_request->cqp_wqe;
 
-	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
-	/* track PBL resources */
-	if (pbl_count != 0) {
-		if (pbl_count > 1) {
-			/* Two level PBL */
-			if ((pbl_count+1) > nesadapter->free_4kpbl) {
-				nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n");
-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-				nes_free_cqp_request(nesdev, cqp_request);
-				return -ENOMEM;
-			} else {
-				nesadapter->free_4kpbl -= pbl_count+1;
-			}
-		} else if (residual_page_count > 32) {
-			if (pbl_count > nesadapter->free_4kpbl) {
-				nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n");
-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-				nes_free_cqp_request(nesdev, cqp_request);
-				return -ENOMEM;
-			} else {
-				nesadapter->free_4kpbl -= pbl_count;
-			}
-		} else {
-			if (pbl_count > nesadapter->free_256pbl) {
-				nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n");
-				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
-				nes_free_cqp_request(nesdev, cqp_request);
-				return -ENOMEM;
-			} else {
-				nesadapter->free_256pbl -= pbl_count;
-			}
-		}
+	if (pbl_count_4k) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+
+		pg_cnt = ((pbl_count_4k - 1) * 512) + residual_page_count_4k;
+		pbl_count_256 = (pg_cnt + 31) / 32;
+		if (pg_cnt <= 32) {
+			if (pbl_count_256 <= nesadapter->free_256pbl)
+				use_256_pbls = 1;
+			else if (pbl_count_4k <= nesadapter->free_4kpbl)
+				use_4k_pbls = 1;
+		} else if (pg_cnt <= 2048) {
+			if (((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) &&
+			    (nesadapter->free_4kpbl > (nesadapter->max_4kpbl >> 1))) {
+				use_4k_pbls = 1;
+			} else if ((pbl_count_256 + 1) <= nesadapter->free_256pbl) {
+				use_256_pbls = 1;
+				use_two_level = 1;
+			} else if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
+				use_4k_pbls = 1;
+			}
+		} else {
+			if ((pbl_count_4k + 1) <= nesadapter->free_4kpbl)
+				use_4k_pbls = 1;
+		}
+
+		if (use_256_pbls) {
+			pbl_count = pbl_count_256;
+			nesadapter->free_256pbl -= pbl_count + use_two_level;
+		} else if (use_4k_pbls) {
+			pbl_count = pbl_count_4k;
+			nesadapter->free_4kpbl -= pbl_count + use_two_level;
+		} else {
+			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+			nes_debug(NES_DBG_MR, "Out of Pbls\n");
+			nes_free_cqp_request(nesdev, cqp_request);
+			return -ENOMEM;
+		}
+
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 	}
 
-	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	if (use_256_pbls && use_two_level) {
+		if (root_256(nesdev, root_vpbl, &new_root, pbl_count_4k, pbl_count_256) == 1) {
+			if (new_root.pbl_pbase != 0)
+				root_vpbl = &new_root;
+		} else {
+			spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+			nesadapter->free_256pbl += pbl_count_256 + use_two_level;
+			use_256_pbls = 0;
+
+			if (pbl_count_4k == 1)
+				use_two_level = 0;
+			pbl_count = pbl_count_4k;
+
+			if ((pbl_count_4k + use_two_level) <= nesadapter->free_4kpbl) {
+				nesadapter->free_4kpbl -= pbl_count + use_two_level;
+				use_4k_pbls = 1;
+			}
+			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+			if (use_4k_pbls == 0)
+				return -ENOMEM;
+		}
+	}
 
 	opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ |
 			NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
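The sizing arithmetic above follows from the two PBL geometries: a 4 KB PBL holds 512 eight-byte page addresses and a 256-byte PBL holds 32, so pg_cnt recovers the total page count from the 4 KB-based inputs and pbl_count_256 rounds up to 256-byte blocks. A worked example with assumed inputs:

	/* pbl_count_4k = 2, residual_page_count_4k = 40:
	 *   pg_cnt        = (2 - 1) * 512 + 40 = 552 pages
	 *   pbl_count_256 = (552 + 31) / 32    = 18 blocks
	 * 552 falls in the 32 < pg_cnt <= 2048 branch, which weighs one
	 * two-level 4 KB registration against 18 + 1 256-byte PBLs. */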
@@ -1976,10 +2062,9 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 	} else {
 		set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase);
 		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count);
-		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX,
-				(((pbl_count - 1) * 4096) + (residual_page_count*8)));
+		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (pg_cnt * 8));
 
-		if ((pbl_count > 1) || (residual_page_count > 32))
+		if (use_4k_pbls)
 			cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
 	}
 	barrier();
@@ -1996,13 +2081,25 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
 	major_code = cqp_request->major_code;
 	nes_put_cqp_request(nesdev, cqp_request);
 
+	if ((!ret || major_code) && pbl_count != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (use_256_pbls)
+			nesadapter->free_256pbl += pbl_count + use_two_level;
+		else if (use_4k_pbls)
+			nesadapter->free_4kpbl += pbl_count + use_two_level;
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+	if (new_root.pbl_pbase)
+		pci_free_consistent(nesdev->pcidev, 512, new_root.pbl_vbase,
+				new_root.pbl_pbase);
+
 	if (!ret)
 		return -ETIME;
 	else if (major_code)
 		return -EIO;
-	else
-		return 0;
 
+	*actual_pbl_cnt = pbl_count + use_two_level;
+	*used_4k_pbls = use_4k_pbls;
 	return 0;
 }
 
@@ -2167,18 +2264,14 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
 		pbl_count = root_pbl_index;
 	}
 	ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
-			buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start);
+			buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start,
+			&nesmr->pbls_used, &nesmr->pbl_4k);
 
 	if (ret == 0) {
 		nesmr->ibmr.rkey = stag;
 		nesmr->ibmr.lkey = stag;
 		nesmr->mode = IWNES_MEMREG_TYPE_MEM;
 		ibmr = &nesmr->ibmr;
-		nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
-		nesmr->pbls_used = pbl_count;
-		if (pbl_count > 1) {
-			nesmr->pbls_used++;
-		}
 	} else {
 		kfree(nesmr);
 		ibmr = ERR_PTR(-ENOMEM);
@@ -2456,8 +2549,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			stag, (unsigned int)iova_start,
 			(unsigned int)region_length, stag_index,
 			(unsigned long long)region->length, pbl_count);
-		ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl,
-			first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start);
+		ret = nes_reg_mr(nesdev, nespd, stag, region->length, &root_vpbl,
+			first_dma_addr, pbl_count, (u16)cur_pbl_index, acc,
+			&iova_start, &nesmr->pbls_used, &nesmr->pbl_4k);
 
 		nes_debug(NES_DBG_MR, "ret=%d\n", ret);
 
@@ -2466,11 +2560,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			nesmr->ibmr.lkey = stag;
 			nesmr->mode = IWNES_MEMREG_TYPE_MEM;
 			ibmr = &nesmr->ibmr;
-			nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
-			nesmr->pbls_used = pbl_count;
-			if (pbl_count > 1) {
-				nesmr->pbls_used++;
-			}
 		} else {
 			ib_umem_release(region);
 			kfree(nesmr);
@@ -2609,24 +2698,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
 	cqp_request->waiting = 1;
 	cqp_wqe = &cqp_request->cqp_wqe;
 
-	spin_lock_irqsave(&nesadapter->pbl_lock, flags);
-	if (nesmr->pbls_used != 0) {
-		if (nesmr->pbl_4k) {
-			nesadapter->free_4kpbl += nesmr->pbls_used;
-			if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
-				printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n",
-						nesadapter->free_4kpbl, nesadapter->max_4kpbl);
-			}
-		} else {
-			nesadapter->free_256pbl += nesmr->pbls_used;
-			if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
-				printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n",
-						nesadapter->free_256pbl, nesadapter->max_256pbl);
-			}
-		}
-	}
-
-	spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
 			NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
@@ -2644,11 +2715,6 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
2644 " CQP Major:Minor codes = 0x%04X:0x%04X\n", 2715 " CQP Major:Minor codes = 0x%04X:0x%04X\n",
2645 ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code); 2716 ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
2646 2717
2647 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
2648 (ib_mr->rkey & 0x0fffff00) >> 8);
2649
2650 kfree(nesmr);
2651
2652 major_code = cqp_request->major_code; 2718 major_code = cqp_request->major_code;
2653 minor_code = cqp_request->minor_code; 2719 minor_code = cqp_request->minor_code;
2654 2720
@@ -2664,8 +2730,33 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
2664 " to destroy STag, ib_mr=%p, rkey = 0x%08X\n", 2730 " to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
2665 major_code, minor_code, ib_mr, ib_mr->rkey); 2731 major_code, minor_code, ib_mr, ib_mr->rkey);
2666 return -EIO; 2732 return -EIO;
2667 } else 2733 }
2668 return 0; 2734
2735 if (nesmr->pbls_used != 0) {
2736 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
2737 if (nesmr->pbl_4k) {
2738 nesadapter->free_4kpbl += nesmr->pbls_used;
2739 if (nesadapter->free_4kpbl > nesadapter->max_4kpbl)
2740 printk(KERN_ERR PFX "free 4KB PBLs(%u) has "
2741 "exceeded the max(%u)\n",
2742 nesadapter->free_4kpbl,
2743 nesadapter->max_4kpbl);
2744 } else {
2745 nesadapter->free_256pbl += nesmr->pbls_used;
2746 if (nesadapter->free_256pbl > nesadapter->max_256pbl)
2747 printk(KERN_ERR PFX "free 256B PBLs(%u) has "
2748 "exceeded the max(%u)\n",
2749 nesadapter->free_256pbl,
2750 nesadapter->max_256pbl);
2751 }
2752 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2753 }
2754 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
2755 (ib_mr->rkey & 0x0fffff00) >> 8);
2756
2757 kfree(nesmr);
2758
2759 return 0;
2669} 2760}
2670 2761
2671 2762
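Taken together, the nes_verbs.c hunks move PBL pool accounting to the safe side of the hardware operation: nes_reg_mr() debits the chosen pool up front and credits it back if the CQP command times out or fails, while nes_dereg_mr() and nes_dealloc_fmr() only return PBLs (and free the MR) once hardware confirms the STag is gone. The shape of the scheme, as a reserve/rollback sketch (pool, needed and hw_op() are placeholders for nesadapter's counters and the CQP request):

	spin_lock_irqsave(&pool->lock, flags);
	if (pool->free < needed) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return -ENOMEM;			/* never oversubscribe */
	}
	pool->free -= needed;			/* debit before touching HW */
	spin_unlock_irqrestore(&pool->lock, flags);

	ret = hw_op();				/* e.g. CQP register STag */
	if (ret) {
		spin_lock_irqsave(&pool->lock, flags);
		pool->free += needed;		/* roll back on failure */
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	return ret;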
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index ae0ca9bc83bd..5e48f67fbe8d 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0bd2a4ff0842..353c13b91e8f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -660,8 +660,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 
 		path = __path_find(dev, phdr->hwaddr + 4);
 		if (!path || !path->valid) {
-			if (!path)
+			int new_path = 0;
+
+			if (!path) {
 				path = path_rec_create(dev, phdr->hwaddr + 4);
+				new_path = 1;
+			}
 			if (path) {
 				/* put pseudoheader back on for next time */
 				skb_push(skb, sizeof *phdr);
@@ -669,7 +673,8 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 
 			if (!path->query && path_rec_start(dev, path)) {
 				spin_unlock_irqrestore(&priv->lock, flags);
-				path_free(dev, path);
+				if (new_path)
+					path_free(dev, path);
 				return;
 			} else
 				__path_add(dev, path);
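The ipoib change is an ownership fix: unicast_arp_send() could previously path_free() a path it had merely looked up with __path_find(), freeing an entry still linked in the device's path table. The new_path flag records whether this call allocated the path, and only then may the error path free it. Reduced to its essentials:

	int new_path = 0;

	if (!path) {
		path = path_rec_create(dev, phdr->hwaddr + 4);
		new_path = 1;		/* ours to free on error */
	}
	/* ... queue the skb, then try to start the path query ... */
	if (!path->query && path_rec_start(dev, path)) {
		if (new_path)		/* looked-up paths stay owned
					 * by the path table */
			path_free(dev, path);
		return;
	}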
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 319b188145be..ea9e1556e0d6 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -401,13 +401,6 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
 	if (ret)
 		goto failure;
 
-	iser_dbg("path.mtu is %d setting it to %d\n",
-		 cma_id->route.path_rec->mtu, IB_MTU_1024);
-
-	/* we must set the MTU to 1024 as this is what the target is assuming */
-	if (cma_id->route.path_rec->mtu > IB_MTU_1024)
-		cma_id->route.path_rec->mtu = IB_MTU_1024;
-
 	memset(&conn_param, 0, sizeof conn_param);
 	conn_param.responder_resources = 4;
 	conn_param.initiator_depth = 1;