-rw-r--r--  drivers/infiniband/core/cm.c                    |  15
-rw-r--r--  drivers/infiniband/core/cm_msgs.h               |  22
-rw-r--r--  drivers/infiniband/core/device.c                |   4
-rw-r--r--  drivers/infiniband/core/mad.c                   |  40
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c              |   2
-rw-r--r--  drivers/infiniband/core/sa_query.c              |   2
-rw-r--r--  drivers/infiniband/core/sysfs.c                 |  19
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c          |  30
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h          |   3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h           |   6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c           |   3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_ev.c           |   5
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c           |  17
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c           |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c      |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c   |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c         |  95
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c          |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c        |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c          |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c          |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c  |   8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.c   |   6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c       |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h       |  10
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c                |  27
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c               |   5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                 |  22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c         |  25
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       |   9
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c        |   7
-rw-r--r--  drivers/net/mlx4/Makefile                       |   2
-rw-r--r--  drivers/net/mlx4/catas.c                        |  16
-rw-r--r--  drivers/net/mlx4/eq.c                           |  16
-rw-r--r--  drivers/net/mlx4/main.c                         | 106
-rw-r--r--  drivers/net/mlx4/mlx4.h                         |  27
-rw-r--r--  drivers/net/mlx4/port.c                         |  13
-rw-r--r--  drivers/net/mlx4/sense.c                        | 156
-rw-r--r--  include/linux/mlx4/cmd.h                        |   1
-rw-r--r--  include/linux/mlx4/device.h                     |   6
-rw-r--r--  include/rdma/ib_cm.h                            |  12
-rw-r--r--  include/rdma/ib_mad.h                           |   4
-rw-r--r--  include/rdma/ib_smi.h                           |  34
43 files changed, 536 insertions(+), 267 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f1e82a92e61e..5130fc55b8e2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -927,8 +927,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	unsigned long flags;
 	int ret = 0;
 
-	service_mask = service_mask ? service_mask :
-		       __constant_cpu_to_be64(~0ULL);
+	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
 	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -954,7 +953,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 	spin_lock_irqsave(&cm.lock, flags);
 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
 		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+		cm_id->service_mask = ~cpu_to_be64(0);
 	} else {
 		cm_id->service_id = service_id;
 		cm_id->service_mask = service_mask;
@@ -1134,7 +1133,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto error1;
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 				    param->primary_path->packet_life_time) * 2 +
 				 cm_convert_to_ms(
@@ -1545,7 +1544,7 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
@@ -2898,7 +2897,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		goto out;
 
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id->service_mask = ~cpu_to_be64(0);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2992,7 +2991,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
@@ -3789,7 +3788,7 @@ static int __init ib_cm_init(void)
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
 	cm.listen_service_table = RB_ROOT;
-	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;
 	cm.remote_qp_table = RB_ROOT;
 	cm.remote_sidr_table = RB_ROOT;
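
A note on the conversion above: ~cpu_to_be64(0) yields the same all-ones constant as __constant_cpu_to_be64(~0ULL), because byte-swapping all-ones produces all-ones, and cpu_to_be64() already folds to a compile-time constant when given a constant argument, making the __constant_ variants redundant. A minimal userspace check of the identity (standalone sketch, not part of the patch; __builtin_bswap64 stands in for the kernel's byte swap):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* inverting a swapped zero equals swapping inverted zero */
	assert(~__builtin_bswap64(0) == __builtin_bswap64(~0ULL));
	assert(~(uint64_t)0 == UINT64_MAX);	/* both are all-ones */
	return 0;
}
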
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index aec9c7af825d..7e63c08f697c 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -44,17 +44,17 @@
 
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
 
-#define CM_REQ_ATTR_ID	    __constant_htons(0x0010)
-#define CM_MRA_ATTR_ID	    __constant_htons(0x0011)
-#define CM_REJ_ATTR_ID	    __constant_htons(0x0012)
-#define CM_REP_ATTR_ID	    __constant_htons(0x0013)
-#define CM_RTU_ATTR_ID	    __constant_htons(0x0014)
-#define CM_DREQ_ATTR_ID	    __constant_htons(0x0015)
-#define CM_DREP_ATTR_ID	    __constant_htons(0x0016)
-#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
-#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
-#define CM_LAP_ATTR_ID	    __constant_htons(0x0019)
-#define CM_APR_ATTR_ID	    __constant_htons(0x001A)
+#define CM_REQ_ATTR_ID	    cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID	    cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID	    cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID	    cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID	    cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID	    cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID	    cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID	    cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID	    cpu_to_be16(0x001A)
 
 enum cm_msg_sequence {
 	CM_MSG_SEQUENCE_REQ,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7913b804311e..d1fba4153332 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -193,7 +193,7 @@ void ib_dealloc_device(struct ib_device *device)
 
 	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
 
-	ib_device_unregister_sysfs(device);
+	kobject_put(&device->dev.kobj);
 }
 EXPORT_SYMBOL(ib_dealloc_device);
 
@@ -348,6 +348,8 @@ void ib_unregister_device(struct ib_device *device)
 
 	mutex_unlock(&device_mutex);
 
+	ib_device_unregister_sysfs(device);
+
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
 		kfree(context);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5c54fc2350be..de922a04ca2d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -301,6 +301,16 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	mad_agent_priv->agent.context = context;
 	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
 	mad_agent_priv->agent.port_num = port_num;
+	spin_lock_init(&mad_agent_priv->lock);
+	INIT_LIST_HEAD(&mad_agent_priv->send_list);
+	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
+	INIT_LIST_HEAD(&mad_agent_priv->done_list);
+	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
+	INIT_LIST_HEAD(&mad_agent_priv->local_list);
+	INIT_WORK(&mad_agent_priv->local_work, local_completions);
+	atomic_set(&mad_agent_priv->refcount, 1);
+	init_completion(&mad_agent_priv->comp);
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
@@ -350,17 +360,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
-	spin_lock_init(&mad_agent_priv->lock);
-	INIT_LIST_HEAD(&mad_agent_priv->send_list);
-	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
-	INIT_LIST_HEAD(&mad_agent_priv->done_list);
-	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
-	INIT_LIST_HEAD(&mad_agent_priv->local_list);
-	INIT_WORK(&mad_agent_priv->local_work, local_completions);
-	atomic_set(&mad_agent_priv->refcount, 1);
-	init_completion(&mad_agent_priv->comp);
-
 	return &mad_agent_priv->agent;
 
 error4:
@@ -743,9 +742,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		break;
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
 		kmem_cache_free(ib_mad_cache, mad_priv);
-		kfree(local);
-		ret = 1;
-		goto out;
+		break;
 	case IB_MAD_RESULT_SUCCESS:
 		/* Treat like an incoming receive MAD */
 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
@@ -756,10 +753,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 						&mad_priv->mad.mad);
 		}
 		if (!port_priv || !recv_mad_agent) {
+			/*
+			 * No receiving agent so drop packet and
+			 * generate send completion.
+			 */
 			kmem_cache_free(ib_mad_cache, mad_priv);
-			kfree(local);
-			ret = 0;
-			goto out;
+			break;
 		}
 		local->mad_priv = mad_priv;
 		local->recv_mad_agent = recv_mad_agent;
@@ -2356,7 +2355,7 @@ static void local_completions(struct work_struct *work)
 	struct ib_mad_local_private *local;
 	struct ib_mad_agent_private *recv_mad_agent;
 	unsigned long flags;
-	int recv = 0;
+	int free_mad;
 	struct ib_wc wc;
 	struct ib_mad_send_wc mad_send_wc;
 
@@ -2370,14 +2369,15 @@ static void local_completions(struct work_struct *work)
 				       completion_list);
 		list_del(&local->completion_list);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+		free_mad = 0;
 		if (local->mad_priv) {
 			recv_mad_agent = local->recv_mad_agent;
 			if (!recv_mad_agent) {
 				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+				free_mad = 1;
 				goto local_send_completion;
 			}
 
-			recv = 1;
 			/*
 			 * Defined behavior is to complete response
 			 * before request
@@ -2422,7 +2422,7 @@ local_send_completion:
 
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		atomic_dec(&mad_agent_priv->refcount);
-		if (!recv)
+		if (free_mad)
 			kmem_cache_free(ib_mad_cache, local->mad_priv);
 		kfree(local);
 	}
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 3af2b84cd838..57a3c6f947b2 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -735,7 +735,7 @@ process_rmpp_data(struct ib_mad_agent_private *agent,
 		goto bad;
 	}
 
-	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
 		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
 			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
 			goto bad;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7863a50d56f2..1865049e80f7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -395,6 +395,8 @@ static void update_sm_ah(struct work_struct *work)
 	}
 
 	spin_lock_irq(&port->ah_lock);
+	if (port->sm_ah)
+		kref_put(&port->sm_ah->ref, free_sm_ah);
 	port->sm_ah = new_ah;
 	spin_unlock_irq(&port->ah_lock);
 
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b43f7d3682d3..5c04cfb54cb9 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -66,11 +66,6 @@ struct port_table_attribute {
 	int			index;
 };
 
-static inline int ibdev_is_alive(const struct ib_device *dev)
-{
-	return dev->reg_state == IB_DEV_REGISTERED;
-}
-
 static ssize_t port_attr_show(struct kobject *kobj,
 			      struct attribute *attr, char *buf)
 {
@@ -80,8 +75,6 @@ static ssize_t port_attr_show(struct kobject *kobj,
 
 	if (!port_attr->show)
 		return -EIO;
-	if (!ibdev_is_alive(p->ibdev))
-		return -ENODEV;
 
 	return port_attr->show(p, port_attr, buf);
 }
@@ -562,9 +555,6 @@ static ssize_t show_node_type(struct device *device,
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	switch (dev->node_type) {
 	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
 	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
@@ -581,9 +571,6 @@ static ssize_t show_sys_image_guid(struct device *device,
 	struct ib_device_attr attr;
 	ssize_t ret;
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
@@ -600,9 +587,6 @@ static ssize_t show_node_guid(struct device *device,
 {
 	struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-	if (!ibdev_is_alive(dev))
-		return -ENODEV;
-
 	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
 		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
 		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
@@ -848,6 +832,9 @@ void ib_device_unregister_sysfs(struct ib_device *device)
 	struct kobject *p, *t;
 	struct ib_port *port;
 
+	/* Hold kobject until ib_dealloc_device() */
+	kobject_get(&device->dev.kobj);
+
 	list_for_each_entry_safe(p, t, &device->port_list, entry) {
 		list_del(&p->entry);
 		port = container_of(p, struct ib_port, kobj);
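
Taken together, the device.c and sysfs.c hunks above replace the ibdev_is_alive() liveness checks with a reference-count scheme: ib_device_unregister_sysfs() pins the device kobject with kobject_get() so already-open sysfs files remain safe to service after ib_unregister_device(), and ib_dealloc_device() drops that pin with kobject_put(), freeing the device only when the last sysfs reference is gone. The pairing, reduced to its shape with an invented struct name (a sketch, not the patch's code):

void mydev_unregister_sysfs(struct mydev *dev)
{
	kobject_get(&dev->kobj);	/* pin until final dealloc */
	/* ... tear down ports and remove sysfs entries ... */
}

void mydev_dealloc(struct mydev *dev)
{
	kobject_put(&dev->kobj);	/* release the pin; last put frees dev */
}
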
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 4dcf08b3fd83..d4d7204c11ed 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -450,7 +450,7 @@ static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
 	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
 		return 0;
 
-	if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
+	if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
 	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
 		return 0;
 
@@ -938,6 +938,23 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 	if (!rdev_p->t3cdev_p)
 		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
 	rdev_p->t3cdev_p->ulp = (void *) rdev_p;
+
+	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
+					 &(rdev_p->fw_info));
+	if (err) {
+		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
+		     __func__, rdev_p->t3cdev_p, err);
+		goto err1;
+	}
+	if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
+		printk(KERN_ERR MOD "fatal firmware version mismatch: "
+		       "need version %u but adapter has version %u\n",
+		       CXIO_FW_MAJ,
+		       G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
+		err = -EINVAL;
+		goto err1;
+	}
+
 	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
 					 &(rdev_p->rnic_info));
 	if (err) {
@@ -1204,11 +1221,12 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 		}
 
 		/* incoming SEND with no receive posted failures */
-		if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
+		if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
 		    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
 			ret = -1;
 			goto skip_cqe;
 		}
+		BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
 		goto proc_cqe;
 	}
 
@@ -1223,6 +1241,13 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
 	 * then we complete this with TPT_ERR_MSN and mark the wq in
 	 * error.
 	 */
+
+	if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
+		wq->error = 1;
+		ret = -1;
+		goto skip_cqe;
+	}
+
 	if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
 		wq->error = 1;
 		hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
@@ -1277,6 +1302,7 @@ proc_cqe:
 			cxio_hal_pblpool_free(wq->rdev,
 				wq->rq[Q_PTR2IDX(wq->rq_rptr,
 				wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
+		BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
 		wq->rq_rptr++;
 	}
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 656fe47bc84f..e44dc2289471 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -61,6 +61,8 @@
 
 #define T3_MAX_DEV_NAME_LEN 32
 
+#define CXIO_FW_MAJ 7
+
 struct cxio_hal_ctrl_qp {
 	u32 wptr;
 	u32 rptr;
@@ -108,6 +110,7 @@ struct cxio_rdev {
 	struct gen_pool *pbl_pool;
 	struct gen_pool *rqt_pool;
 	struct list_head entry;
+	struct ch_embedded_info fw_info;
 };
 
 static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 04618f7bfbb3..ff9be1a13106 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -604,6 +604,12 @@ struct t3_cqe {
 #define CQE_STATUS(x)     (G_CQE_STATUS(be32_to_cpu((x).header)))
 #define CQE_OPCODE(x)     (G_CQE_OPCODE(be32_to_cpu((x).header)))
 
+#define CQE_SEND_OPCODE(x)( \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
+	(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
+
 #define CQE_LEN(x)        (be32_to_cpu((x).len))
 
 /* used for RQ completion processing */
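
CQE_SEND_OPCODE() exists so the poll path treats every T3 send variant (plain, solicited-event, invalidate, and both) as a send when checking for an empty receive queue, rather than matching T3_SEND alone. An equivalent inline-function form, shown only as a sketch (invented name, not part of the patch), which also decodes the CQE header once instead of up to four times:

static inline int cqe_is_send(const struct t3_cqe *cqe)
{
	switch (G_CQE_OPCODE(be32_to_cpu(cqe->header))) {
	case T3_SEND:
	case T3_SEND_WITH_SE:
	case T3_SEND_WITH_INV:
	case T3_SEND_WITH_SE_INV:
		return 1;
	default:
		return 0;
	}
}
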
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 44e936e48a31..8699947aaf6c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1678,6 +1678,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep *ep = ctx;
 
+	if (state_read(&ep->com) != FPDU_MODE)
+		return CPL_RET_BUF_DONE;
+
 	PDBG("%s ep %p\n", __func__, ep);
 	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
 	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 7b67a6771720..743c5d8b8806 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -179,11 +179,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	case TPT_ERR_BOUND:
 	case TPT_ERR_INVALIDATE_SHARED_MR:
 	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
-		printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
-		       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
-		       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
-		       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
-		       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
 		break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 19661b2f0406..c758fbd58478 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -99,8 +99,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
 		wqe->write.sgl[0].stag = wr->ex.imm_data;
-		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
-		wqe->write.num_sgle = __constant_cpu_to_be32(0);
+		wqe->write.sgl[0].len = cpu_to_be32(0);
+		wqe->write.num_sgle = cpu_to_be32(0);
 		*flit_cnt = 6;
 	} else {
 		plen = 0;
@@ -195,15 +195,12 @@ static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
 
-/*
- * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
- */
 static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 			    u32 num_sgle, u32 * pbl_addr, u8 * page_size)
 {
 	int i;
 	struct iwch_mr *mhp;
-	u32 offset;
+	u64 offset;
 	for (i = 0; i < num_sgle; i++) {
 
 		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
@@ -235,8 +232,8 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 			return -EINVAL;
 		}
 		offset = sg_list[i].addr - mhp->attr.va_fbo;
-		offset += ((u32) mhp->attr.va_fbo) %
-		          (1UL << (12 + mhp->attr.page_size));
+		offset += mhp->attr.va_fbo &
+			  ((1UL << (12 + mhp->attr.page_size)) - 1);
 		pbl_addr[i] = ((mhp->attr.pbl_addr -
 			        rhp->rdev.rnic_info.pbl_base) >> 3) +
 			      (offset >> (12 + mhp->attr.page_size));
@@ -266,8 +263,8 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
 		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
 
 		/* to in the WQE == the offset into the page */
-		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
-				(1UL << (12 + page_size[i])));
+		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
+				((1UL << (12 + page_size[i])) - 1));
 
 		/* pbl_addr is the adapters address in the PBL */
 		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
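
The offset fix above relies on the identity x % 2^n == x & (2^n - 1), and on keeping the arithmetic 64-bit: the PBL page index comes from the high bits of offset, so a u32 offset silently truncates for memory regions whose virtual address or extent passes 4GB. A standalone userspace check of both points (a sketch, not from the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 0x123456789abcULL;	/* deliberately > 32 bits */
	unsigned int shift = 12 + 2;		/* 12 + page_size, as in the driver */
	uint64_t size = 1ULL << shift;

	/* modulo by a power of two is exactly a mask of the low bits */
	assert((offset % size) == (offset & (size - 1)));
	/* the page index needs the high bits; a u32 would lose them */
	assert((offset >> shift) != ((uint32_t)offset >> shift));
	return 0;
}
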
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 44447aaa5501..c568b28f4e20 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -46,11 +46,11 @@
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
-#define IB_MAD_STATUS_REDIRECT         __constant_htons(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION    __constant_htons(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD     __constant_htons(0x0008)
+#define IB_MAD_STATUS_REDIRECT         cpu_to_be16(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION    cpu_to_be16(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD     cpu_to_be16(0x0008)
 
-#define IB_PMA_CLASS_PORT_INFO         __constant_htons(0x0001)
+#define IB_PMA_CLASS_PORT_INFO         cpu_to_be16(0x0001)
 
 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index dc37277f1c80..fc7181985e8e 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -772,8 +772,8 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
 			      "0x%x, not 0x%x\n", csum, ifp->if_csum);
 		goto done;
 	}
-	if (*(__be64 *) ifp->if_guid == 0ULL ||
-	    *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) {
+	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
 		ipath_dev_err(dd, "Invalid GUID %llx from flash; "
 			      "ignoring\n",
 			      *(unsigned long long *) ifp->if_guid);
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 64aeefbd2a5d..077879c0bdb5 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -455,7 +455,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 	if (!addrs) {
 		ipath_dev_err(dd, "failed to allocate shadow dma handle "
 			      "array, no expected sends!\n");
-		vfree(dd->ipath_pageshadow);
+		vfree(pages);
 		dd->ipath_pageshadow = NULL;
 		return;
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 17a123197477..16a702d46018 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -37,10 +37,10 @@
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-#define IB_SMP_UNSUP_VERSION	__constant_htons(0x0004)
-#define IB_SMP_UNSUP_METHOD	__constant_htons(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR	__constant_htons(0x000C)
-#define IB_SMP_INVALID_FIELD	__constant_htons(0x001C)
+#define IB_SMP_UNSUP_VERSION	cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD	cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR	cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD	cpu_to_be16(0x001C)
 
 static int reply(struct ib_smp *smp)
 {
@@ -789,12 +789,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
 	return recv_subn_get_pkeytable(smp, ibdev);
 }
 
-#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL	__constant_htons(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT	__constant_htons(0x0011)
-#define IB_PMA_PORT_COUNTERS		__constant_htons(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT	__constant_htons(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT	__constant_htons(0x001E)
+#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL	cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT	cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS		cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT	cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT	cpu_to_be16(0x001E)
 
 struct ib_perf {
 	u8 base_version;
@@ -884,19 +884,19 @@ struct ib_pma_portcounters {
 	__be32 port_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SEL_SYMBOL_ERROR			__constant_htons(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY		__constant_htons(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED			__constant_htons(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS		__constant_htons(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	__constant_htons(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS		__constant_htons(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	__constant_htons(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	__constant_htons(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED		__constant_htons(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA		__constant_htons(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA		__constant_htons(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS		__constant_htons(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS		__constant_htons(0x8000)
+#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)
 
 struct ib_pma_portcounters_ext {
 	u8 reserved;
@@ -913,14 +913,14 @@ struct ib_pma_portcounters_ext {
 	__be64 port_multicast_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SELX_PORT_XMIT_DATA		__constant_htons(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA		__constant_htons(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS		__constant_htons(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS		__constant_htons(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	__constant_htons(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	__constant_htons(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	__constant_htons(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	__constant_htons(0x0080)
+#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)
 
 static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 {
@@ -933,7 +933,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 		pmp->status |= IB_SMP_INVALID_FIELD;
 
 	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->cap_mask = __constant_cpu_to_be16(1 << 8);
+	p->cap_mask = cpu_to_be16(1 << 8);
 	p->base_version = 1;
 	p->class_version = 1;
 	/*
@@ -951,12 +951,11 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
  * We support 5 counters which only count the mandatory quantities.
  */
 #define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
-	__constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
-			       COUNTER_MASK(1, 1) | \
-			       COUNTER_MASK(1, 2) | \
-			       COUNTER_MASK(1, 3) | \
-			       COUNTER_MASK(1, 4))
+#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
+				    COUNTER_MASK(1, 1) | \
+				    COUNTER_MASK(1, 2) | \
+				    COUNTER_MASK(1, 3) | \
+				    COUNTER_MASK(1, 4))
 
 static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 					   struct ib_device *ibdev, u8 port)
@@ -1137,7 +1136,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 	status = dev->pma_sample_status;
 	p->sample_status = cpu_to_be16(status);
 	/* 64 bits */
-	p->extended_width = __constant_cpu_to_be32(0x80000000);
+	p->extended_width = cpu_to_be32(0x80000000);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
 		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
 			cpu_to_be64(
@@ -1185,7 +1184,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		pmp->status |= IB_SMP_INVALID_FIELD;
 
 	if (cntrs.symbol_error_counter > 0xFFFFUL)
-		p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
+		p->symbol_error_counter = cpu_to_be16(0xFFFF);
 	else
 		p->symbol_error_counter =
 			cpu_to_be16((u16)cntrs.symbol_error_counter);
@@ -1199,17 +1198,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	else
 		p->link_downed_counter = (u8)cntrs.link_downed_counter;
 	if (cntrs.port_rcv_errors > 0xFFFFUL)
-		p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_errors =
 			cpu_to_be16((u16) cntrs.port_rcv_errors);
 	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-		p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
+		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
 	else
 		p->port_rcv_remphys_errors =
 			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
 	if (cntrs.port_xmit_discards > 0xFFFFUL)
-		p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
+		p->port_xmit_discards = cpu_to_be16(0xFFFF);
 	else
 		p->port_xmit_discards =
 			cpu_to_be16((u16)cntrs.port_xmit_discards);
@@ -1220,24 +1219,24 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
-		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+		p->vl15_dropped = cpu_to_be16(0xFFFF);
 	else
 		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
 	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-		p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
 	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-		p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_xmit_packets =
 			cpu_to_be32((u32)cntrs.port_xmit_packets);
 	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-		p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
 	else
 		p->port_rcv_packets =
 			cpu_to_be32((u32) cntrs.port_rcv_packets);
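
The long run of clamp-then-convert branches above implements PMA counter saturation: a counter that exceeds its wire-format width is reported as all-ones rather than being allowed to wrap, then stored big-endian. The pattern could be written once as a pair of helpers, shown here only as a sketch with invented names (the patch keeps the open-coded form):

static __be16 pma_saturate16(unsigned long count)
{
	/* saturate at the 16-bit field width, then convert */
	return cpu_to_be16(count > 0xFFFFUL ? 0xFFFF : (u16)count);
}

static __be32 pma_saturate32(unsigned long count)
{
	/* same idea for 32-bit PMA fields */
	return cpu_to_be32(count > 0xFFFFFFFFUL ? 0xFFFFFFFF : (u32)count);
}
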
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 9170710b950d..79b3dbc97179 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1744,7 +1744,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;
 
 	case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 8e255adf5d9b..4b0698590850 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -781,10 +781,10 @@ retry:
 	descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
 	descqp -= 2;
 	/* SDmaLastDesc */
-	descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
+	descqp[0] |= cpu_to_le64(1ULL << 11);
 	if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
 		/* SDmaIntReq */
-		descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
+		descqp[0] |= cpu_to_le64(1ULL << 15);
 	}
 
 	/* Commit writes to memory and advance the tail on the chip */
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 82cc588b8bf2..22e60998f1a7 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -419,7 +419,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 		/* Signal completion event if the solicited bit is set. */
 		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			       (ohdr->bth[0] &
-				__constant_cpu_to_be32(1 << 23)) != 0);
+				cpu_to_be32(1 << 23)) != 0);
 		break;
 
 	case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 91c74cc797ae..6076cb61bf6a 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -370,7 +370,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
 	 */
 	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
 		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
-		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
+		cpu_to_be32(IPATH_MULTICAST_QPN) :
 		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
 	/*
@@ -573,7 +573,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	/* Signal completion event if the solicited bit is set. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		       (ohdr->bth[0] &
-			__constant_cpu_to_be32(1 << 23)) != 0);
+			cpu_to_be32(1 << 23)) != 0);
 
 bail:;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 0190edc8044e..855911e7396d 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -209,20 +209,20 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
 
 	mm = get_task_mm(current);
 	if (!mm)
-		goto bail;
+		return;
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (!work)
 		goto bail_mm;
 
-	goto bail;
-
 	INIT_WORK(&work->work, user_pages_account);
 	work->mm = mm;
 	work->num_pages = num_pages;
 
+	schedule_work(&work->work);
+	return;
+
 bail_mm:
 	mmput(mm);
-bail:
 	return;
 }
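
The hunk above removes a stray "goto bail" that jumped out immediately after the work item was allocated, so the item was never initialized or scheduled and both the allocation and the mm reference taken by get_task_mm() leaked. The corrected flow, condensed into a sketch (struct and function names invented; the real worker is expected to call mmput() when it runs):

static void release_pages_deferred(size_t num_pages)
{
	struct mm_struct *mm = get_task_mm(current);
	struct pages_work *work;

	if (!mm)
		return;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);		/* nothing to hand the reference to */
		return;
	}

	INIT_WORK(&work->work, user_pages_account);
	work->mm = mm;			/* ownership passes to the worker */
	work->num_pages = num_pages;
	schedule_work(&work->work);
}
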
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 82d9a0b5ca2f..7bff4b9baa0a 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -667,13 +667,13 @@ static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
 
 static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
 {
-	return descq | __constant_cpu_to_le64(1ULL << 12);
+	return descq | cpu_to_le64(1ULL << 12);
 }
 
 static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
 {
 	/* last */			/* dma head */
-	return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
+	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
 }
 
 static inline __le64 ipath_sdma_make_desc1(u64 addr)
@@ -763,7 +763,7 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
 	if (ofs >= IPATH_SMALLBUF_DWORDS) {
 		for (i = 0; i < pkt->naddr; i++) {
 			dd->ipath_sdma_descq[dtail].qw[0] |=
-				__constant_cpu_to_le64(1ULL << 14);
+				cpu_to_le64(1ULL << 14);
 			if (++dtail == dd->ipath_sdma_descq_cnt)
 				dtail = 0;
 		}
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index cdf0e6abd34d..9289ab4b0ae8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1585,7 +1585,7 @@ static int ipath_query_port(struct ib_device *ibdev,
 	u64 ibcstat;
 
 	memset(props, 0, sizeof(*props));
-	props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
 	props->lmc = dd->ipath_lmc;
 	props->sm_lid = dev->sm_lid;
 	props->sm_sl = dev->sm_sl;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 11e3f613df93..ae6cff4abffc 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -86,11 +86,11 @@
 #define IB_PMA_SAMPLE_STATUS_RUNNING	0x02
 
 /* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA	__constant_htons(0x0001)
-#define IB_PMA_PORT_RCV_DATA	__constant_htons(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS	__constant_htons(0x0003)
-#define IB_PMA_PORT_RCV_PKTS	__constant_htons(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT	__constant_htons(0x0005)
+#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)
 
 struct ib_reth {
 	__be64 vaddr;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 606f1e2ef284..19e68ab66168 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -147,7 +147,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
  * Snoop SM MADs for port info and P_Key table sets, so we can
  * synthesize LID change and P_Key change events.
  */
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
+static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
+		      u16 prev_lid)
 {
 	struct ib_event event;
 
@@ -157,6 +158,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
 			struct ib_port_info *pinfo =
 				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
+			u16 lid = be16_to_cpu(pinfo->lid);
 
 			update_sm_ah(to_mdev(ibdev), port_num,
 				     be16_to_cpu(pinfo->sm_lid),
@@ -165,12 +167,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
 			event.device	       = ibdev;
 			event.element.port_num = port_num;
 
-			if (pinfo->clientrereg_resv_subnetto & 0x80)
+			if (pinfo->clientrereg_resv_subnetto & 0x80) {
 				event.event    = IB_EVENT_CLIENT_REREGISTER;
-			else
-				event.event    = IB_EVENT_LID_CHANGE;
+				ib_dispatch_event(&event);
+			}
 
-			ib_dispatch_event(&event);
+			if (prev_lid != lid) {
+				event.event    = IB_EVENT_LID_CHANGE;
+				ib_dispatch_event(&event);
+			}
 		}
 
 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -228,8 +233,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			struct ib_wc *in_wc, struct ib_grh *in_grh,
 			struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	u16 slid;
+	u16 slid, prev_lid = 0;
 	int err;
+	struct ib_port_attr pattr;
 
 	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
@@ -263,6 +269,13 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	} else
 		return IB_MAD_RESULT_SUCCESS;
 
+	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
+	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+	    !ib_query_port(ibdev, port_num, &pattr))
+		prev_lid = pattr.lid;
+
 	err = mlx4_MAD_IFC(to_mdev(ibdev),
 			   mad_flags & IB_MAD_IGNORE_MKEY,
 			   mad_flags & IB_MAD_IGNORE_BKEY,
@@ -271,7 +284,7 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 		return IB_MAD_RESULT_FAILURE;
 
 	if (!out_mad->mad_hdr.status) {
-		smp_snoop(ibdev, port_num, in_mad);
+		smp_snoop(ibdev, port_num, in_mad, prev_lid);
 		node_desc_override(ibdev, out_mad);
 	}
 
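
The prev_lid plumbing above makes IB_EVENT_LID_CHANGE conditional: the port's LID is captured with ib_query_port() before a SubnSet(PortInfo) MAD executes, and the event fires only if the set actually changed it, instead of on every PortInfo set that lacks the client-reregister bit. Client reregister and LID change also become independent, so a single MAD can raise both. The dispatch logic, condensed into a sketch (invented function name):

static void dispatch_port_events(struct ib_event *event,
				 struct ib_port_info *pinfo, u16 prev_lid)
{
	if (pinfo->clientrereg_resv_subnetto & 0x80) {
		event->event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(event);
	}
	if (prev_lid != be16_to_cpu(pinfo->lid)) {
		event->event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(event);
	}
}
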
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 61588bd273bd..2ccb9d31771f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -699,11 +699,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
 	int p;
 
+	mlx4_ib_mad_cleanup(ibdev);
+	ib_unregister_device(&ibdev->ib_dev);
+
 	for (p = 1; p <= ibdev->num_ports; ++p)
 		mlx4_CLOSE_PORT(dev, p);
 
-	mlx4_ib_mad_cleanup(ibdev);
-	ib_unregister_device(&ibdev->ib_dev);
 	iounmap(ibdev->uar_map);
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);
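
The reordering above makes teardown mirror setup in reverse: MAD processing and the IB device registration are torn down before the hardware ports, so no consumer can issue work against a port that mlx4_CLOSE_PORT() has already closed. Schematically (a sketch of the resulting order, not new driver code):

static void remove_ib_device(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int p;

	mlx4_ib_mad_cleanup(ibdev);		/* stop MAD consumers first */
	ib_unregister_device(&ibdev->ib_dev);	/* detach upper layers */

	for (p = 1; p <= ibdev->num_ports; ++p)
		mlx4_CLOSE_PORT(dev, p);	/* only now close the ports */
}
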
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a91cb4c3fa5c..f385a24d31d2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -71,17 +71,17 @@ enum {
 };
 
 static const __be32 mlx4_ib_opcode[] = {
-	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
-	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
-	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
-	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
-	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
-	[IB_WR_RDMA_READ]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
-	[IB_WR_ATOMIC_CMP_AND_SWP]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
-	[IB_WR_ATOMIC_FETCH_AND_ADD]	= __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
-	[IB_WR_SEND_WITH_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
-	[IB_WR_LOCAL_INV]		= __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
-	[IB_WR_FAST_REG_MR]		= __constant_cpu_to_be32(MLX4_OPCODE_FMR),
+	[IB_WR_SEND]			= cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= cpu_to_be32(MLX4_OPCODE_LSO),
+	[IB_WR_SEND_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+	[IB_WR_RDMA_WRITE]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+	[IB_WR_RDMA_WRITE_WITH_IMM]	= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+	[IB_WR_RDMA_READ]		= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+	[IB_WR_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+	[IB_WR_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+	[IB_WR_SEND_WITH_INV]		= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+	[IB_WR_LOCAL_INV]		= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+	[IB_WR_FAST_REG_MR]		= cpu_to_be32(MLX4_OPCODE_FMR),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 640449582aba..5648659ff0b0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -104,7 +104,8 @@ static void update_sm_ah(struct mthca_dev *dev,
104 */ 104 */
105static void smp_snoop(struct ib_device *ibdev, 105static void smp_snoop(struct ib_device *ibdev,
106 u8 port_num, 106 u8 port_num,
107 struct ib_mad *mad) 107 struct ib_mad *mad,
108 u16 prev_lid)
108{ 109{
109 struct ib_event event; 110 struct ib_event event;
110 111
@@ -114,6 +115,7 @@ static void smp_snoop(struct ib_device *ibdev,
114 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { 115 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
115 struct ib_port_info *pinfo = 116 struct ib_port_info *pinfo =
116 (struct ib_port_info *) ((struct ib_smp *) mad)->data; 117 (struct ib_port_info *) ((struct ib_smp *) mad)->data;
118 u16 lid = be16_to_cpu(pinfo->lid);
117 119
118 mthca_update_rate(to_mdev(ibdev), port_num); 120 mthca_update_rate(to_mdev(ibdev), port_num);
119 update_sm_ah(to_mdev(ibdev), port_num, 121 update_sm_ah(to_mdev(ibdev), port_num,
@@ -123,12 +125,15 @@ static void smp_snoop(struct ib_device *ibdev,
123 event.device = ibdev; 125 event.device = ibdev;
124 event.element.port_num = port_num; 126 event.element.port_num = port_num;
125 127
126 if (pinfo->clientrereg_resv_subnetto & 0x80) 128 if (pinfo->clientrereg_resv_subnetto & 0x80) {
127 event.event = IB_EVENT_CLIENT_REREGISTER; 129 event.event = IB_EVENT_CLIENT_REREGISTER;
128 else 130 ib_dispatch_event(&event);
129 event.event = IB_EVENT_LID_CHANGE; 131 }
130 132
131 ib_dispatch_event(&event); 133 if (prev_lid != lid) {
134 event.event = IB_EVENT_LID_CHANGE;
135 ib_dispatch_event(&event);
136 }
132 } 137 }
133 138
134 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { 139 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -196,6 +201,8 @@ int mthca_process_mad(struct ib_device *ibdev,
196 int err; 201 int err;
197 u8 status; 202 u8 status;
198 u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); 203 u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
204 u16 prev_lid = 0;
205 struct ib_port_attr pattr;
199 206
200 /* Forward locally generated traps to the SM */ 207 /* Forward locally generated traps to the SM */
201 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && 208 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
@@ -233,6 +240,12 @@ int mthca_process_mad(struct ib_device *ibdev,
233 return IB_MAD_RESULT_SUCCESS; 240 return IB_MAD_RESULT_SUCCESS;
234 } else 241 } else
235 return IB_MAD_RESULT_SUCCESS; 242 return IB_MAD_RESULT_SUCCESS;
243 if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
244 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
245 in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
246 in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
247 !ib_query_port(ibdev, port_num, &pattr))
248 prev_lid = pattr.lid;
236 249
237 err = mthca_MAD_IFC(to_mdev(ibdev), 250 err = mthca_MAD_IFC(to_mdev(ibdev),
238 mad_flags & IB_MAD_IGNORE_MKEY, 251 mad_flags & IB_MAD_IGNORE_MKEY,
@@ -252,7 +265,7 @@ int mthca_process_mad(struct ib_device *ibdev,
252 } 265 }
253 266
254 if (!out_mad->mad_hdr.status) { 267 if (!out_mad->mad_hdr.status) {
255 smp_snoop(ibdev, port_num, in_mad); 268 smp_snoop(ibdev, port_num, in_mad, prev_lid);
256 node_desc_override(ibdev, out_mad); 269 node_desc_override(ibdev, out_mad);
257 } 270 }
258 271
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0bd2a4ff0842..353c13b91e8f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -660,8 +660,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
660 660
661 path = __path_find(dev, phdr->hwaddr + 4); 661 path = __path_find(dev, phdr->hwaddr + 4);
662 if (!path || !path->valid) { 662 if (!path || !path->valid) {
663 if (!path) 663 int new_path = 0;
664
665 if (!path) {
664 path = path_rec_create(dev, phdr->hwaddr + 4); 666 path = path_rec_create(dev, phdr->hwaddr + 4);
667 new_path = 1;
668 }
665 if (path) { 669 if (path) {
666 /* put pseudoheader back on for next time */ 670 /* put pseudoheader back on for next time */
667 skb_push(skb, sizeof *phdr); 671 skb_push(skb, sizeof *phdr);
@@ -669,7 +673,8 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
669 673
670 if (!path->query && path_rec_start(dev, path)) { 674 if (!path->query && path_rec_start(dev, path)) {
671 spin_unlock_irqrestore(&priv->lock, flags); 675 spin_unlock_irqrestore(&priv->lock, flags);
672 path_free(dev, path); 676 if (new_path)
677 path_free(dev, path);
673 return; 678 return;
674 } else 679 } else
675 __path_add(dev, path); 680 __path_add(dev, path);
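
The ipoib fix above encodes an ownership rule: on a failed path_rec_start(), unicast_arp_send() may free the path only if this call created it; a path returned by __path_find() is still linked into the device's path table, and freeing it would corrupt the list. A compact standalone model of the rule (hypothetical helpers, not the driver's):

	#include <stdlib.h>

	struct path { int refcnt; };

	static struct path *lookup_path(void)  { return NULL; }  /* table miss */
	static struct path *create_path(void)  { return calloc(1, sizeof(struct path)); }
	static int start_query(struct path *p) { (void)p; return -1; /* fails */ }

	static void send_one(void)
	{
		int new_path = 0;
		struct path *path = lookup_path();

		if (!path) {
			path = create_path();
			new_path = 1;          /* ours until added to the table */
		}
		if (path && start_query(path) != 0) {
			if (new_path)          /* free only what this call created */
				free(path);
			return;
		}
		/* on success the path would be handed to the table here */
	}

	int main(void) { send_one(); return 0; }
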
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 319b188145be..ea9e1556e0d6 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -401,13 +401,6 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
401 if (ret) 401 if (ret)
402 goto failure; 402 goto failure;
403 403
404 iser_dbg("path.mtu is %d setting it to %d\n",
405 cma_id->route.path_rec->mtu, IB_MTU_1024);
406
407 /* we must set the MTU to 1024 as this is what the target is assuming */
408 if (cma_id->route.path_rec->mtu > IB_MTU_1024)
409 cma_id->route.path_rec->mtu = IB_MTU_1024;
410
411 memset(&conn_param, 0, sizeof conn_param); 404 memset(&conn_param, 0, sizeof conn_param);
412 conn_param.responder_resources = 4; 405 conn_param.responder_resources = 4;
413 conn_param.initiator_depth = 1; 406 conn_param.initiator_depth = 1;
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index a7a97bf998f8..21040a0d81fe 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,7 +1,7 @@
1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o 1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
2 2
3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ 3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
4 mr.o pd.o port.o profile.o qp.o reset.o srq.o 4 mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
5 5
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o 6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index f094ee00c416..aa9674b7f19c 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -42,7 +42,6 @@ enum {
42static DEFINE_SPINLOCK(catas_lock); 42static DEFINE_SPINLOCK(catas_lock);
43 43
44static LIST_HEAD(catas_list); 44static LIST_HEAD(catas_list);
45static struct workqueue_struct *catas_wq;
46static struct work_struct catas_work; 45static struct work_struct catas_work;
47 46
48static int internal_err_reset = 1; 47static int internal_err_reset = 1;
@@ -77,7 +76,7 @@ static void poll_catas(unsigned long dev_ptr)
77 list_add(&priv->catas_err.list, &catas_list); 76 list_add(&priv->catas_err.list, &catas_list);
78 spin_unlock(&catas_lock); 77 spin_unlock(&catas_lock);
79 78
80 queue_work(catas_wq, &catas_work); 79 queue_work(mlx4_wq, &catas_work);
81 } 80 }
82 } else 81 } else
83 mod_timer(&priv->catas_err.timer, 82 mod_timer(&priv->catas_err.timer,
@@ -146,18 +145,7 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
146 spin_unlock_irq(&catas_lock); 145 spin_unlock_irq(&catas_lock);
147} 146}
148 147
149int __init mlx4_catas_init(void) 148void __init mlx4_catas_init(void)
150{ 149{
151 INIT_WORK(&catas_work, catas_reset); 150 INIT_WORK(&catas_work, catas_reset);
152
153 catas_wq = create_singlethread_workqueue("mlx4_err");
154 if (!catas_wq)
155 return -ENOMEM;
156
157 return 0;
158}
159
160void mlx4_catas_cleanup(void)
161{
162 destroy_workqueue(catas_wq);
163} 151}
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 2c19bff7cbab..8830dcb92ec8 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -163,6 +163,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
163 int cqn; 163 int cqn;
164 int eqes_found = 0; 164 int eqes_found = 0;
165 int set_ci = 0; 165 int set_ci = 0;
166 int port;
166 167
167 while ((eqe = next_eqe_sw(eq))) { 168 while ((eqe = next_eqe_sw(eq))) {
168 /* 169 /*
@@ -203,11 +204,16 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
203 break; 204 break;
204 205
205 case MLX4_EVENT_TYPE_PORT_CHANGE: 206 case MLX4_EVENT_TYPE_PORT_CHANGE:
206 mlx4_dispatch_event(dev, 207 port = be32_to_cpu(eqe->event.port_change.port) >> 28;
207 eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ? 208 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
208 MLX4_DEV_EVENT_PORT_UP : 209 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
209 MLX4_DEV_EVENT_PORT_DOWN, 210 port);
210 be32_to_cpu(eqe->event.port_change.port) >> 28); 211 mlx4_priv(dev)->sense.do_sense_port[port] = 1;
212 } else {
213 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
214 port);
215 mlx4_priv(dev)->sense.do_sense_port[port] = 0;
216 }
211 break; 217 break;
212 218
213 case MLX4_EVENT_TYPE_CQ_ERROR: 219 case MLX4_EVENT_TYPE_CQ_ERROR:
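
In the PORT_CHANGE hunk above, the port index travels in the top four bits of the big-endian event word, hence the shift by 28 after be32_to_cpu(); a DOWN event additionally arms auto-sensing for that port and an UP event disarms it. A standalone check of the decode (the bit layout is taken from the code above):

	#include <assert.h>
	#include <stdint.h>

	/* Model of: port = be32_to_cpu(eqe->event.port_change.port) >> 28 */
	static int decode_port(uint32_t host_order)
	{
		return host_order >> 28;   /* port index sits in bits 31..28 */
	}

	int main(void)
	{
		assert(decode_port(0x10000000u) == 1);   /* port 1 */
		assert(decode_port(0x20000000u) == 2);   /* port 2 */
		return 0;
	}
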
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 6ef2490d5c3e..a66f5b2fd288 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -51,6 +51,8 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
51MODULE_LICENSE("Dual BSD/GPL"); 51MODULE_LICENSE("Dual BSD/GPL");
52MODULE_VERSION(DRV_VERSION); 52MODULE_VERSION(DRV_VERSION);
53 53
54struct workqueue_struct *mlx4_wq;
55
54#ifdef CONFIG_MLX4_DEBUG 56#ifdef CONFIG_MLX4_DEBUG
55 57
56int mlx4_debug_level = 0; 58int mlx4_debug_level = 0;
@@ -98,24 +100,23 @@ module_param_named(use_prio, use_prio, bool, 0444);
98MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 100MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
99 "(0/1, default 0)"); 101 "(0/1, default 0)");
100 102
101static int mlx4_check_port_params(struct mlx4_dev *dev, 103int mlx4_check_port_params(struct mlx4_dev *dev,
102 enum mlx4_port_type *port_type) 104 enum mlx4_port_type *port_type)
103{ 105{
104 int i; 106 int i;
105 107
106 for (i = 0; i < dev->caps.num_ports - 1; i++) { 108 for (i = 0; i < dev->caps.num_ports - 1; i++) {
107 if (port_type[i] != port_type[i+1] && 109 if (port_type[i] != port_type[i + 1]) {
108 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 110 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
109 mlx4_err(dev, "Only same port types supported " 111 mlx4_err(dev, "Only same port types supported "
110 "on this HCA, aborting.\n"); 112 "on this HCA, aborting.\n");
111 return -EINVAL; 113 return -EINVAL;
114 }
115 if (port_type[i] == MLX4_PORT_TYPE_ETH &&
116 port_type[i + 1] == MLX4_PORT_TYPE_IB)
117 return -EINVAL;
112 } 118 }
113 } 119 }
114 if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
115 (port_type[1] == MLX4_PORT_TYPE_IB)) {
116 mlx4_err(dev, "eth-ib configuration is not supported.\n");
117 return -EINVAL;
118 }
119 120
120 for (i = 0; i < dev->caps.num_ports; i++) { 121 for (i = 0; i < dev->caps.num_ports; i++) {
121 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 122 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
@@ -225,6 +226,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
225 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 226 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
226 else 227 else
227 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 228 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
229 dev->caps.possible_type[i] = dev->caps.port_type[i];
230 mlx4_priv(dev)->sense.sense_allowed[i] =
231 dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
228 232
229 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 233 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
230 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 234 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -263,14 +267,16 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
263 * Change the port configuration of the device. 267 * Change the port configuration of the device.
264 * Every user of this function must hold the port mutex. 268 * Every user of this function must hold the port mutex.
265 */ 269 */
266static int mlx4_change_port_types(struct mlx4_dev *dev, 270int mlx4_change_port_types(struct mlx4_dev *dev,
267 enum mlx4_port_type *port_types) 271 enum mlx4_port_type *port_types)
268{ 272{
269 int err = 0; 273 int err = 0;
270 int change = 0; 274 int change = 0;
271 int port; 275 int port;
272 276
273 for (port = 0; port < dev->caps.num_ports; port++) { 277 for (port = 0; port < dev->caps.num_ports; port++) {
278 /* Change the port type only if the new type is different
279 * from the current, and not set to Auto */
274 if (port_types[port] != dev->caps.port_type[port + 1]) { 280 if (port_types[port] != dev->caps.port_type[port + 1]) {
275 change = 1; 281 change = 1;
276 dev->caps.port_type[port + 1] = port_types[port]; 282 dev->caps.port_type[port + 1] = port_types[port];
@@ -302,10 +308,17 @@ static ssize_t show_port_type(struct device *dev,
302 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 308 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
303 port_attr); 309 port_attr);
304 struct mlx4_dev *mdev = info->dev; 310 struct mlx4_dev *mdev = info->dev;
311 char type[8];
312
313 sprintf(type, "%s",
314 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
315 "ib" : "eth");
316 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
317 sprintf(buf, "auto (%s)\n", type);
318 else
319 sprintf(buf, "%s\n", type);
305 320
306 return sprintf(buf, "%s\n", 321 return strlen(buf);
307 mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
308 "ib" : "eth");
309} 322}
310 323
311static ssize_t set_port_type(struct device *dev, 324static ssize_t set_port_type(struct device *dev,
@@ -317,6 +330,7 @@ static ssize_t set_port_type(struct device *dev,
317 struct mlx4_dev *mdev = info->dev; 330 struct mlx4_dev *mdev = info->dev;
318 struct mlx4_priv *priv = mlx4_priv(mdev); 331 struct mlx4_priv *priv = mlx4_priv(mdev);
319 enum mlx4_port_type types[MLX4_MAX_PORTS]; 332 enum mlx4_port_type types[MLX4_MAX_PORTS];
333 enum mlx4_port_type new_types[MLX4_MAX_PORTS];
320 int i; 334 int i;
321 int err = 0; 335 int err = 0;
322 336
@@ -324,26 +338,56 @@ static ssize_t set_port_type(struct device *dev,
324 info->tmp_type = MLX4_PORT_TYPE_IB; 338 info->tmp_type = MLX4_PORT_TYPE_IB;
325 else if (!strcmp(buf, "eth\n")) 339 else if (!strcmp(buf, "eth\n"))
326 info->tmp_type = MLX4_PORT_TYPE_ETH; 340 info->tmp_type = MLX4_PORT_TYPE_ETH;
341 else if (!strcmp(buf, "auto\n"))
342 info->tmp_type = MLX4_PORT_TYPE_AUTO;
327 else { 343 else {
328 mlx4_err(mdev, "%s is not supported port type\n", buf); 344 mlx4_err(mdev, "%s is not supported port type\n", buf);
329 return -EINVAL; 345 return -EINVAL;
330 } 346 }
331 347
348 mlx4_stop_sense(mdev);
332 mutex_lock(&priv->port_mutex); 349 mutex_lock(&priv->port_mutex);
333 for (i = 0; i < mdev->caps.num_ports; i++) 350 /* Possible type is always the one that was delivered */
351 mdev->caps.possible_type[info->port] = info->tmp_type;
352
353 for (i = 0; i < mdev->caps.num_ports; i++) {
334 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 354 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
335 mdev->caps.port_type[i+1]; 355 mdev->caps.possible_type[i+1];
356 if (types[i] == MLX4_PORT_TYPE_AUTO)
357 types[i] = mdev->caps.port_type[i+1];
358 }
336 359
337 err = mlx4_check_port_params(mdev, types); 360 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
361 for (i = 1; i <= mdev->caps.num_ports; i++) {
362 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
363 mdev->caps.possible_type[i] = mdev->caps.port_type[i];
364 err = -EINVAL;
365 }
366 }
367 }
368 if (err) {
369 mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
370 "Set only 'eth' or 'ib' for both ports "
371 "(should be the same)\n");
372 goto out;
373 }
374
375 mlx4_do_sense_ports(mdev, new_types, types);
376
377 err = mlx4_check_port_params(mdev, new_types);
338 if (err) 378 if (err)
339 goto out; 379 goto out;
340 380
341 for (i = 1; i <= mdev->caps.num_ports; i++) 381 /* We are about to apply the changes after the configuration
342 priv->port[i].tmp_type = 0; 382 * was verified, no need to remember the temporary types
383 * any more */
384 for (i = 0; i < mdev->caps.num_ports; i++)
385 priv->port[i + 1].tmp_type = 0;
343 386
344 err = mlx4_change_port_types(mdev, types); 387 err = mlx4_change_port_types(mdev, new_types);
345 388
346out: 389out:
390 mlx4_start_sense(mdev);
347 mutex_unlock(&priv->port_mutex); 391 mutex_unlock(&priv->port_mutex);
348 return err ? err : count; 392 return err ? err : count;
349} 393}
@@ -1117,6 +1161,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1117 if (err) 1161 if (err)
1118 goto err_port; 1162 goto err_port;
1119 1163
1164 mlx4_sense_init(dev);
1165 mlx4_start_sense(dev);
1166
1120 pci_set_drvdata(pdev, dev); 1167 pci_set_drvdata(pdev, dev);
1121 1168
1122 return 0; 1169 return 0;
@@ -1182,6 +1229,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1182 int p; 1229 int p;
1183 1230
1184 if (dev) { 1231 if (dev) {
1232 mlx4_stop_sense(dev);
1185 mlx4_unregister_device(dev); 1233 mlx4_unregister_device(dev);
1186 1234
1187 for (p = 1; p <= dev->caps.num_ports; p++) { 1235 for (p = 1; p <= dev->caps.num_ports; p++) {
@@ -1230,6 +1278,8 @@ static struct pci_device_id mlx4_pci_table[] = {
1230 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ 1278 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
1231 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ 1279 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
1232 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 1280 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
1281 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1282 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1233 { 0, } 1283 { 0, }
1234}; 1284};
1235 1285
@@ -1264,9 +1314,11 @@ static int __init mlx4_init(void)
1264 if (mlx4_verify_params()) 1314 if (mlx4_verify_params())
1265 return -EINVAL; 1315 return -EINVAL;
1266 1316
1267 ret = mlx4_catas_init(); 1317 mlx4_catas_init();
1268 if (ret) 1318
1269 return ret; 1319 mlx4_wq = create_singlethread_workqueue("mlx4");
1320 if (!mlx4_wq)
1321 return -ENOMEM;
1270 1322
1271 ret = pci_register_driver(&mlx4_driver); 1323 ret = pci_register_driver(&mlx4_driver);
1272 return ret < 0 ? ret : 0; 1324 return ret < 0 ? ret : 0;
@@ -1275,7 +1327,7 @@ static int __init mlx4_init(void)
1275static void __exit mlx4_cleanup(void) 1327static void __exit mlx4_cleanup(void)
1276{ 1328{
1277 pci_unregister_driver(&mlx4_driver); 1329 pci_unregister_driver(&mlx4_driver);
1278 mlx4_catas_cleanup(); 1330 destroy_workqueue(mlx4_wq);
1279} 1331}
1280 1332
1281module_init(mlx4_init); 1333module_init(mlx4_init);
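
A detail worth keeping in mind about set_port_type() above: each port's requested type is resolved in priority order, that is, a tmp_type pending from this write, else the stored possible_type, and if that is still auto, the currently active type; only then is the result run through mlx4_do_sense_ports() and mlx4_check_port_params(). A small standalone model of the resolution (enum values as in device.h below):

	#include <assert.h>

	enum port_type { TYPE_NONE = 0, TYPE_IB = 1, TYPE_ETH = 2, TYPE_AUTO = 3 };

	static enum port_type resolve(enum port_type tmp,
				      enum port_type possible,
				      enum port_type current)
	{
		enum port_type t = tmp ? tmp : possible;
		return t == TYPE_AUTO ? current : t;
	}

	int main(void)
	{
		assert(resolve(TYPE_ETH,  TYPE_IB,   TYPE_IB)  == TYPE_ETH); /* explicit write wins */
		assert(resolve(TYPE_NONE, TYPE_IB,   TYPE_ETH) == TYPE_IB);  /* stored request */
		assert(resolve(TYPE_NONE, TYPE_AUTO, TYPE_ETH) == TYPE_ETH); /* auto keeps current */
		return 0;
	}
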
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index e0213bad61c7..5bd79c2b184f 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -40,6 +40,7 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/radix-tree.h> 41#include <linux/radix-tree.h>
42#include <linux/timer.h> 42#include <linux/timer.h>
43#include <linux/workqueue.h>
43 44
44#include <linux/mlx4/device.h> 45#include <linux/mlx4/device.h>
45#include <linux/mlx4/driver.h> 46#include <linux/mlx4/driver.h>
@@ -276,6 +277,13 @@ struct mlx4_port_info {
276 struct mlx4_vlan_table vlan_table; 277 struct mlx4_vlan_table vlan_table;
277}; 278};
278 279
280struct mlx4_sense {
281 struct mlx4_dev *dev;
282 u8 do_sense_port[MLX4_MAX_PORTS + 1];
283 u8 sense_allowed[MLX4_MAX_PORTS + 1];
284 struct delayed_work sense_poll;
285};
286
279struct mlx4_priv { 287struct mlx4_priv {
280 struct mlx4_dev dev; 288 struct mlx4_dev dev;
281 289
@@ -305,6 +313,7 @@ struct mlx4_priv {
305 struct mlx4_uar driver_uar; 313 struct mlx4_uar driver_uar;
306 void __iomem *kar; 314 void __iomem *kar;
307 struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; 315 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
316 struct mlx4_sense sense;
308 struct mutex port_mutex; 317 struct mutex port_mutex;
309}; 318};
310 319
@@ -313,6 +322,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
313 return container_of(dev, struct mlx4_priv, dev); 322 return container_of(dev, struct mlx4_priv, dev);
314} 323}
315 324
325#define MLX4_SENSE_RANGE (HZ * 3)
326
327extern struct workqueue_struct *mlx4_wq;
328
316u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); 329u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
317void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 330void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
318u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 331u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
@@ -346,8 +359,7 @@ void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
346 359
347void mlx4_start_catas_poll(struct mlx4_dev *dev); 360void mlx4_start_catas_poll(struct mlx4_dev *dev);
348void mlx4_stop_catas_poll(struct mlx4_dev *dev); 361void mlx4_stop_catas_poll(struct mlx4_dev *dev);
349int mlx4_catas_init(void); 362void mlx4_catas_init(void);
350void mlx4_catas_cleanup(void);
351int mlx4_restart_one(struct pci_dev *pdev); 363int mlx4_restart_one(struct pci_dev *pdev);
352int mlx4_register_device(struct mlx4_dev *dev); 364int mlx4_register_device(struct mlx4_dev *dev);
353void mlx4_unregister_device(struct mlx4_dev *dev); 365void mlx4_unregister_device(struct mlx4_dev *dev);
@@ -379,6 +391,17 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
379 391
380void mlx4_handle_catas_err(struct mlx4_dev *dev); 392void mlx4_handle_catas_err(struct mlx4_dev *dev);
381 393
394void mlx4_do_sense_ports(struct mlx4_dev *dev,
395 enum mlx4_port_type *stype,
396 enum mlx4_port_type *defaults);
397void mlx4_start_sense(struct mlx4_dev *dev);
398void mlx4_stop_sense(struct mlx4_dev *dev);
399void mlx4_sense_init(struct mlx4_dev *dev);
400int mlx4_check_port_params(struct mlx4_dev *dev,
401 enum mlx4_port_type *port_type);
402int mlx4_change_port_types(struct mlx4_dev *dev,
403 enum mlx4_port_type *port_types);
404
382void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 405void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
383void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 406void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
384 407
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 0a057e5dc63b..7cce3342ef8c 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -298,20 +298,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
298{ 298{
299 struct mlx4_cmd_mailbox *mailbox; 299 struct mlx4_cmd_mailbox *mailbox;
300 int err; 300 int err;
301 u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
302 301
302 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
303 return 0;
304
303 mailbox = mlx4_alloc_cmd_mailbox(dev); 305 mailbox = mlx4_alloc_cmd_mailbox(dev);
304 if (IS_ERR(mailbox)) 306 if (IS_ERR(mailbox))
305 return PTR_ERR(mailbox); 307 return PTR_ERR(mailbox);
306 308
307 memset(mailbox->buf, 0, 256); 309 memset(mailbox->buf, 0, 256);
308 if (is_eth) {
309 ((u8 *) mailbox->buf)[3] = 6;
310 ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
311 ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
312 } else
313 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; 310 ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
314 err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT, 311 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
315 MLX4_CMD_TIME_CLASS_B); 312 MLX4_CMD_TIME_CLASS_B);
316 313
317 mlx4_free_cmd_mailbox(dev, mailbox); 314 mlx4_free_cmd_mailbox(dev, mailbox);
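
A note on the ordering in the rewritten mlx4_SET_PORT() above: once mlx4_alloc_cmd_mailbox() succeeds, every return path must pass through mlx4_free_cmd_mailbox(), so the "Ethernet needs no SET_PORT" test has to run before the allocation. A condensed sketch of the resulting shape (driver helpers as used above; not compilable outside the driver):

	int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
	{
		struct mlx4_cmd_mailbox *mailbox;
		int err;

		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			return 0;               /* nothing allocated yet: safe */

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		memset(mailbox->buf, 0, 256);
		((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B);

		mlx4_free_cmd_mailbox(dev, mailbox);    /* every path frees */
		return err;
	}
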
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
new file mode 100644
index 000000000000..6d5089ecb5af
--- /dev/null
+++ b/drivers/net/mlx4/sense.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/errno.h>
35#include <linux/if_ether.h>
36
37#include <linux/mlx4/cmd.h>
38
39#include "mlx4.h"
40
41static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
42 enum mlx4_port_type *type)
43{
44 u64 out_param;
45 int err = 0;
46
47 err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
48 MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
49 if (err) {
50 mlx4_err(dev, "Sense command failed for port: %d\n", port);
51 return err;
52 }
53
54 if (out_param > 2) {
55 mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
56 return -EINVAL;
57 }
58
59 *type = out_param;
60 return 0;
61}
62
63void mlx4_do_sense_ports(struct mlx4_dev *dev,
64 enum mlx4_port_type *stype,
65 enum mlx4_port_type *defaults)
66{
67 struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
68 int err;
69 int i;
70
71 for (i = 1; i <= dev->caps.num_ports; i++) {
72 stype[i - 1] = 0;
73 if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
74 dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
75 err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
76 if (err)
77 stype[i - 1] = defaults[i - 1];
78 } else
79 stype[i - 1] = defaults[i - 1];
80 }
81
82 /*
83 * Adjust port configuration:
84 * If port 1 sensed nothing and port 2 is IB, set both as IB
85 * If port 2 sensed nothing and port 1 is Eth, set both as Eth
86 */
87 if (stype[0] == MLX4_PORT_TYPE_ETH) {
88 for (i = 1; i < dev->caps.num_ports; i++)
89 stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
90 }
91 if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
92 for (i = 0; i < dev->caps.num_ports - 1; i++)
93 stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
94 }
95
96 /*
97 * If sensed nothing, remain in current configuration.
98 */
99 for (i = 0; i < dev->caps.num_ports; i++)
100 stype[i] = stype[i] ? stype[i] : defaults[i];
101
102}
103
104static void mlx4_sense_port(struct work_struct *work)
105{
106 struct delayed_work *delay = container_of(work, struct delayed_work, work);
107 struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
108 sense_poll);
109 struct mlx4_dev *dev = sense->dev;
110 struct mlx4_priv *priv = mlx4_priv(dev);
111 enum mlx4_port_type stype[MLX4_MAX_PORTS];
112
113 mutex_lock(&priv->port_mutex);
114 mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
115
116 if (mlx4_check_port_params(dev, stype))
117 goto sense_again;
118
119 if (mlx4_change_port_types(dev, stype))
120 mlx4_err(dev, "Failed to change port_types\n");
121
122sense_again:
123 mutex_unlock(&priv->port_mutex);
124 queue_delayed_work(mlx4_wq, &sense->sense_poll,
125 round_jiffies_relative(MLX4_SENSE_RANGE));
126}
127
128void mlx4_start_sense(struct mlx4_dev *dev)
129{
130 struct mlx4_priv *priv = mlx4_priv(dev);
131 struct mlx4_sense *sense = &priv->sense;
132
133 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
134 return;
135
136 queue_delayed_work(mlx4_wq, &sense->sense_poll,
137 round_jiffies_relative(MLX4_SENSE_RANGE));
138}
139
140void mlx4_stop_sense(struct mlx4_dev *dev)
141{
142 cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
143}
144
145void mlx4_sense_init(struct mlx4_dev *dev)
146{
147 struct mlx4_priv *priv = mlx4_priv(dev);
148 struct mlx4_sense *sense = &priv->sense;
149 int port;
150
151 sense->dev = dev;
152 for (port = 1; port <= dev->caps.num_ports; port++)
153 sense->do_sense_port[port] = 1;
154
155 INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
156}
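
The new file above implements a self-rescheduling poll: mlx4_sense_port() re-queues itself on mlx4_wq every MLX4_SENSE_RANGE (3 * HZ) jiffies, round_jiffies_relative() batches the wakeup with other timers, and mlx4_stop_sense() uses cancel_delayed_work_sync() so teardown waits for a pass already in flight. A kernel-style sketch of the same pattern (hypothetical names; builds only inside a kernel tree):

	#include <linux/workqueue.h>
	#include <linux/timer.h>

	#define POLL_RANGE (HZ * 3)              /* mirrors MLX4_SENSE_RANGE */

	static struct workqueue_struct *poll_wq; /* like mlx4_wq */
	static struct delayed_work poll_work;

	static void poll_fn(struct work_struct *work)
	{
		/* ... sample the hardware, apply changes under a mutex ... */
		queue_delayed_work(poll_wq, &poll_work,
				   round_jiffies_relative(POLL_RANGE));
	}

	static void poll_init(void)
	{
		INIT_DELAYED_WORK_DEFERRABLE(&poll_work, poll_fn);
	}

	static void poll_stop(void)
	{
		cancel_delayed_work_sync(&poll_work);  /* waits for a running pass */
	}
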
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index cf9c679ab38b..0f82293a82ed 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -55,6 +55,7 @@ enum {
55 MLX4_CMD_CLOSE_PORT = 0xa, 55 MLX4_CMD_CLOSE_PORT = 0xa,
56 MLX4_CMD_QUERY_HCA = 0xb, 56 MLX4_CMD_QUERY_HCA = 0xb,
57 MLX4_CMD_QUERY_PORT = 0x43, 57 MLX4_CMD_QUERY_PORT = 0x43,
58 MLX4_CMD_SENSE_PORT = 0x4d,
58 MLX4_CMD_SET_PORT = 0xc, 59 MLX4_CMD_SET_PORT = 0xc,
59 MLX4_CMD_ACCESS_DDR = 0x2e, 60 MLX4_CMD_ACCESS_DDR = 0x2e,
60 MLX4_CMD_MAP_ICM = 0xffa, 61 MLX4_CMD_MAP_ICM = 0xffa,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8f659cc29960..3aff8a6a389e 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -155,8 +155,9 @@ enum mlx4_qp_region {
155}; 155};
156 156
157enum mlx4_port_type { 157enum mlx4_port_type {
158 MLX4_PORT_TYPE_IB = 1 << 0, 158 MLX4_PORT_TYPE_IB = 1,
159 MLX4_PORT_TYPE_ETH = 1 << 1, 159 MLX4_PORT_TYPE_ETH = 2,
160 MLX4_PORT_TYPE_AUTO = 3
160}; 161};
161 162
162enum mlx4_special_vlan_idx { 163enum mlx4_special_vlan_idx {
@@ -237,6 +238,7 @@ struct mlx4_caps {
237 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 238 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
238 u8 supported_type[MLX4_MAX_PORTS + 1]; 239 u8 supported_type[MLX4_MAX_PORTS + 1];
239 u32 port_mask; 240 u32 port_mask;
241 enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
240}; 242};
241 243
242struct mlx4_buf_list { 244struct mlx4_buf_list {
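
One reason MLX4_PORT_TYPE_AUTO can be 3 rather than a new bit: with IB = 1 and ETH = 2 it equals IB | ETH, so the existing bitmask test in mlx4_check_port_params() (port_type[i] & dev->caps.supported_type[i+1], shown in main.c above) accepts "auto" on any port that supports at least one real type. A quick standalone check of that identity:

	#include <assert.h>

	enum { TYPE_IB = 1, TYPE_ETH = 2, TYPE_AUTO = 3 };

	int main(void)
	{
		assert(TYPE_AUTO == (TYPE_IB | TYPE_ETH));
		assert(TYPE_AUTO & TYPE_IB);   /* compatible with IB-only ports  */
		assert(TYPE_AUTO & TYPE_ETH);  /* compatible with ETH-only ports */
		return 0;
	}
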
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index ec7c6d99ed3f..938858304300 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -314,12 +314,12 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
314 */ 314 */
315void ib_destroy_cm_id(struct ib_cm_id *cm_id); 315void ib_destroy_cm_id(struct ib_cm_id *cm_id);
316 316
317#define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL) 317#define IB_SERVICE_ID_AGN_MASK cpu_to_be64(0xFF00000000000000ULL)
318#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL) 318#define IB_CM_ASSIGN_SERVICE_ID cpu_to_be64(0x0200000000000000ULL)
319#define IB_CMA_SERVICE_ID __constant_cpu_to_be64(0x0000000001000000ULL) 319#define IB_CMA_SERVICE_ID cpu_to_be64(0x0000000001000000ULL)
320#define IB_CMA_SERVICE_ID_MASK __constant_cpu_to_be64(0xFFFFFFFFFF000000ULL) 320#define IB_CMA_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFF000000ULL)
321#define IB_SDP_SERVICE_ID __constant_cpu_to_be64(0x0000000000010000ULL) 321#define IB_SDP_SERVICE_ID cpu_to_be64(0x0000000000010000ULL)
322#define IB_SDP_SERVICE_ID_MASK __constant_cpu_to_be64(0xFFFFFFFFFFFF0000ULL) 322#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
323 323
324struct ib_cm_compare_data { 324struct ib_cm_compare_data {
325 u8 data[IB_CM_COMPARE_SIZE]; 325 u8 data[IB_CM_COMPARE_SIZE];
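
The service-ID macros above drop the __constant_ prefix because the plain byte-order helpers now constant-fold when given a compile-time constant, which makes them legal wherever a constant is required, static initializers included; the same rewrite appears in ib_mad.h and ib_smi.h below. A one-line kernel-style illustration (hypothetical variable name):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Folds to a compile-time constant, so a static initializer is fine: */
	static const __be64 example_mask = cpu_to_be64(0xFF00000000000000ULL);
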
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 5f6c40fffcf4..d3b9401b77b0 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -107,7 +107,7 @@
107#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127 107#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127
108 108
109#define IB_QP0 0 109#define IB_QP0 0
110#define IB_QP1 __constant_htonl(1) 110#define IB_QP1 cpu_to_be32(1)
111#define IB_QP1_QKEY 0x80010000 111#define IB_QP1_QKEY 0x80010000
112#define IB_QP_SET_QKEY 0x80000000 112#define IB_QP_SET_QKEY 0x80000000
113 113
@@ -290,7 +290,7 @@ static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
290 */ 290 */
291static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags) 291static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
292{ 292{
293 rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) | 293 rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
294 (flags & 0x7); 294 (flags & 0x7);
295} 295}
296 296
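
The ib_set_rmpp_flags() change above is a mask fix: flags occupy bits 2..0 of rmpp_rtime_flags and the response-time field bits 7..3, so the preserve-mask must be 0xF8 (that is, ~0x07); the old 0xF1 cleared rtime bit 3 and kept a stale flag bit 0. A standalone check of the arithmetic:

	#include <assert.h>
	#include <stdint.h>

	/* Model of ib_set_rmpp_flags(): rtime in bits 7..3, flags in bits 2..0. */
	static uint8_t set_flags(uint8_t rtime_flags, uint8_t flags)
	{
		return (rtime_flags & 0xF8) | (flags & 0x7);
	}

	int main(void)
	{
		uint8_t reg = (0x1F << 3) | 0x7;          /* rtime all-ones, flags 0x7 */
		assert(set_flags(reg, 0) == 0x1F << 3);   /* rtime fully preserved */
		assert(((reg & 0xF1) | 0) != 0x1F << 3);  /* old mask damaged rtime */
		return 0;
	}
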
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index aaca0878668f..98b9086d769a 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -63,25 +63,25 @@ struct ib_smp {
63 u8 return_path[IB_SMP_MAX_PATH_HOPS]; 63 u8 return_path[IB_SMP_MAX_PATH_HOPS];
64} __attribute__ ((packed)); 64} __attribute__ ((packed));
65 65
66#define IB_SMP_DIRECTION __constant_htons(0x8000) 66#define IB_SMP_DIRECTION cpu_to_be16(0x8000)
67 67
68/* Subnet management attributes */ 68/* Subnet management attributes */
69#define IB_SMP_ATTR_NOTICE __constant_htons(0x0002) 69#define IB_SMP_ATTR_NOTICE cpu_to_be16(0x0002)
70#define IB_SMP_ATTR_NODE_DESC __constant_htons(0x0010) 70#define IB_SMP_ATTR_NODE_DESC cpu_to_be16(0x0010)
71#define IB_SMP_ATTR_NODE_INFO __constant_htons(0x0011) 71#define IB_SMP_ATTR_NODE_INFO cpu_to_be16(0x0011)
72#define IB_SMP_ATTR_SWITCH_INFO __constant_htons(0x0012) 72#define IB_SMP_ATTR_SWITCH_INFO cpu_to_be16(0x0012)
73#define IB_SMP_ATTR_GUID_INFO __constant_htons(0x0014) 73#define IB_SMP_ATTR_GUID_INFO cpu_to_be16(0x0014)
74#define IB_SMP_ATTR_PORT_INFO __constant_htons(0x0015) 74#define IB_SMP_ATTR_PORT_INFO cpu_to_be16(0x0015)
75#define IB_SMP_ATTR_PKEY_TABLE __constant_htons(0x0016) 75#define IB_SMP_ATTR_PKEY_TABLE cpu_to_be16(0x0016)
76#define IB_SMP_ATTR_SL_TO_VL_TABLE __constant_htons(0x0017) 76#define IB_SMP_ATTR_SL_TO_VL_TABLE cpu_to_be16(0x0017)
77#define IB_SMP_ATTR_VL_ARB_TABLE __constant_htons(0x0018) 77#define IB_SMP_ATTR_VL_ARB_TABLE cpu_to_be16(0x0018)
78#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE __constant_htons(0x0019) 78#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE cpu_to_be16(0x0019)
79#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE __constant_htons(0x001A) 79#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE cpu_to_be16(0x001A)
80#define IB_SMP_ATTR_MCAST_FORWARD_TABLE __constant_htons(0x001B) 80#define IB_SMP_ATTR_MCAST_FORWARD_TABLE cpu_to_be16(0x001B)
81#define IB_SMP_ATTR_SM_INFO __constant_htons(0x0020) 81#define IB_SMP_ATTR_SM_INFO cpu_to_be16(0x0020)
82#define IB_SMP_ATTR_VENDOR_DIAG __constant_htons(0x0030) 82#define IB_SMP_ATTR_VENDOR_DIAG cpu_to_be16(0x0030)
83#define IB_SMP_ATTR_LED_INFO __constant_htons(0x0031) 83#define IB_SMP_ATTR_LED_INFO cpu_to_be16(0x0031)
84#define IB_SMP_ATTR_VENDOR_MASK __constant_htons(0xFF00) 84#define IB_SMP_ATTR_VENDOR_MASK cpu_to_be16(0xFF00)
85 85
86struct ib_port_info { 86struct ib_port_info {
87 __be64 mkey; 87 __be64 mkey;