author	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-29 13:36:48 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-29 13:36:48 -0400
commit	a78b3371b628559eb5c46ee1518df27c62f3e801 (patch)
tree	dd32333307ce6a7e4d39ea8c07c34bc3dc5540a1 /drivers
parent	97c169a21bfb5bb2ab2bccd852da4f0d0e021c55 (diff)
parent	a4d61e84804f3b14cc35c5e2af768a07c0f64ef6 (diff)
Merge HEAD from master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/Makefile | 2
-rw-r--r--  drivers/infiniband/core/agent.c | 13
-rw-r--r--  drivers/infiniband/core/agent_priv.h | 10
-rw-r--r--  drivers/infiniband/core/cache.c | 6
-rw-r--r--  drivers/infiniband/core/cm.c | 125
-rw-r--r--  drivers/infiniband/core/cm_msgs.h | 194
-rw-r--r--  drivers/infiniband/core/core_priv.h | 2
-rw-r--r--  drivers/infiniband/core/device.c | 1
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 8
-rw-r--r--  drivers/infiniband/core/mad.c | 15
-rw-r--r--  drivers/infiniband/core/mad_priv.h | 10
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c | 311
-rw-r--r--  drivers/infiniband/core/packer.c | 3
-rw-r--r--  drivers/infiniband/core/sa_query.c | 6
-rw-r--r--  drivers/infiniband/core/smi.c | 13
-rw-r--r--  drivers/infiniband/core/sysfs.c | 40
-rw-r--r--  drivers/infiniband/core/ucm.c | 464
-rw-r--r--  drivers/infiniband/core/ucm.h | 13
-rw-r--r--  drivers/infiniband/core/ud_header.c | 11
-rw-r--r--  drivers/infiniband/core/user_mad.c | 10
-rw-r--r--  drivers/infiniband/core/uverbs.h | 11
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 182
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 22
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c | 1
-rw-r--r--  drivers/infiniband/core/verbs.c | 65
-rw-r--r--  drivers/infiniband/hw/mthca/Makefile | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_allocator.c | 116
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c | 28
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 106
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h | 20
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_config_reg.h | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 256
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h | 52
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_doorbell.h | 13
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 63
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 179
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c | 36
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.h | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 35
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_pd.c | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 115
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 54
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 362
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 591
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h | 11
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_wqe.h | 114
-rw-r--r--  drivers/infiniband/include/ib_cache.h | 103
-rw-r--r--  drivers/infiniband/include/ib_cm.h | 569
-rw-r--r--  drivers/infiniband/include/ib_fmr_pool.h | 93
-rw-r--r--  drivers/infiniband/include/ib_mad.h | 577
-rw-r--r--  drivers/infiniband/include/ib_pack.h | 245
-rw-r--r--  drivers/infiniband/include/ib_sa.h | 373
-rw-r--r--  drivers/infiniband/include/ib_smi.h | 96
-rw-r--r--  drivers/infiniband/include/ib_user_cm.h | 328
-rw-r--r--  drivers/infiniband/include/ib_user_mad.h | 139
-rw-r--r--  drivers/infiniband/include/ib_user_verbs.h | 389
-rw-r--r--  drivers/infiniband/include/ib_verbs.h | 1365
-rw-r--r--  drivers/infiniband/ulp/ipoib/Makefile | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 33
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 1
69 files changed, 2486 insertions, 5583 deletions
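
A note on the dominant pattern in this merge: fields that hold on-the-wire CM and MAD values are converted from plain u64/u32/u16 to the sparse-annotated __be64/__be32/__be16 types, and the InfiniBand headers move from drivers/infiniband/include/ to include/rdma/. With the bitwise annotations in place, a sparse run (make C=2 CF="-D__CHECK_ENDIAN__") flags any expression that mixes CPU-order and big-endian values. Below is a minimal user-space mock-up of how the annotations work; the real definitions live in <linux/types.h> and <asm/byteorder.h>, and everything here is re-created purely for illustration:

/* Mock-up of the kernel's sparse endianness annotations. */
#include <stdint.h>
#include <arpa/inet.h>

#ifdef __CHECKER__                      /* defined when sparse runs */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint32_t __bitwise __be32;      /* a distinct type to sparse */

static inline __be32 cpu_to_be32(uint32_t x)
{
        return (__force __be32) htonl(x);
}

static inline uint32_t be32_to_cpu(__be32 x)
{
        return ntohl((__force uint32_t) x);
}

struct wire_msg {
        __be32 local_comm_id;           /* always big-endian on the wire */
};

int main(void)
{
        struct wire_msg msg;

        msg.local_comm_id = cpu_to_be32(42);    /* correct */
        /* msg.local_comm_id = 42;  <-- sparse would warn: plain int
         * assigned to a restricted __be32 without a conversion */
        return be32_to_cpu(msg.local_comm_id) == 42 ? 0 : 1;
}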
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 10be36731ed7..678a7e097f32 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,5 +1,3 @@
-EXTRA_CFLAGS += -Idrivers/infiniband/include
-
 obj-$(CONFIG_INFINIBAND) +=		ib_core.o ib_mad.o ib_sa.o \
					ib_cm.o ib_umad.o ib_ucm.o
 obj-$(CONFIG_INFINIBAND_USER_VERBS) +=	ib_uverbs.o
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 729f0b0d983a..5ac86f566dc0 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -1,9 +1,10 @@
 /*
- * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
- * Copyright (c) 2004 Intel Corporation.  All rights reserved.
- * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2005 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2004, 2005 Infinicon Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -40,7 +41,7 @@
 
 #include <asm/bug.h>
 
-#include <ib_smi.h>
+#include <rdma/ib_smi.h>
 
 #include "smi.h"
 #include "agent_priv.h"
diff --git a/drivers/infiniband/core/agent_priv.h b/drivers/infiniband/core/agent_priv.h
index 17435af1e914..2ec6d7f1b7d0 100644
--- a/drivers/infiniband/core/agent_priv.h
+++ b/drivers/infiniband/core/agent_priv.h
@@ -1,9 +1,9 @@
 /*
- * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
- * Copyright (c) 2004 Intel Corporation.  All rights reserved.
- * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2005 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2004, 2005 Infinicon Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Corporation.  All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3042360c97e1..f014e639088c 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1,5 +1,8 @@
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -32,12 +35,11 @@
  * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 
-#include <ib_cache.h>
+#include <rdma/ib_cache.h>
 
 #include "core_priv.h"
 
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 403ed125d8f4..4de93ba274a6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -43,8 +43,8 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 
-#include <ib_cache.h>
-#include <ib_cm.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_cm.h>
 #include "cm_msgs.h"
 
 MODULE_AUTHOR("Sean Hefty");
@@ -83,7 +83,7 @@ struct cm_port {
 struct cm_device {
 	struct list_head list;
 	struct ib_device *device;
-	u64 ca_guid;
+	__be64 ca_guid;
 	struct cm_port port[0];
 };
 
@@ -100,8 +100,8 @@ struct cm_work {
 	struct list_head list;
 	struct cm_port *port;
 	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
-	u32 local_id;				/* Established / timewait */
-	u32 remote_id;
+	__be32 local_id;			/* Established / timewait */
+	__be32 remote_id;
 	struct ib_cm_event cm_event;
 	struct ib_sa_path_rec path[0];
 };
@@ -110,8 +110,8 @@ struct cm_timewait_info {
 	struct cm_work work;			/* Must be first. */
 	struct rb_node remote_qp_node;
 	struct rb_node remote_id_node;
-	u64 remote_ca_guid;
-	u32 remote_qpn;
+	__be64 remote_ca_guid;
+	__be32 remote_qpn;
 	u8 inserted_remote_qp;
 	u8 inserted_remote_id;
 };
@@ -132,11 +132,11 @@ struct cm_id_private {
 	struct cm_av alt_av;
 
 	void *private_data;
-	u64 tid;
-	u32 local_qpn;
-	u32 remote_qpn;
-	u32 sq_psn;
-	u32 rq_psn;
+	__be64 tid;
+	__be32 local_qpn;
+	__be32 remote_qpn;
+	__be32 sq_psn;
+	__be32 rq_psn;
 	int timeout_ms;
 	enum ib_mtu path_mtu;
 	u8 private_data_len;
@@ -253,7 +253,7 @@ static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
 			   u16 dlid, u8 sl, u16 src_path_bits)
 {
 	memset(ah_attr, 0, sizeof ah_attr);
-	ah_attr->dlid = be16_to_cpu(dlid);
+	ah_attr->dlid = dlid;
 	ah_attr->sl = sl;
 	ah_attr->src_path_bits = src_path_bits;
 	ah_attr->port_num = port_num;
@@ -264,7 +264,7 @@ static void cm_init_av_for_response(struct cm_port *port,
 {
 	av->port = port;
 	av->pkey_index = wc->pkey_index;
-	cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid),
+	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
 		       wc->sl, wc->dlid_path_bits);
 }
 
@@ -295,8 +295,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 		return ret;
 
 	av->port = port;
-	cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid,
-		       path->sl, path->slid & 0x7F);
+	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
+		       be16_to_cpu(path->dlid), path->sl,
+		       be16_to_cpu(path->slid) & 0x7F);
 	av->packet_life_time = path->packet_life_time;
 	return 0;
 }
@@ -309,26 +310,26 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 	do {
 		spin_lock_irqsave(&cm.lock, flags);
 		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
-					(int *) &cm_id_priv->id.local_id);
+					(__force int *) &cm_id_priv->id.local_id);
 		spin_unlock_irqrestore(&cm.lock, flags);
 	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
 	return ret;
 }
 
-static void cm_free_id(u32 local_id)
+static void cm_free_id(__be32 local_id)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cm.lock, flags);
-	idr_remove(&cm.local_id_table, (int) local_id);
+	idr_remove(&cm.local_id_table, (__force int) local_id);
 	spin_unlock_irqrestore(&cm.lock, flags);
 }
 
-static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
+static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
 {
 	struct cm_id_private *cm_id_priv;
 
-	cm_id_priv = idr_find(&cm.local_id_table, (int) local_id);
+	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
 	if (cm_id_priv) {
 		if (cm_id_priv->id.remote_id == remote_id)
 			atomic_inc(&cm_id_priv->refcount);
@@ -339,7 +340,7 @@ static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
 	return cm_id_priv;
 }
 
-static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id)
+static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
 {
 	struct cm_id_private *cm_id_priv;
 	unsigned long flags;
@@ -356,8 +357,8 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 	struct rb_node **link = &cm.listen_service_table.rb_node;
 	struct rb_node *parent = NULL;
 	struct cm_id_private *cur_cm_id_priv;
-	u64 service_id = cm_id_priv->id.service_id;
-	u64 service_mask = cm_id_priv->id.service_mask;
+	__be64 service_id = cm_id_priv->id.service_id;
+	__be64 service_mask = cm_id_priv->id.service_mask;
 
 	while (*link) {
 		parent = *link;
@@ -376,7 +377,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 	return NULL;
 }
 
-static struct cm_id_private * cm_find_listen(u64 service_id)
+static struct cm_id_private * cm_find_listen(__be64 service_id)
 {
 	struct rb_node *node = cm.listen_service_table.rb_node;
 	struct cm_id_private *cm_id_priv;
@@ -400,8 +401,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
 	struct rb_node **link = &cm.remote_id_table.rb_node;
 	struct rb_node *parent = NULL;
 	struct cm_timewait_info *cur_timewait_info;
-	u64 remote_ca_guid = timewait_info->remote_ca_guid;
-	u32 remote_id = timewait_info->work.remote_id;
+	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
+	__be32 remote_id = timewait_info->work.remote_id;
 
 	while (*link) {
 		parent = *link;
@@ -424,8 +425,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
 	return NULL;
 }
 
-static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid,
-						   u32 remote_id)
+static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
+						   __be32 remote_id)
 {
 	struct rb_node *node = cm.remote_id_table.rb_node;
 	struct cm_timewait_info *timewait_info;
@@ -453,8 +454,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
 	struct rb_node **link = &cm.remote_qp_table.rb_node;
 	struct rb_node *parent = NULL;
 	struct cm_timewait_info *cur_timewait_info;
-	u64 remote_ca_guid = timewait_info->remote_ca_guid;
-	u32 remote_qpn = timewait_info->remote_qpn;
+	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
+	__be32 remote_qpn = timewait_info->remote_qpn;
 
 	while (*link) {
 		parent = *link;
@@ -484,7 +485,7 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
 	struct rb_node *parent = NULL;
 	struct cm_id_private *cur_cm_id_priv;
 	union ib_gid *port_gid = &cm_id_priv->av.dgid;
-	u32 remote_id = cm_id_priv->id.remote_id;
+	__be32 remote_id = cm_id_priv->id.remote_id;
 
 	while (*link) {
 		parent = *link;
@@ -598,7 +599,7 @@ static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
 	spin_unlock_irqrestore(&cm.lock, flags);
 }
 
-static struct cm_timewait_info * cm_create_timewait_info(u32 local_id)
+static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
 {
 	struct cm_timewait_info *timewait_info;
 
@@ -715,14 +716,15 @@ retest:
 EXPORT_SYMBOL(ib_destroy_cm_id);
 
 int ib_cm_listen(struct ib_cm_id *cm_id,
-		 u64 service_id,
-		 u64 service_mask)
+		 __be64 service_id,
+		 __be64 service_mask)
 {
 	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
 	unsigned long flags;
 	int ret = 0;
 
-	service_mask = service_mask ? service_mask : ~0ULL;
+	service_mask = service_mask ? service_mask :
+		       __constant_cpu_to_be64(~0ULL);
 	service_id &= service_mask;
 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -735,8 +737,8 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
 
 	spin_lock_irqsave(&cm.lock, flags);
 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
-		cm_id->service_id = __cpu_to_be64(cm.listen_service_id++);
-		cm_id->service_mask = ~0ULL;
+		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
+		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
 	} else {
 		cm_id->service_id = service_id;
 		cm_id->service_mask = service_mask;
@@ -752,18 +754,19 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
 }
 EXPORT_SYMBOL(ib_cm_listen);
 
-static u64 cm_form_tid(struct cm_id_private *cm_id_priv,
+static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
 			  enum cm_msg_sequence msg_seq)
 {
 	u64 hi_tid, low_tid;
 
 	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
-	low_tid = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
+	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
+			 (msg_seq << 30));
 	return cpu_to_be64(hi_tid | low_tid);
 }
 
 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
-			      enum cm_msg_attr_id attr_id, u64 tid)
+			      __be16 attr_id, __be64 tid)
 {
 	hdr->base_version  = IB_MGMT_BASE_VERSION;
 	hdr->mgmt_class    = IB_MGMT_CLASS_CM;
@@ -896,7 +899,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto error1;
 	}
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = ~0ULL;
+	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
 	cm_id_priv->timeout_ms = cm_convert_to_ms(
 				    param->primary_path->packet_life_time) * 2 +
 				 cm_convert_to_ms(
@@ -963,7 +966,7 @@ static int cm_issue_rej(struct cm_port *port,
 	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
 	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
 	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
-	rej_msg->reason = reason;
+	rej_msg->reason = cpu_to_be16(reason);
 
 	if (ari && ari_length) {
 		cm_rej_set_reject_info_len(rej_msg, ari_length);
@@ -977,8 +980,8 @@ static int cm_issue_rej(struct cm_port *port,
 	return ret;
 }
 
-static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid,
-				    u32 local_qpn, u32 remote_qpn)
+static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
+				    __be32 local_qpn, __be32 remote_qpn)
 {
 	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
 		((local_ca_guid == remote_ca_guid) &&
@@ -1137,7 +1140,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
 		break;
 	}
 
-	rej_msg->reason = reason;
+	rej_msg->reason = cpu_to_be16(reason);
 	if (ari && ari_length) {
 		cm_rej_set_reject_info_len(rej_msg, ari_length);
 		memcpy(rej_msg->ari, ari, ari_length);
@@ -1276,7 +1279,7 @@ static int cm_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = req_msg->service_id;
-	cm_id_priv->id.service_mask = ~0ULL;
+	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
 
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
 	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
@@ -1969,7 +1972,7 @@ static void cm_format_rej_event(struct cm_work *work)
 	param = &work->cm_event.param.rej_rcvd;
 	param->ari = rej_msg->ari;
 	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
-	param->reason = rej_msg->reason;
+	param->reason = __be16_to_cpu(rej_msg->reason);
 	work->cm_event.private_data = &rej_msg->private_data;
 }
 
@@ -1978,20 +1981,20 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
 	struct cm_timewait_info *timewait_info;
 	struct cm_id_private *cm_id_priv;
 	unsigned long flags;
-	u32 remote_id;
+	__be32 remote_id;
 
 	remote_id = rej_msg->local_comm_id;
 
-	if (rej_msg->reason == IB_CM_REJ_TIMEOUT) {
+	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
 		spin_lock_irqsave(&cm.lock, flags);
-		timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari),
+		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
 						  remote_id);
 		if (!timewait_info) {
 			spin_unlock_irqrestore(&cm.lock, flags);
 			return NULL;
 		}
 		cm_id_priv = idr_find(&cm.local_id_table,
-				      (int) timewait_info->work.local_id);
+				      (__force int) timewait_info->work.local_id);
 		if (cm_id_priv) {
 			if (cm_id_priv->id.remote_id == remote_id)
 				atomic_inc(&cm_id_priv->refcount);
@@ -2032,7 +2035,7 @@ static int cm_rej_handler(struct cm_work *work)
 		/* fall through */
 	case IB_CM_REQ_RCVD:
 	case IB_CM_MRA_REQ_SENT:
-		if (rej_msg->reason == IB_CM_REJ_STALE_CONN)
+		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
 			cm_enter_timewait(cm_id_priv);
 		else
 			cm_reset_to_idle(cm_id_priv);
@@ -2553,7 +2556,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
 	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
 			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
 	sidr_req_msg->request_id = cm_id_priv->id.local_id;
-	sidr_req_msg->pkey = param->pkey;
+	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
 	sidr_req_msg->service_id = param->service_id;
 
 	if (param->private_data && param->private_data_len)
@@ -2580,7 +2583,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		goto out;
 
 	cm_id->service_id = param->service_id;
-	cm_id->service_mask = ~0ULL;
+	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
 	cm_id_priv->timeout_ms = param->timeout_ms;
 	cm_id_priv->max_cm_retries = param->max_cm_retries;
 	ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2621,7 +2624,7 @@ static void cm_format_sidr_req_event(struct cm_work *work,
 	sidr_req_msg = (struct cm_sidr_req_msg *)
 				work->mad_recv_wc->recv_buf.mad;
 	param = &work->cm_event.param.sidr_req_rcvd;
-	param->pkey = sidr_req_msg->pkey;
+	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
 	param->listen_id = listen_id;
 	param->device = work->port->mad_agent->device;
 	param->port = work->port->port_num;
@@ -2645,7 +2648,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	sidr_req_msg = (struct cm_sidr_req_msg *)
 				work->mad_recv_wc->recv_buf.mad;
 	wc = work->mad_recv_wc->wc;
-	cm_id_priv->av.dgid.global.subnet_prefix = wc->slid;
+	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
 	cm_id_priv->av.dgid.global.interface_id = 0;
 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
 				&cm_id_priv->av);
@@ -2673,7 +2676,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
 	cm_id_priv->id.context = cur_cm_id_priv->id.context;
 	cm_id_priv->id.service_id = sidr_req_msg->service_id;
-	cm_id_priv->id.service_mask = ~0ULL;
+	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
 
 	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
 	cm_process_work(cm_id_priv, work);
@@ -3175,10 +3178,10 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);
 
-static u64 cm_get_ca_guid(struct ib_device *device)
+static __be64 cm_get_ca_guid(struct ib_device *device)
 {
 	struct ib_device_attr *device_attr;
-	u64 guid;
+	__be64 guid;
 	int ret;
 
 	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
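
One construct in the cm.c hunks above deserves a gloss: cm_form_tid() packs the 64-bit MAD transaction ID from the sending MAD agent's hi_tid (upper 32 bits) and the connection's local_id tagged with a message-sequence code in bits 31:30 (lower 32 bits), then stores the result big-endian; the new (__force u32) cast merely strips the sparse annotation from local_id before the arithmetic. A self-contained sketch of the same packing, with plain stdint types standing in for the kernel's and the final cpu_to_be64() step left out:

#include <stdint.h>
#include <stdio.h>

/* hi_tid identifies the MAD agent; bits 31:30 of the low word carry a
 * 2-bit message-sequence tag (CM_MSG_SEQUENCE_REQ, ..._SIDR in the
 * kernel); bits 29:0 carry the local connection id. */
static uint64_t form_tid(uint32_t hi_tid, uint32_t local_id,
                         unsigned msg_seq)
{
        uint64_t hi  = (uint64_t) hi_tid << 32;
        uint64_t low = (uint32_t) (local_id | (msg_seq << 30));

        return hi | low;
}

int main(void)
{
        /* arbitrary example values */
        printf("tid = %#llx\n",
               (unsigned long long) form_tid(7, 42, 3));
        return 0;
}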
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 15a309a77b2b..813ab70bf6d5 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -34,7 +34,7 @@
 #if !defined(CM_MSGS_H)
 #define CM_MSGS_H
 
-#include <ib_mad.h>
+#include <rdma/ib_mad.h>
 
 /*
  * Parameters to routines below should be in network-byte order, and values
@@ -43,19 +43,17 @@
 
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
 
-enum cm_msg_attr_id {
-	CM_REQ_ATTR_ID	    = __constant_htons(0x0010),
-	CM_MRA_ATTR_ID	    = __constant_htons(0x0011),
-	CM_REJ_ATTR_ID	    = __constant_htons(0x0012),
-	CM_REP_ATTR_ID	    = __constant_htons(0x0013),
-	CM_RTU_ATTR_ID	    = __constant_htons(0x0014),
-	CM_DREQ_ATTR_ID	    = __constant_htons(0x0015),
-	CM_DREP_ATTR_ID	    = __constant_htons(0x0016),
-	CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017),
-	CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018),
-	CM_LAP_ATTR_ID	    = __constant_htons(0x0019),
-	CM_APR_ATTR_ID	    = __constant_htons(0x001A)
-};
+#define CM_REQ_ATTR_ID	    __constant_htons(0x0010)
+#define CM_MRA_ATTR_ID	    __constant_htons(0x0011)
+#define CM_REJ_ATTR_ID	    __constant_htons(0x0012)
+#define CM_REP_ATTR_ID	    __constant_htons(0x0013)
+#define CM_RTU_ATTR_ID	    __constant_htons(0x0014)
+#define CM_DREQ_ATTR_ID	    __constant_htons(0x0015)
+#define CM_DREP_ATTR_ID	    __constant_htons(0x0016)
+#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
+#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
+#define CM_LAP_ATTR_ID	    __constant_htons(0x0019)
+#define CM_APR_ATTR_ID	    __constant_htons(0x001A)
 
 enum cm_msg_sequence {
 	CM_MSG_SEQUENCE_REQ,
@@ -67,35 +65,35 @@ enum cm_msg_sequence {
 struct cm_req_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 rsvd4;
-	u64 service_id;
-	u64 local_ca_guid;
-	u32 rsvd24;
-	u32 local_qkey;
+	__be32 local_comm_id;
+	__be32 rsvd4;
+	__be64 service_id;
+	__be64 local_ca_guid;
+	__be32 rsvd24;
+	__be32 local_qkey;
 	/* local QPN:24, responder resources:8 */
-	u32 offset32;
+	__be32 offset32;
 	/* local EECN:24, initiator depth:8 */
-	u32 offset36;
+	__be32 offset36;
 	/*
 	 * remote EECN:24, remote CM response timeout:5,
 	 * transport service type:2, end-to-end flow control:1
 	 */
-	u32 offset40;
+	__be32 offset40;
 	/* starting PSN:24, local CM response timeout:5, retry count:3 */
-	u32 offset44;
-	u16 pkey;
+	__be32 offset44;
+	__be16 pkey;
 	/* path MTU:4, RDC exists:1, RNR retry count:3. */
 	u8 offset50;
 	/* max CM Retries:4, SRQ:1, rsvd:3 */
 	u8 offset51;
 
-	u16 primary_local_lid;
-	u16 primary_remote_lid;
+	__be16 primary_local_lid;
+	__be16 primary_remote_lid;
 	union ib_gid primary_local_gid;
 	union ib_gid primary_remote_gid;
 	/* flow label:20, rsvd:6, packet rate:6 */
-	u32 primary_offset88;
+	__be32 primary_offset88;
 	u8 primary_traffic_class;
 	u8 primary_hop_limit;
 	/* SL:4, subnet local:1, rsvd:3 */
@@ -103,12 +101,12 @@ struct cm_req_msg {
 	/* local ACK timeout:5, rsvd:3 */
 	u8 primary_offset95;
 
-	u16 alt_local_lid;
-	u16 alt_remote_lid;
+	__be16 alt_local_lid;
+	__be16 alt_remote_lid;
 	union ib_gid alt_local_gid;
 	union ib_gid alt_remote_gid;
 	/* flow label:20, rsvd:6, packet rate:6 */
-	u32 alt_offset132;
+	__be32 alt_offset132;
 	u8 alt_traffic_class;
 	u8 alt_hop_limit;
 	/* SL:4, subnet local:1, rsvd:3 */
@@ -120,12 +118,12 @@ struct cm_req_msg {
 
 } __attribute__ ((packed));
 
-static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
 {
 	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
 }
 
-static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn)
+static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
 {
 	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 					(be32_to_cpu(req_msg->offset32) &
@@ -208,13 +206,13 @@ static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
 					 0xFFFFFFFE));
 }
 
-static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
 {
 	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
 }
 
 static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
-					   u32 starting_psn)
+					   __be32 starting_psn)
 {
 	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
 			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
@@ -288,13 +286,13 @@ static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
 					  ((srq & 0x1) << 3));
 }
 
-static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
 {
-	return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12));
+	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
 }
 
 static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
-						 u32 flow_label)
+						 __be32 flow_label)
 {
 	req_msg->primary_offset88 = cpu_to_be32(
 				    (be32_to_cpu(req_msg->primary_offset88) &
@@ -350,13 +348,13 @@ static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_m
 						 (local_ack_timeout << 3));
 }
 
-static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
 {
-	return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12));
+	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
 }
 
 static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
-					     u32 flow_label)
+					     __be32 flow_label)
 {
 	req_msg->alt_offset132 = cpu_to_be32(
 				 (be32_to_cpu(req_msg->alt_offset132) &
@@ -422,8 +420,8 @@ enum cm_msg_response {
 struct cm_mra_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 	/* message MRAed:2, rsvd:6 */
 	u8 offset8;
 	/* service timeout:5, rsvd:3 */
@@ -458,13 +456,13 @@ static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
 struct cm_rej_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 	/* message REJected:2, rsvd:6 */
 	u8 offset8;
 	/* reject info length:7, rsvd:1. */
 	u8 offset9;
-	u16 reason;
+	__be16 reason;
 	u8 ari[IB_CM_REJ_ARI_LENGTH];
 
 	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
@@ -495,45 +493,45 @@ static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
 struct cm_rep_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
-	u32 local_qkey;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
+	__be32 local_qkey;
 	/* local QPN:24, rsvd:8 */
-	u32 offset12;
+	__be32 offset12;
 	/* local EECN:24, rsvd:8 */
-	u32 offset16;
+	__be32 offset16;
 	/* starting PSN:24 rsvd:8 */
-	u32 offset20;
+	__be32 offset20;
 	u8 resp_resources;
 	u8 initiator_depth;
 	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
 	u8 offset26;
 	/* RNR retry count:3, SRQ:1, rsvd:5 */
 	u8 offset27;
-	u64 local_ca_guid;
+	__be64 local_ca_guid;
 
 	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
 
 } __attribute__ ((packed));
 
-static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
+static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
 }
 
-static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn)
+static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
 {
 	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
 }
 
-static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
+static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
 }
 
 static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
-					   u32 starting_psn)
+					   __be32 starting_psn)
 {
 	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
 			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
@@ -600,8 +598,8 @@ static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
 struct cm_rtu_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 
 	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
 
@@ -610,21 +608,21 @@ struct cm_rtu_msg {
 struct cm_dreq_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 	/* remote QPN/EECN:24, rsvd:8 */
-	u32 offset8;
+	__be32 offset8;
 
 	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
 
 } __attribute__ ((packed));
 
-static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
+static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
 {
 	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
 }
 
-static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
+static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
 {
 	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
@@ -633,8 +631,8 @@ static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
 struct cm_drep_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 
 	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
 
@@ -643,37 +641,37 @@ struct cm_drep_msg {
 struct cm_lap_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 
-	u32 rsvd8;
+	__be32 rsvd8;
 	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
-	u32 offset12;
-	u32 rsvd16;
+	__be32 offset12;
+	__be32 rsvd16;
 
-	u16 alt_local_lid;
-	u16 alt_remote_lid;
+	__be16 alt_local_lid;
+	__be16 alt_remote_lid;
 	union ib_gid alt_local_gid;
 	union ib_gid alt_remote_gid;
 	/* flow label:20, rsvd:4, traffic class:8 */
-	u32 offset56;
+	__be32 offset56;
 	u8 alt_hop_limit;
 	/* rsvd:2, packet rate:6 */
-	uint8_t offset61;
+	u8 offset61;
 	/* SL:4, subnet local:1, rsvd:3 */
-	uint8_t offset62;
+	u8 offset62;
 	/* local ACK timeout:5, rsvd:3 */
-	uint8_t offset63;
+	u8 offset63;
 
 	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
 } __attribute__ ((packed));
 
-static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
+static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
 {
 	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
 }
 
-static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn)
+static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
 {
 	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 					(be32_to_cpu(lap_msg->offset12) &
@@ -693,17 +691,17 @@ static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
 					 0xFFFFFF07));
 }
 
-static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
+static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
 {
-	return be32_to_cpu(lap_msg->offset56) >> 12;
+	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
 }
 
 static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
-					 u32 flow_label)
+					 __be32 flow_label)
 {
-	lap_msg->offset56 = cpu_to_be32((flow_label << 12) |
-					(be32_to_cpu(lap_msg->offset56) &
-					 0x00000FFF));
+	lap_msg->offset56 = cpu_to_be32(
+				 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
+				 (be32_to_cpu(flow_label) << 12));
 }
 
 static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
@@ -766,8 +764,8 @@ static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
 struct cm_apr_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 local_comm_id;
-	u32 remote_comm_id;
+	__be32 local_comm_id;
+	__be32 remote_comm_id;
 
 	u8 info_length;
 	u8 ap_status;
@@ -779,10 +777,10 @@ struct cm_apr_msg {
 struct cm_sidr_req_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 request_id;
-	u16 pkey;
-	u16 rsvd;
-	u64 service_id;
+	__be32 request_id;
+	__be16 pkey;
+	__be16 rsvd;
+	__be64 service_id;
 
 	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
 } __attribute__ ((packed));
@@ -790,26 +788,26 @@ struct cm_sidr_req_msg {
 struct cm_sidr_rep_msg {
 	struct ib_mad_hdr hdr;
 
-	u32 request_id;
+	__be32 request_id;
 	u8 status;
 	u8 info_length;
-	u16 rsvd;
+	__be16 rsvd;
 	/* QPN:24, rsvd:8 */
-	u32 offset8;
-	u64 service_id;
-	u32 qkey;
+	__be32 offset8;
+	__be64 service_id;
+	__be32 qkey;
 	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
 
 	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
 } __attribute__ ((packed));
 
-static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
+static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
 }
 
 static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
-				       u32 qpn)
+				       __be32 qpn)
 {
 	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
 					(be32_to_cpu(sidr_rep_msg->offset8) &
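
All of the cm_msgs.h accessors above follow one arithmetic pattern: a sub-field such as a 24-bit QPN occupies the high bits of a 32-bit big-endian word, so the getter converts to CPU order and shifts down while the setter shifts up and merges with the untouched low bits. The same arithmetic in isolation, with plain uint32_t standing in for __be32 and the cpu_to_be32()/be32_to_cpu() round trip elided:

#include <assert.h>
#include <stdint.h>

/* QPN lives in bits 31:8, a reserved byte in bits 7:0, mirroring
 * cm_rep_get_local_qpn()/cm_rep_set_local_qpn() minus the byte-order
 * conversions. */
static uint32_t get_qpn(uint32_t word)
{
        return word >> 8;
}

static uint32_t set_qpn(uint32_t word, uint32_t qpn)
{
        return (qpn << 8) | (word & 0x000000FF);
}

int main(void)
{
        uint32_t word = 0x000000AB;             /* reserved byte set */

        word = set_qpn(word, 0xCDEF01);         /* 24-bit QPN */
        assert(get_qpn(word) == 0xCDEF01);
        assert((word & 0xFF) == 0xAB);          /* low byte preserved */
        return 0;
}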
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 797049626ff6..7ad47a4b166b 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
-#include <ib_verbs.h>
+#include <rdma/ib_verbs.h>
 
 int ib_device_register_sysfs(struct ib_device *device);
 void ib_device_unregister_sysfs(struct ib_device *device);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9197e92d708a..d3cf84e01587 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7763b31abba7..d34a6f1c4f4c 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -39,7 +39,7 @@
 #include <linux/jhash.h>
 #include <linux/kthread.h>
 
-#include <ib_fmr_pool.h>
+#include <rdma/ib_fmr_pool.h>
 
 #include "core_priv.h"
 
@@ -334,6 +334,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 {
 	struct ib_pool_fmr *fmr;
 	struct ib_pool_fmr *tmp;
+	LIST_HEAD(fmr_list);
 	int i;
 
 	kthread_stop(pool->thread);
@@ -341,6 +342,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 
 	i = 0;
 	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+		if (fmr->remap_count) {
+			INIT_LIST_HEAD(&fmr_list);
+			list_add_tail(&fmr->fmr->list, &fmr_list);
+			ib_unmap_fmr(&fmr_list);
+		}
 		ib_dealloc_fmr(fmr->fmr);
 		list_del(&fmr->list);
 		kfree(fmr);
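
The fmr_pool.c change fixes pool teardown: an FMR that has been remapped at least once has to be unmapped before ib_dealloc_fmr() can release it cleanly, and ib_unmap_fmr() takes a whole list of FMRs so the invalidation can be batched. A kernel-context sketch (not a standalone program) of a consumer driving the same calls; my_fmrs/nfmr are hypothetical caller state and error handling is trimmed:

#include <linux/list.h>
#include <rdma/ib_verbs.h>

static void release_fmrs(struct ib_fmr **my_fmrs, int nfmr)
{
        LIST_HEAD(fmr_list);
        int i;

        /* one ib_unmap_fmr() call invalidates every mapping on the
         * list, instead of paying for a verbs call per FMR */
        for (i = 0; i < nfmr; ++i)
                list_add_tail(&my_fmrs[i]->list, &fmr_list);
        ib_unmap_fmr(&fmr_list);

        for (i = 0; i < nfmr; ++i)
                ib_dealloc_fmr(my_fmrs[i]);
}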
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b97e210ce9c8..a4a4d9c1eef3 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -693,7 +693,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		goto out;
 	}
 
-	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
+	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+		     send_wr->wr.ud.pkey_index,
 		     send_wr->wr.ud.port_num, &mad_wc);
 
 	/* No GRH for DR SMP */
@@ -1554,7 +1555,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
 }
 
 struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
 
@@ -1597,7 +1598,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags;
-	u64 tid;
+	__be64 tid;
 
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -2165,7 +2166,8 @@ static void local_completions(void *data)
 			 * Defined behavior is to complete response
 			 * before request
 			 */
-			build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
+			build_smp_wc(local->wr_id,
+				     be16_to_cpu(IB_LID_PERMISSIVE),
 				     0 /* pkey index */,
 				     recv_mad_agent->agent.port_num, &wc);
 
@@ -2294,7 +2296,7 @@ static void timeout_sends(void *data)
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 }
 
-static void ib_mad_thread_completion_handler(struct ib_cq *cq)
+static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
 {
 	struct ib_mad_port_private *port_priv = cq->cq_context;
 
@@ -2574,8 +2576,7 @@ static int ib_mad_port_open(struct ib_device *device,
 
 	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
 	port_priv->cq = ib_create_cq(port_priv->device,
-				     (ib_comp_handler)
-					ib_mad_thread_completion_handler,
+				     ib_mad_thread_completion_handler,
 				     NULL, port_priv, cq_size);
 	if (IS_ERR(port_priv->cq)) {
 		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
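
The last mad.c hunk follows from a matching ib_verbs.h change in this merge: ib_comp_handler now receives the CQ's context pointer as a second argument, so the handler can be passed to ib_create_cq() without a cast. A kernel-context sketch of the new shape; my_ctx and the CQE count of 128 are placeholder choices:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

struct my_ctx {
        struct ib_cq *cq;
};

/* matches the two-argument ib_comp_handler typedef */
static void my_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct my_ctx *ctx = cq_context;        /* == cq->cq_context */

        (void) ctx;     /* reap completions / re-arm the CQ here */
}

static int my_create_cq(struct ib_device *device, struct my_ctx *ctx)
{
        /* args: device, comp handler, async event handler, context, cqe */
        ctx->cq = ib_create_cq(device, my_comp_handler, NULL, ctx, 128);
        return IS_ERR(ctx->cq) ? PTR_ERR(ctx->cq) : 0;
}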
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 568da10b05ab..f1ba794e0daa 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -40,8 +40,8 @@
 #include <linux/pci.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
-#include <ib_mad.h>
-#include <ib_smi.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
 
 
 #define PFX "ib_mad: "
@@ -121,7 +121,7 @@ struct ib_mad_send_wr_private {
121 struct ib_send_wr send_wr; 121 struct ib_send_wr send_wr;
122 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 122 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
123 u64 wr_id; /* client WR ID */ 123 u64 wr_id; /* client WR ID */
124 u64 tid; 124 __be64 tid;
125 unsigned long timeout; 125 unsigned long timeout;
126 int retries; 126 int retries;
127 int retry; 127 int retry;
@@ -144,7 +144,7 @@ struct ib_mad_local_private {
144 struct ib_send_wr send_wr; 144 struct ib_send_wr send_wr;
145 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 145 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
146 u64 wr_id; /* client WR ID */ 146 u64 wr_id; /* client WR ID */
147 u64 tid; 147 __be64 tid;
148}; 148};
149 149
150struct ib_mad_mgmt_method_table { 150struct ib_mad_mgmt_method_table {
@@ -210,7 +210,7 @@ extern kmem_cache_t *ib_mad_cache;
210int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); 210int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
211 211
212struct ib_mad_send_wr_private * 212struct ib_mad_send_wr_private *
213ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid); 213ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);
214 214
215void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 215void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
216 struct ib_mad_send_wc *mad_send_wc); 216 struct ib_mad_send_wc *mad_send_wc);
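The u64 to __be64 conversions in this file are sparse endianness annotations rather than behavior changes: the transaction ID travels in network byte order, and typing it as __be64 lets a `make C=1` build flag any accidental mixing with host-order values. A userspace sketch of the idiom, with plain typedefs and hand-rolled swap helpers standing in for the kernel's __be64 and cpu_to_be64 (names here are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

typedef uint64_t be64;  /* stand-in for the kernel's __be64 */

static be64 cpu_to_be64_sketch(uint64_t v)
{
	/* build the 64-bit swap from the two 32-bit halves */
	return ((be64)htonl((uint32_t)v) << 32) | htonl((uint32_t)(v >> 32));
}

static uint64_t be64_to_cpu_sketch(be64 v)
{
	return ((uint64_t)ntohl((uint32_t)v) << 32) | ntohl((uint32_t)(v >> 32));
}

int main(void)
{
	be64 tid = cpu_to_be64_sketch(0x0102030405060708ULL);

	/* two wire-order values compare without conversion, which is
	 * why ib_find_send_mad() can take the tid as __be64 directly */
	printf("match: %d\n", tid == cpu_to_be64_sketch(0x0102030405060708ULL));

	/* any host-order use must convert explicitly */
	printf("host tid: 0x%016llx\n",
	       (unsigned long long)be64_to_cpu_sketch(tid));
	return 0;
}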
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8f1eb80e421f..43fd805e0265 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -61,7 +61,7 @@ struct mad_rmpp_recv {
61 int seg_num; 61 int seg_num;
62 int newwin; 62 int newwin;
63 63
64 u64 tid; 64 __be64 tid;
65 u32 src_qp; 65 u32 src_qp;
66 u16 slid; 66 u16 slid;
67 u8 mgmt_class; 67 u8 mgmt_class;
@@ -100,6 +100,121 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
100 } 100 }
101} 101}
102 102
103static int data_offset(u8 mgmt_class)
104{
105 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
106 return offsetof(struct ib_sa_mad, data);
107 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
108 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
109 return offsetof(struct ib_vendor_mad, data);
110 else
111 return offsetof(struct ib_rmpp_mad, data);
112}
113
114static void format_ack(struct ib_rmpp_mad *ack,
115 struct ib_rmpp_mad *data,
116 struct mad_rmpp_recv *rmpp_recv)
117{
118 unsigned long flags;
119
120 memcpy(&ack->mad_hdr, &data->mad_hdr,
121 data_offset(data->mad_hdr.mgmt_class));
122
123 ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
124 ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
125 ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
126
127 spin_lock_irqsave(&rmpp_recv->lock, flags);
128 rmpp_recv->last_ack = rmpp_recv->seg_num;
129 ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
130 ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
131 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
132}
133
134static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
135 struct ib_mad_recv_wc *recv_wc)
136{
137 struct ib_mad_send_buf *msg;
138 struct ib_send_wr *bad_send_wr;
139 int hdr_len, ret;
140
141 hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
142 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
143 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
144 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
145 GFP_KERNEL);
146 if (!msg)
147 return;
148
149 format_ack((struct ib_rmpp_mad *) msg->mad,
150 (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
151 ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
152 &bad_send_wr);
153 if (ret)
154 ib_free_send_mad(msg);
155}
156
157static int alloc_response_msg(struct ib_mad_agent *agent,
158 struct ib_mad_recv_wc *recv_wc,
159 struct ib_mad_send_buf **msg)
160{
161 struct ib_mad_send_buf *m;
162 struct ib_ah *ah;
163 int hdr_len;
164
165 ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
166 recv_wc->recv_buf.grh, agent->port_num);
167 if (IS_ERR(ah))
168 return PTR_ERR(ah);
169
170 hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
171 m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
172 recv_wc->wc->pkey_index, ah, 1, hdr_len,
173 sizeof(struct ib_rmpp_mad) - hdr_len,
174 GFP_KERNEL);
175 if (IS_ERR(m)) {
176 ib_destroy_ah(ah);
177 return PTR_ERR(m);
178 }
179 *msg = m;
180 return 0;
181}
182
183static void free_msg(struct ib_mad_send_buf *msg)
184{
185 ib_destroy_ah(msg->send_wr.wr.ud.ah);
186 ib_free_send_mad(msg);
187}
188
189static void nack_recv(struct ib_mad_agent_private *agent,
190 struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
191{
192 struct ib_mad_send_buf *msg;
193 struct ib_rmpp_mad *rmpp_mad;
194 struct ib_send_wr *bad_send_wr;
195 int ret;
196
197 ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
198 if (ret)
199 return;
200
201 rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
202 memcpy(rmpp_mad, recv_wc->recv_buf.mad,
203 data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
204
205 rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
206 rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
207 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
208 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
209 rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
210 rmpp_mad->rmpp_hdr.seg_num = 0;
211 rmpp_mad->rmpp_hdr.paylen_newwin = 0;
212
213 ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
214 if (ret)
215 free_msg(msg);
216}
217
103static void recv_timeout_handler(void *data) 218static void recv_timeout_handler(void *data)
104{ 219{
105 struct mad_rmpp_recv *rmpp_recv = data; 220 struct mad_rmpp_recv *rmpp_recv = data;
@@ -115,8 +230,8 @@ static void recv_timeout_handler(void *data)
115 list_del(&rmpp_recv->list); 230 list_del(&rmpp_recv->list);
116 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); 231 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
117 232
118 /* TODO: send abort. */
119 rmpp_wc = rmpp_recv->rmpp_wc; 233 rmpp_wc = rmpp_recv->rmpp_wc;
234 nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
120 destroy_rmpp_recv(rmpp_recv); 235 destroy_rmpp_recv(rmpp_recv);
121 ib_free_recv_mad(rmpp_wc); 236 ib_free_recv_mad(rmpp_wc);
122} 237}
@@ -230,60 +345,6 @@ insert_rmpp_recv(struct ib_mad_agent_private *agent,
230 return cur_rmpp_recv; 345 return cur_rmpp_recv;
231} 346}
232 347
233static int data_offset(u8 mgmt_class)
234{
235 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
236 return offsetof(struct ib_sa_mad, data);
237 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
238 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
239 return offsetof(struct ib_vendor_mad, data);
240 else
241 return offsetof(struct ib_rmpp_mad, data);
242}
243
244static void format_ack(struct ib_rmpp_mad *ack,
245 struct ib_rmpp_mad *data,
246 struct mad_rmpp_recv *rmpp_recv)
247{
248 unsigned long flags;
249
250 memcpy(&ack->mad_hdr, &data->mad_hdr,
251 data_offset(data->mad_hdr.mgmt_class));
252
253 ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
254 ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
255 ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
256
257 spin_lock_irqsave(&rmpp_recv->lock, flags);
258 rmpp_recv->last_ack = rmpp_recv->seg_num;
259 ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
260 ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
261 spin_unlock_irqrestore(&rmpp_recv->lock, flags);
262}
263
264static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
265 struct ib_mad_recv_wc *recv_wc)
266{
267 struct ib_mad_send_buf *msg;
268 struct ib_send_wr *bad_send_wr;
269 int hdr_len, ret;
270
271 hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
272 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
273 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
274 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
275 GFP_KERNEL);
276 if (!msg)
277 return;
278
279 format_ack((struct ib_rmpp_mad *) msg->mad,
280 (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
281 ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
282 &bad_send_wr);
283 if (ret)
284 ib_free_send_mad(msg);
285}
286
287static inline int get_last_flag(struct ib_mad_recv_buf *seg) 348static inline int get_last_flag(struct ib_mad_recv_buf *seg)
288{ 349{
289 struct ib_rmpp_mad *rmpp_mad; 350 struct ib_rmpp_mad *rmpp_mad;
@@ -559,6 +620,34 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
559 return ib_send_mad(mad_send_wr); 620 return ib_send_mad(mad_send_wr);
560} 621}
561 622
623static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
624 u8 rmpp_status)
625{
626 struct ib_mad_send_wr_private *mad_send_wr;
627 struct ib_mad_send_wc wc;
628 unsigned long flags;
629
630 spin_lock_irqsave(&agent->lock, flags);
631 mad_send_wr = ib_find_send_mad(agent, tid);
632 if (!mad_send_wr)
633 goto out; /* Unmatched send */
634
635 if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
636 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
637 goto out; /* Send is already done */
638
639 ib_mark_mad_done(mad_send_wr);
640 spin_unlock_irqrestore(&agent->lock, flags);
641
642 wc.status = IB_WC_REM_ABORT_ERR;
643 wc.vendor_err = rmpp_status;
644 wc.wr_id = mad_send_wr->wr_id;
645 ib_mad_complete_send_wr(mad_send_wr, &wc);
646 return;
647out:
648 spin_unlock_irqrestore(&agent->lock, flags);
649}
650
562static void process_rmpp_ack(struct ib_mad_agent_private *agent, 651static void process_rmpp_ack(struct ib_mad_agent_private *agent,
563 struct ib_mad_recv_wc *mad_recv_wc) 652 struct ib_mad_recv_wc *mad_recv_wc)
564{ 653{
@@ -568,11 +657,21 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
568 int seg_num, newwin, ret; 657 int seg_num, newwin, ret;
569 658
570 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 659 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
571 if (rmpp_mad->rmpp_hdr.rmpp_status) 660 if (rmpp_mad->rmpp_hdr.rmpp_status) {
661 abort_send(agent, rmpp_mad->mad_hdr.tid,
662 IB_MGMT_RMPP_STATUS_BAD_STATUS);
663 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
572 return; 664 return;
665 }
573 666
574 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); 667 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
575 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 668 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
669 if (newwin < seg_num) {
670 abort_send(agent, rmpp_mad->mad_hdr.tid,
671 IB_MGMT_RMPP_STATUS_W2S);
672 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
673 return;
674 }
576 675
577 spin_lock_irqsave(&agent->lock, flags); 676 spin_lock_irqsave(&agent->lock, flags);
578 mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid); 677 mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
@@ -583,8 +682,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
583 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) 682 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
584 goto out; /* Send is already done */ 683 goto out; /* Send is already done */
585 684
586 if (seg_num > mad_send_wr->total_seg) 685 if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
587 goto out; /* Bad ACK */ 686 spin_unlock_irqrestore(&agent->lock, flags);
687 abort_send(agent, rmpp_mad->mad_hdr.tid,
688 IB_MGMT_RMPP_STATUS_S2B);
689 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
690 return;
691 }
588 692
589 if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack) 693 if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
590 goto out; /* Old ACK */ 694 goto out; /* Old ACK */
@@ -628,6 +732,72 @@ out:
628 spin_unlock_irqrestore(&agent->lock, flags); 732 spin_unlock_irqrestore(&agent->lock, flags);
629} 733}
630 734
735static struct ib_mad_recv_wc *
736process_rmpp_data(struct ib_mad_agent_private *agent,
737 struct ib_mad_recv_wc *mad_recv_wc)
738{
739 struct ib_rmpp_hdr *rmpp_hdr;
740 u8 rmpp_status;
741
742 rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;
743
744 if (rmpp_hdr->rmpp_status) {
745 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
746 goto bad;
747 }
748
749 if (rmpp_hdr->seg_num == __constant_htonl(1)) {
750 if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
751 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
752 goto bad;
753 }
754 return start_rmpp(agent, mad_recv_wc);
755 } else {
756 if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
757 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
758 goto bad;
759 }
760 return continue_rmpp(agent, mad_recv_wc);
761 }
762bad:
763 nack_recv(agent, mad_recv_wc, rmpp_status);
764 ib_free_recv_mad(mad_recv_wc);
765 return NULL;
766}
767
768static void process_rmpp_stop(struct ib_mad_agent_private *agent,
769 struct ib_mad_recv_wc *mad_recv_wc)
770{
771 struct ib_rmpp_mad *rmpp_mad;
772
773 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
774
775 if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
776 abort_send(agent, rmpp_mad->mad_hdr.tid,
777 IB_MGMT_RMPP_STATUS_BAD_STATUS);
778 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
779 } else
780 abort_send(agent, rmpp_mad->mad_hdr.tid,
781 rmpp_mad->rmpp_hdr.rmpp_status);
782}
783
784static void process_rmpp_abort(struct ib_mad_agent_private *agent,
785 struct ib_mad_recv_wc *mad_recv_wc)
786{
787 struct ib_rmpp_mad *rmpp_mad;
788
789 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
790
791 if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
792 rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
793 abort_send(agent, rmpp_mad->mad_hdr.tid,
794 IB_MGMT_RMPP_STATUS_BAD_STATUS);
795 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
796 } else
797 abort_send(agent, rmpp_mad->mad_hdr.tid,
798 rmpp_mad->rmpp_hdr.rmpp_status);
799}
800
631struct ib_mad_recv_wc * 801struct ib_mad_recv_wc *
632ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, 802ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
633 struct ib_mad_recv_wc *mad_recv_wc) 803 struct ib_mad_recv_wc *mad_recv_wc)
@@ -638,23 +808,29 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
638 if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE)) 808 if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
639 return mad_recv_wc; 809 return mad_recv_wc;
640 810
641 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) 811 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
812 abort_send(agent, rmpp_mad->mad_hdr.tid,
813 IB_MGMT_RMPP_STATUS_UNV);
814 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
642 goto out; 815 goto out;
816 }
643 817
644 switch (rmpp_mad->rmpp_hdr.rmpp_type) { 818 switch (rmpp_mad->rmpp_hdr.rmpp_type) {
645 case IB_MGMT_RMPP_TYPE_DATA: 819 case IB_MGMT_RMPP_TYPE_DATA:
646 if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1)) 820 return process_rmpp_data(agent, mad_recv_wc);
647 return start_rmpp(agent, mad_recv_wc);
648 else
649 return continue_rmpp(agent, mad_recv_wc);
650 case IB_MGMT_RMPP_TYPE_ACK: 821 case IB_MGMT_RMPP_TYPE_ACK:
651 process_rmpp_ack(agent, mad_recv_wc); 822 process_rmpp_ack(agent, mad_recv_wc);
652 break; 823 break;
653 case IB_MGMT_RMPP_TYPE_STOP: 824 case IB_MGMT_RMPP_TYPE_STOP:
825 process_rmpp_stop(agent, mad_recv_wc);
826 break;
654 case IB_MGMT_RMPP_TYPE_ABORT: 827 case IB_MGMT_RMPP_TYPE_ABORT:
655 /* TODO: process_rmpp_nack(agent, mad_recv_wc); */ 828 process_rmpp_abort(agent, mad_recv_wc);
656 break; 829 break;
657 default: 830 default:
831 abort_send(agent, rmpp_mad->mad_hdr.tid,
832 IB_MGMT_RMPP_STATUS_BADT);
833 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
658 break; 834 break;
659 } 835 }
660out: 836out:
@@ -714,7 +890,10 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
714 if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { 890 if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
715 msg = (struct ib_mad_send_buf *) (unsigned long) 891 msg = (struct ib_mad_send_buf *) (unsigned long)
716 mad_send_wc->wr_id; 892 mad_send_wc->wr_id;
717 ib_free_send_mad(msg); 893 if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
894 ib_free_send_mad(msg);
895 else
896 free_msg(msg);
718 return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ 897 return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
719 } 898 }
720 899
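The new abort_send()/nack_recv() pair gives the RMPP paths real error handling where TODOs used to be: a malformed ACK now terminates the transfer with a specific status (BAD_STATUS for a non-zero status field, W2S when the window is smaller than the ACKed segment, S2B when the segment lies beyond the send) instead of being silently dropped. A hedged sketch of those ACK sanity checks, with simplified stand-in types rather than the kernel structures:

/*
 * The status names mirror the IB_MGMT_RMPP_STATUS_* values used in
 * the diff; the struct and enum are illustrative simplifications.
 */
#include <stdio.h>

enum rmpp_status {
	RMPP_STATUS_OK,
	RMPP_STATUS_BAD_STATUS, /* non-zero status on an ACK */
	RMPP_STATUS_W2S,        /* window smaller than the ACKed segment */
	RMPP_STATUS_S2B,        /* segment beyond the send or the window */
};

struct rmpp_ack {
	unsigned int status;
	int seg_num;   /* highest segment the receiver reports */
	int newwin;    /* new right edge of the send window */
};

static enum rmpp_status check_ack(const struct rmpp_ack *ack,
				  int total_seg, int cur_win)
{
	if (ack->status)
		return RMPP_STATUS_BAD_STATUS;
	if (ack->newwin < ack->seg_num)
		return RMPP_STATUS_W2S;
	if (ack->seg_num > total_seg || ack->seg_num > cur_win)
		return RMPP_STATUS_S2B;
	return RMPP_STATUS_OK;
}

int main(void)
{
	struct rmpp_ack bad = { .status = 0, .seg_num = 5, .newwin = 3 };

	/* window 3 < segment 5: the sender must abort with W2S */
	printf("verdict: %d\n", check_ack(&bad, 10, 8));
	return 0;
}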
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index eb5ff54c10d7..35df5010e723 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,7 +33,7 @@
32 * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $ 33 * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $
33 */ 34 */
34 35
35#include <ib_pack.h> 36#include <rdma/ib_pack.h>
36 37
37static u64 value_read(int offset, int size, void *structure) 38static u64 value_read(int offset, int size, void *structure)
38{ 39{
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 795184931c83..126ac80db7b8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -44,8 +44,8 @@
44#include <linux/kref.h> 44#include <linux/kref.h>
45#include <linux/idr.h> 45#include <linux/idr.h>
46 46
47#include <ib_pack.h> 47#include <rdma/ib_pack.h>
48#include <ib_sa.h> 48#include <rdma/ib_sa.h>
49 49
50MODULE_AUTHOR("Roland Dreier"); 50MODULE_AUTHOR("Roland Dreier");
51MODULE_DESCRIPTION("InfiniBand subnet administration query support"); 51MODULE_DESCRIPTION("InfiniBand subnet administration query support");
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index b4b284324a33..35852e794e26 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -1,9 +1,10 @@
1/* 1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 2 * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 3 * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * 8 *
8 * This software is available to you under a choice of one of two 9 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU 10 * licenses. You may choose to be licensed under the terms of the GNU
@@ -36,7 +37,7 @@
36 * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $ 37 * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $
37 */ 38 */
38 39
39#include <ib_smi.h> 40#include <rdma/ib_smi.h>
40#include "smi.h" 41#include "smi.h"
41 42
42/* 43/*
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 90d51b179abe..fae1c2dcee51 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,7 @@
34 36
35#include "core_priv.h" 37#include "core_priv.h"
36 38
37#include <ib_mad.h> 39#include <rdma/ib_mad.h>
38 40
39struct ib_port { 41struct ib_port {
40 struct kobject kobj; 42 struct kobject kobj;
@@ -253,14 +255,14 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
253 return ret; 255 return ret;
254 256
255 return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", 257 return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
256 be16_to_cpu(((u16 *) gid.raw)[0]), 258 be16_to_cpu(((__be16 *) gid.raw)[0]),
257 be16_to_cpu(((u16 *) gid.raw)[1]), 259 be16_to_cpu(((__be16 *) gid.raw)[1]),
258 be16_to_cpu(((u16 *) gid.raw)[2]), 260 be16_to_cpu(((__be16 *) gid.raw)[2]),
259 be16_to_cpu(((u16 *) gid.raw)[3]), 261 be16_to_cpu(((__be16 *) gid.raw)[3]),
260 be16_to_cpu(((u16 *) gid.raw)[4]), 262 be16_to_cpu(((__be16 *) gid.raw)[4]),
261 be16_to_cpu(((u16 *) gid.raw)[5]), 263 be16_to_cpu(((__be16 *) gid.raw)[5]),
262 be16_to_cpu(((u16 *) gid.raw)[6]), 264 be16_to_cpu(((__be16 *) gid.raw)[6]),
263 be16_to_cpu(((u16 *) gid.raw)[7])); 265 be16_to_cpu(((__be16 *) gid.raw)[7]));
264} 266}
265 267
266static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr, 268static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
@@ -332,11 +334,11 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
332 break; 334 break;
333 case 16: 335 case 16:
334 ret = sprintf(buf, "%u\n", 336 ret = sprintf(buf, "%u\n",
335 be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 8))); 337 be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
336 break; 338 break;
337 case 32: 339 case 32:
338 ret = sprintf(buf, "%u\n", 340 ret = sprintf(buf, "%u\n",
339 be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8))); 341 be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
340 break; 342 break;
341 default: 343 default:
342 ret = 0; 344 ret = 0;
@@ -598,10 +600,10 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
598 return ret; 600 return ret;
599 601
600 return sprintf(buf, "%04x:%04x:%04x:%04x\n", 602 return sprintf(buf, "%04x:%04x:%04x:%04x\n",
601 be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]), 603 be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
602 be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]), 604 be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
603 be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]), 605 be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
604 be16_to_cpu(((u16 *) &attr.sys_image_guid)[3])); 606 be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
605} 607}
606 608
607static ssize_t show_node_guid(struct class_device *cdev, char *buf) 609static ssize_t show_node_guid(struct class_device *cdev, char *buf)
@@ -615,10 +617,10 @@ static ssize_t show_node_guid(struct class_device *cdev, char *buf)
615 return ret; 617 return ret;
616 618
617 return sprintf(buf, "%04x:%04x:%04x:%04x\n", 619 return sprintf(buf, "%04x:%04x:%04x:%04x\n",
618 be16_to_cpu(((u16 *) &attr.node_guid)[0]), 620 be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
619 be16_to_cpu(((u16 *) &attr.node_guid)[1]), 621 be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
620 be16_to_cpu(((u16 *) &attr.node_guid)[2]), 622 be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
621 be16_to_cpu(((u16 *) &attr.node_guid)[3])); 623 be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
622} 624}
623 625
624static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL); 626static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
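The sysfs casts above are again annotation-only: GID and GUID bytes are stored big-endian, so reading them as __be16 words before be16_to_cpu() is the honest typing for what the code already did. A plain-C equivalent of the GID formatting, for illustration only (not the kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static void print_gid(const uint8_t raw[16])
{
	uint16_t w;
	int i;

	for (i = 0; i < 8; i++) {
		/* each pair of raw bytes is one big-endian word */
		memcpy(&w, raw + 2 * i, sizeof(w));
		printf("%04x%s", ntohs(w), i < 7 ? ":" : "\n");
	}
}

int main(void)
{
	/* fe80::2:c903:0:1, an illustrative link-local GID */
	uint8_t gid[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0,
			    0x00, 0x02, 0xc9, 0x03, 0, 0, 0, 0x01 };

	print_gid(gid);
	return 0;
}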
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 61d07c732f49..79595826ccc7 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -73,14 +74,18 @@ static struct semaphore ctx_id_mutex;
73static struct idr ctx_id_table; 74static struct idr ctx_id_table;
74static int ctx_id_rover = 0; 75static int ctx_id_rover = 0;
75 76
76static struct ib_ucm_context *ib_ucm_ctx_get(int id) 77static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
77{ 78{
78 struct ib_ucm_context *ctx; 79 struct ib_ucm_context *ctx;
79 80
80 down(&ctx_id_mutex); 81 down(&ctx_id_mutex);
81 ctx = idr_find(&ctx_id_table, id); 82 ctx = idr_find(&ctx_id_table, id);
82 if (ctx) 83 if (!ctx)
83 ctx->ref++; 84 ctx = ERR_PTR(-ENOENT);
85 else if (ctx->file != file)
86 ctx = ERR_PTR(-EINVAL);
87 else
88 atomic_inc(&ctx->ref);
84 up(&ctx_id_mutex); 89 up(&ctx_id_mutex);
85 90
86 return ctx; 91 return ctx;
@@ -88,21 +93,37 @@ static struct ib_ucm_context *ib_ucm_ctx_get(int id)
88 93
89static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) 94static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
90{ 95{
96 if (atomic_dec_and_test(&ctx->ref))
97 wake_up(&ctx->wait);
98}
99
100static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id)
101{
102 struct ib_ucm_context *ctx;
91 struct ib_ucm_event *uevent; 103 struct ib_ucm_event *uevent;
92 104
93 down(&ctx_id_mutex); 105 down(&ctx_id_mutex);
94 106 ctx = idr_find(&ctx_id_table, id);
95 ctx->ref--; 107 if (!ctx)
96 if (!ctx->ref) 108 ctx = ERR_PTR(-ENOENT);
109 else if (ctx->file != file)
110 ctx = ERR_PTR(-EINVAL);
111 else
97 idr_remove(&ctx_id_table, ctx->id); 112 idr_remove(&ctx_id_table, ctx->id);
98
99 up(&ctx_id_mutex); 113 up(&ctx_id_mutex);
100 114
101 if (ctx->ref) 115 if (IS_ERR(ctx))
102 return; 116 return PTR_ERR(ctx);
103 117
104 down(&ctx->file->mutex); 118 atomic_dec(&ctx->ref);
119 wait_event(ctx->wait, !atomic_read(&ctx->ref));
120
121 /* No new events will be generated after destroying the cm_id. */
122 if (!IS_ERR(ctx->cm_id))
123 ib_destroy_cm_id(ctx->cm_id);
105 124
125 /* Cleanup events not yet reported to the user. */
126 down(&file->mutex);
106 list_del(&ctx->file_list); 127 list_del(&ctx->file_list);
107 while (!list_empty(&ctx->events)) { 128 while (!list_empty(&ctx->events)) {
108 129
@@ -117,13 +138,10 @@ static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
117 138
118 kfree(uevent); 139 kfree(uevent);
119 } 140 }
141 up(&file->mutex);
120 142
121 up(&ctx->file->mutex);
122
123 ucm_dbg("Destroyed CM ID <%d>\n", ctx->id);
124
125 ib_destroy_cm_id(ctx->cm_id);
126 kfree(ctx); 143 kfree(ctx);
144 return 0;
127} 145}
128 146
129static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) 147static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
@@ -135,11 +153,11 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
135 if (!ctx) 153 if (!ctx)
136 return NULL; 154 return NULL;
137 155
138 ctx->ref = 1; /* user reference */ 156 atomic_set(&ctx->ref, 1);
157 init_waitqueue_head(&ctx->wait);
139 ctx->file = file; 158 ctx->file = file;
140 159
141 INIT_LIST_HEAD(&ctx->events); 160 INIT_LIST_HEAD(&ctx->events);
142 init_MUTEX(&ctx->mutex);
143 161
144 list_add_tail(&ctx->file_list, &file->ctxs); 162 list_add_tail(&ctx->file_list, &file->ctxs);
145 163
@@ -177,8 +195,8 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
177 if (!kpath || !upath) 195 if (!kpath || !upath)
178 return; 196 return;
179 197
180 memcpy(upath->dgid, kpath->dgid.raw, sizeof(union ib_gid)); 198 memcpy(upath->dgid, kpath->dgid.raw, sizeof *upath->dgid);
181 memcpy(upath->sgid, kpath->sgid.raw, sizeof(union ib_gid)); 199 memcpy(upath->sgid, kpath->sgid.raw, sizeof *upath->sgid);
182 200
183 upath->dlid = kpath->dlid; 201 upath->dlid = kpath->dlid;
184 upath->slid = kpath->slid; 202 upath->slid = kpath->slid;
@@ -201,10 +219,11 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
201 kpath->packet_life_time_selector; 219 kpath->packet_life_time_selector;
202} 220}
203 221
204static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq, 222static void ib_ucm_event_req_get(struct ib_ucm_context *ctx,
223 struct ib_ucm_req_event_resp *ureq,
205 struct ib_cm_req_event_param *kreq) 224 struct ib_cm_req_event_param *kreq)
206{ 225{
207 ureq->listen_id = (long)kreq->listen_id->context; 226 ureq->listen_id = ctx->id;
208 227
209 ureq->remote_ca_guid = kreq->remote_ca_guid; 228 ureq->remote_ca_guid = kreq->remote_ca_guid;
210 ureq->remote_qkey = kreq->remote_qkey; 229 ureq->remote_qkey = kreq->remote_qkey;
@@ -240,34 +259,11 @@ static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
240 urep->srq = krep->srq; 259 urep->srq = krep->srq;
241} 260}
242 261
243static void ib_ucm_event_rej_get(struct ib_ucm_rej_event_resp *urej, 262static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx,
244 struct ib_cm_rej_event_param *krej) 263 struct ib_ucm_sidr_req_event_resp *ureq,
245{
246 urej->reason = krej->reason;
247}
248
249static void ib_ucm_event_mra_get(struct ib_ucm_mra_event_resp *umra,
250 struct ib_cm_mra_event_param *kmra)
251{
252 umra->timeout = kmra->service_timeout;
253}
254
255static void ib_ucm_event_lap_get(struct ib_ucm_lap_event_resp *ulap,
256 struct ib_cm_lap_event_param *klap)
257{
258 ib_ucm_event_path_get(&ulap->path, klap->alternate_path);
259}
260
261static void ib_ucm_event_apr_get(struct ib_ucm_apr_event_resp *uapr,
262 struct ib_cm_apr_event_param *kapr)
263{
264 uapr->status = kapr->ap_status;
265}
266
267static void ib_ucm_event_sidr_req_get(struct ib_ucm_sidr_req_event_resp *ureq,
268 struct ib_cm_sidr_req_event_param *kreq) 264 struct ib_cm_sidr_req_event_param *kreq)
269{ 265{
270 ureq->listen_id = (long)kreq->listen_id->context; 266 ureq->listen_id = ctx->id;
271 ureq->pkey = kreq->pkey; 267 ureq->pkey = kreq->pkey;
272} 268}
273 269
@@ -279,19 +275,18 @@ static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
279 urep->qpn = krep->qpn; 275 urep->qpn = krep->qpn;
280}; 276};
281 277
282static int ib_ucm_event_process(struct ib_cm_event *evt, 278static int ib_ucm_event_process(struct ib_ucm_context *ctx,
279 struct ib_cm_event *evt,
283 struct ib_ucm_event *uvt) 280 struct ib_ucm_event *uvt)
284{ 281{
285 void *info = NULL; 282 void *info = NULL;
286 int result;
287 283
288 switch (evt->event) { 284 switch (evt->event) {
289 case IB_CM_REQ_RECEIVED: 285 case IB_CM_REQ_RECEIVED:
290 ib_ucm_event_req_get(&uvt->resp.u.req_resp, 286 ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp,
291 &evt->param.req_rcvd); 287 &evt->param.req_rcvd);
292 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; 288 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
293 uvt->resp.present |= (evt->param.req_rcvd.primary_path ? 289 uvt->resp.present = IB_UCM_PRES_PRIMARY;
294 IB_UCM_PRES_PRIMARY : 0);
295 uvt->resp.present |= (evt->param.req_rcvd.alternate_path ? 290 uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
296 IB_UCM_PRES_ALTERNATE : 0); 291 IB_UCM_PRES_ALTERNATE : 0);
297 break; 292 break;
@@ -299,57 +294,46 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
299 ib_ucm_event_rep_get(&uvt->resp.u.rep_resp, 294 ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
300 &evt->param.rep_rcvd); 295 &evt->param.rep_rcvd);
301 uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 296 uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
302
303 break; 297 break;
304 case IB_CM_RTU_RECEIVED: 298 case IB_CM_RTU_RECEIVED:
305 uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE; 299 uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
306 uvt->resp.u.send_status = evt->param.send_status; 300 uvt->resp.u.send_status = evt->param.send_status;
307
308 break; 301 break;
309 case IB_CM_DREQ_RECEIVED: 302 case IB_CM_DREQ_RECEIVED:
310 uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE; 303 uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
311 uvt->resp.u.send_status = evt->param.send_status; 304 uvt->resp.u.send_status = evt->param.send_status;
312
313 break; 305 break;
314 case IB_CM_DREP_RECEIVED: 306 case IB_CM_DREP_RECEIVED:
315 uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE; 307 uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
316 uvt->resp.u.send_status = evt->param.send_status; 308 uvt->resp.u.send_status = evt->param.send_status;
317
318 break; 309 break;
319 case IB_CM_MRA_RECEIVED: 310 case IB_CM_MRA_RECEIVED:
320 ib_ucm_event_mra_get(&uvt->resp.u.mra_resp, 311 uvt->resp.u.mra_resp.timeout =
321 &evt->param.mra_rcvd); 312 evt->param.mra_rcvd.service_timeout;
322 uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE; 313 uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
323
324 break; 314 break;
325 case IB_CM_REJ_RECEIVED: 315 case IB_CM_REJ_RECEIVED:
326 ib_ucm_event_rej_get(&uvt->resp.u.rej_resp, 316 uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason;
327 &evt->param.rej_rcvd);
328 uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 317 uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
329 uvt->info_len = evt->param.rej_rcvd.ari_length; 318 uvt->info_len = evt->param.rej_rcvd.ari_length;
330 info = evt->param.rej_rcvd.ari; 319 info = evt->param.rej_rcvd.ari;
331
332 break; 320 break;
333 case IB_CM_LAP_RECEIVED: 321 case IB_CM_LAP_RECEIVED:
334 ib_ucm_event_lap_get(&uvt->resp.u.lap_resp, 322 ib_ucm_event_path_get(&uvt->resp.u.lap_resp.path,
335 &evt->param.lap_rcvd); 323 evt->param.lap_rcvd.alternate_path);
336 uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE; 324 uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
337 uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ? 325 uvt->resp.present = IB_UCM_PRES_ALTERNATE;
338 IB_UCM_PRES_ALTERNATE : 0);
339 break; 326 break;
340 case IB_CM_APR_RECEIVED: 327 case IB_CM_APR_RECEIVED:
341 ib_ucm_event_apr_get(&uvt->resp.u.apr_resp, 328 uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status;
342 &evt->param.apr_rcvd);
343 uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE; 329 uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
344 uvt->info_len = evt->param.apr_rcvd.info_len; 330 uvt->info_len = evt->param.apr_rcvd.info_len;
345 info = evt->param.apr_rcvd.apr_info; 331 info = evt->param.apr_rcvd.apr_info;
346
347 break; 332 break;
348 case IB_CM_SIDR_REQ_RECEIVED: 333 case IB_CM_SIDR_REQ_RECEIVED:
349 ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp, 334 ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp,
350 &evt->param.sidr_req_rcvd); 335 &evt->param.sidr_req_rcvd);
351 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; 336 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
352
353 break; 337 break;
354 case IB_CM_SIDR_REP_RECEIVED: 338 case IB_CM_SIDR_REP_RECEIVED:
355 ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp, 339 ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
@@ -357,43 +341,35 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
357 uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 341 uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
358 uvt->info_len = evt->param.sidr_rep_rcvd.info_len; 342 uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
359 info = evt->param.sidr_rep_rcvd.info; 343 info = evt->param.sidr_rep_rcvd.info;
360
361 break; 344 break;
362 default: 345 default:
363 uvt->resp.u.send_status = evt->param.send_status; 346 uvt->resp.u.send_status = evt->param.send_status;
364
365 break; 347 break;
366 } 348 }
367 349
368 if (uvt->data_len && evt->private_data) { 350 if (uvt->data_len) {
369
370 uvt->data = kmalloc(uvt->data_len, GFP_KERNEL); 351 uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
371 if (!uvt->data) { 352 if (!uvt->data)
372 result = -ENOMEM; 353 goto err1;
373 goto error;
374 }
375 354
376 memcpy(uvt->data, evt->private_data, uvt->data_len); 355 memcpy(uvt->data, evt->private_data, uvt->data_len);
377 uvt->resp.present |= IB_UCM_PRES_DATA; 356 uvt->resp.present |= IB_UCM_PRES_DATA;
378 } 357 }
379 358
380 if (uvt->info_len && info) { 359 if (uvt->info_len) {
381
382 uvt->info = kmalloc(uvt->info_len, GFP_KERNEL); 360 uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
383 if (!uvt->info) { 361 if (!uvt->info)
384 result = -ENOMEM; 362 goto err2;
385 goto error;
386 }
387 363
388 memcpy(uvt->info, info, uvt->info_len); 364 memcpy(uvt->info, info, uvt->info_len);
389 uvt->resp.present |= IB_UCM_PRES_INFO; 365 uvt->resp.present |= IB_UCM_PRES_INFO;
390 } 366 }
391
392 return 0; 367 return 0;
393error: 368
394 kfree(uvt->info); 369err2:
395 kfree(uvt->data); 370 kfree(uvt->data);
396 return result; 371err1:
372 return -ENOMEM;
397} 373}
398 374
399static int ib_ucm_event_handler(struct ib_cm_id *cm_id, 375static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
@@ -403,63 +379,42 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
403 struct ib_ucm_context *ctx; 379 struct ib_ucm_context *ctx;
404 int result = 0; 380 int result = 0;
405 int id; 381 int id;
406 /*
407 * lookup correct context based on event type.
408 */
409 switch (event->event) {
410 case IB_CM_REQ_RECEIVED:
411 id = (long)event->param.req_rcvd.listen_id->context;
412 break;
413 case IB_CM_SIDR_REQ_RECEIVED:
414 id = (long)event->param.sidr_req_rcvd.listen_id->context;
415 break;
416 default:
417 id = (long)cm_id->context;
418 break;
419 }
420 382
421 ucm_dbg("Event. CM ID <%d> event <%d>\n", id, event->event); 383 ctx = cm_id->context;
422
423 ctx = ib_ucm_ctx_get(id);
424 if (!ctx)
425 return -ENOENT;
426 384
427 if (event->event == IB_CM_REQ_RECEIVED || 385 if (event->event == IB_CM_REQ_RECEIVED ||
428 event->event == IB_CM_SIDR_REQ_RECEIVED) 386 event->event == IB_CM_SIDR_REQ_RECEIVED)
429 id = IB_UCM_CM_ID_INVALID; 387 id = IB_UCM_CM_ID_INVALID;
388 else
389 id = ctx->id;
430 390
431 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); 391 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
432 if (!uevent) { 392 if (!uevent)
433 result = -ENOMEM; 393 goto err1;
434 goto done;
435 }
436 394
437 memset(uevent, 0, sizeof(*uevent)); 395 memset(uevent, 0, sizeof(*uevent));
438
439 uevent->resp.id = id; 396 uevent->resp.id = id;
440 uevent->resp.event = event->event; 397 uevent->resp.event = event->event;
441 398
442 result = ib_ucm_event_process(event, uevent); 399 result = ib_ucm_event_process(ctx, event, uevent);
443 if (result) 400 if (result)
444 goto done; 401 goto err2;
445 402
446 uevent->ctx = ctx; 403 uevent->ctx = ctx;
447 uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED || 404 uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL;
448 event->event == IB_CM_SIDR_REQ_RECEIVED ) ?
449 cm_id : NULL);
450 405
451 down(&ctx->file->mutex); 406 down(&ctx->file->mutex);
452
453 list_add_tail(&uevent->file_list, &ctx->file->events); 407 list_add_tail(&uevent->file_list, &ctx->file->events);
454 list_add_tail(&uevent->ctx_list, &ctx->events); 408 list_add_tail(&uevent->ctx_list, &ctx->events);
455
456 wake_up_interruptible(&ctx->file->poll_wait); 409 wake_up_interruptible(&ctx->file->poll_wait);
457
458 up(&ctx->file->mutex); 410 up(&ctx->file->mutex);
459done: 411 return 0;
460 ctx->error = result; 412
461 ib_ucm_ctx_put(ctx); /* func reference */ 413err2:
462 return result; 414 kfree(uevent);
415err1:
416 /* Destroy new cm_id's */
417 return (id == IB_UCM_CM_ID_INVALID);
463} 418}
464 419
465static ssize_t ib_ucm_event(struct ib_ucm_file *file, 420static ssize_t ib_ucm_event(struct ib_ucm_file *file,
@@ -517,9 +472,8 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
517 goto done; 472 goto done;
518 } 473 }
519 474
520 ctx->cm_id = uevent->cm_id; 475 ctx->cm_id = uevent->cm_id;
521 ctx->cm_id->cm_handler = ib_ucm_event_handler; 476 ctx->cm_id->context = ctx;
522 ctx->cm_id->context = (void *)(unsigned long)ctx->id;
523 477
524 uevent->resp.id = ctx->id; 478 uevent->resp.id = ctx->id;
525 479
@@ -585,30 +539,29 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
585 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 539 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
586 return -EFAULT; 540 return -EFAULT;
587 541
542 down(&file->mutex);
588 ctx = ib_ucm_ctx_alloc(file); 543 ctx = ib_ucm_ctx_alloc(file);
544 up(&file->mutex);
589 if (!ctx) 545 if (!ctx)
590 return -ENOMEM; 546 return -ENOMEM;
591 547
592 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, 548 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx);
593 (void *)(unsigned long)ctx->id); 549 if (IS_ERR(ctx->cm_id)) {
594 if (!ctx->cm_id) { 550 result = PTR_ERR(ctx->cm_id);
595 result = -ENOMEM; 551 goto err;
596 goto err_cm;
597 } 552 }
598 553
599 resp.id = ctx->id; 554 resp.id = ctx->id;
600 if (copy_to_user((void __user *)(unsigned long)cmd.response, 555 if (copy_to_user((void __user *)(unsigned long)cmd.response,
601 &resp, sizeof(resp))) { 556 &resp, sizeof(resp))) {
602 result = -EFAULT; 557 result = -EFAULT;
603 goto err_ret; 558 goto err;
604 } 559 }
605 560
606 return 0; 561 return 0;
607err_ret:
608 ib_destroy_cm_id(ctx->cm_id);
609err_cm:
610 ib_ucm_ctx_put(ctx); /* user reference */
611 562
563err:
564 ib_ucm_destroy_ctx(file, ctx->id);
612 return result; 565 return result;
613} 566}
614 567
@@ -617,19 +570,11 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
617 int in_len, int out_len) 570 int in_len, int out_len)
618{ 571{
619 struct ib_ucm_destroy_id cmd; 572 struct ib_ucm_destroy_id cmd;
620 struct ib_ucm_context *ctx;
621 573
622 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 574 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
623 return -EFAULT; 575 return -EFAULT;
624 576
625 ctx = ib_ucm_ctx_get(cmd.id); 577 return ib_ucm_destroy_ctx(file, cmd.id);
626 if (!ctx)
627 return -ENOENT;
628
629 ib_ucm_ctx_put(ctx); /* user reference */
630 ib_ucm_ctx_put(ctx); /* func reference */
631
632 return 0;
633} 578}
634 579
635static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, 580static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
@@ -647,15 +592,9 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
647 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 592 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
648 return -EFAULT; 593 return -EFAULT;
649 594
650 ctx = ib_ucm_ctx_get(cmd.id); 595 ctx = ib_ucm_ctx_get(file, cmd.id);
651 if (!ctx) 596 if (IS_ERR(ctx))
652 return -ENOENT; 597 return PTR_ERR(ctx);
653
654 down(&ctx->file->mutex);
655 if (ctx->file != file) {
656 result = -EINVAL;
657 goto done;
658 }
659 598
660 resp.service_id = ctx->cm_id->service_id; 599 resp.service_id = ctx->cm_id->service_id;
661 resp.service_mask = ctx->cm_id->service_mask; 600 resp.service_mask = ctx->cm_id->service_mask;
@@ -666,9 +605,7 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
666 &resp, sizeof(resp))) 605 &resp, sizeof(resp)))
667 result = -EFAULT; 606 result = -EFAULT;
668 607
669done: 608 ib_ucm_ctx_put(ctx);
670 up(&ctx->file->mutex);
671 ib_ucm_ctx_put(ctx); /* func reference */
672 return result; 609 return result;
673} 610}
674 611
@@ -683,19 +620,12 @@ static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
683 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 620 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
684 return -EFAULT; 621 return -EFAULT;
685 622
686 ctx = ib_ucm_ctx_get(cmd.id); 623 ctx = ib_ucm_ctx_get(file, cmd.id);
687 if (!ctx) 624 if (IS_ERR(ctx))
688 return -ENOENT; 625 return PTR_ERR(ctx);
689 626
690 down(&ctx->file->mutex); 627 result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask);
691 if (ctx->file != file) 628 ib_ucm_ctx_put(ctx);
692 result = -EINVAL;
693 else
694 result = ib_cm_listen(ctx->cm_id, cmd.service_id,
695 cmd.service_mask);
696
697 up(&ctx->file->mutex);
698 ib_ucm_ctx_put(ctx); /* func reference */
699 return result; 629 return result;
700} 630}
701 631
@@ -710,18 +640,12 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
710 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 640 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
711 return -EFAULT; 641 return -EFAULT;
712 642
713 ctx = ib_ucm_ctx_get(cmd.id); 643 ctx = ib_ucm_ctx_get(file, cmd.id);
714 if (!ctx) 644 if (IS_ERR(ctx))
715 return -ENOENT; 645 return PTR_ERR(ctx);
716
717 down(&ctx->file->mutex);
718 if (ctx->file != file)
719 result = -EINVAL;
720 else
721 result = ib_cm_establish(ctx->cm_id);
722 646
723 up(&ctx->file->mutex); 647 result = ib_cm_establish(ctx->cm_id);
724 ib_ucm_ctx_put(ctx); /* func reference */ 648 ib_ucm_ctx_put(ctx);
725 return result; 649 return result;
726} 650}
727 651
@@ -768,8 +692,8 @@ static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
768 return -EFAULT; 692 return -EFAULT;
769 } 693 }
770 694
771 memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid)); 695 memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof sa_path->dgid);
772 memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid)); 696 memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof sa_path->sgid);
773 697
774 sa_path->dlid = ucm_path.dlid; 698 sa_path->dlid = ucm_path.dlid;
775 sa_path->slid = ucm_path.slid; 699 sa_path->slid = ucm_path.slid;
@@ -839,25 +763,17 @@ static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
839 param.max_cm_retries = cmd.max_cm_retries; 763 param.max_cm_retries = cmd.max_cm_retries;
840 param.srq = cmd.srq; 764 param.srq = cmd.srq;
841 765
842 ctx = ib_ucm_ctx_get(cmd.id); 766 ctx = ib_ucm_ctx_get(file, cmd.id);
843 if (!ctx) { 767 if (!IS_ERR(ctx)) {
844 result = -ENOENT;
845 goto done;
846 }
847
848 down(&ctx->file->mutex);
849 if (ctx->file != file)
850 result = -EINVAL;
851 else
852 result = ib_send_cm_req(ctx->cm_id, &param); 768 result = ib_send_cm_req(ctx->cm_id, &param);
769 ib_ucm_ctx_put(ctx);
770 } else
771 result = PTR_ERR(ctx);
853 772
854 up(&ctx->file->mutex);
855 ib_ucm_ctx_put(ctx); /* func reference */
856done: 773done:
857 kfree(param.private_data); 774 kfree(param.private_data);
858 kfree(param.primary_path); 775 kfree(param.primary_path);
859 kfree(param.alternate_path); 776 kfree(param.alternate_path);
860
861 return result; 777 return result;
862} 778}
863 779
@@ -890,23 +806,14 @@ static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
890 param.rnr_retry_count = cmd.rnr_retry_count; 806 param.rnr_retry_count = cmd.rnr_retry_count;
891 param.srq = cmd.srq; 807 param.srq = cmd.srq;
892 808
893 ctx = ib_ucm_ctx_get(cmd.id); 809 ctx = ib_ucm_ctx_get(file, cmd.id);
894 if (!ctx) { 810 if (!IS_ERR(ctx)) {
895 result = -ENOENT;
896 goto done;
897 }
898
899 down(&ctx->file->mutex);
900 if (ctx->file != file)
901 result = -EINVAL;
902 else
903 result = ib_send_cm_rep(ctx->cm_id, &param); 811 result = ib_send_cm_rep(ctx->cm_id, &param);
812 ib_ucm_ctx_put(ctx);
813 } else
814 result = PTR_ERR(ctx);
904 815
905 up(&ctx->file->mutex);
906 ib_ucm_ctx_put(ctx); /* func reference */
907done:
908 kfree(param.private_data); 816 kfree(param.private_data);
909
910 return result; 817 return result;
911} 818}
912 819
@@ -928,23 +835,14 @@ static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
928 if (result) 835 if (result)
929 return result; 836 return result;
930 837
931 ctx = ib_ucm_ctx_get(cmd.id); 838 ctx = ib_ucm_ctx_get(file, cmd.id);
932 if (!ctx) { 839 if (!IS_ERR(ctx)) {
933 result = -ENOENT;
934 goto done;
935 }
936
937 down(&ctx->file->mutex);
938 if (ctx->file != file)
939 result = -EINVAL;
940 else
941 result = func(ctx->cm_id, private_data, cmd.len); 840 result = func(ctx->cm_id, private_data, cmd.len);
841 ib_ucm_ctx_put(ctx);
842 } else
843 result = PTR_ERR(ctx);
942 844
943 up(&ctx->file->mutex);
944 ib_ucm_ctx_put(ctx); /* func reference */
945done:
946 kfree(private_data); 845 kfree(private_data);
947
948 return result; 846 return result;
949} 847}
950 848
@@ -995,26 +893,17 @@ static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
995 if (result) 893 if (result)
996 goto done; 894 goto done;
997 895
998 ctx = ib_ucm_ctx_get(cmd.id); 896 ctx = ib_ucm_ctx_get(file, cmd.id);
999 if (!ctx) { 897 if (!IS_ERR(ctx)) {
1000 result = -ENOENT; 898 result = func(ctx->cm_id, cmd.status, info, cmd.info_len,
1001 goto done;
1002 }
1003
1004 down(&ctx->file->mutex);
1005 if (ctx->file != file)
1006 result = -EINVAL;
1007 else
1008 result = func(ctx->cm_id, cmd.status,
1009 info, cmd.info_len,
1010 data, cmd.data_len); 899 data, cmd.data_len);
900 ib_ucm_ctx_put(ctx);
901 } else
902 result = PTR_ERR(ctx);
1011 903
1012 up(&ctx->file->mutex);
1013 ib_ucm_ctx_put(ctx); /* func reference */
1014done: 904done:
1015 kfree(data); 905 kfree(data);
1016 kfree(info); 906 kfree(info);
1017
1018 return result; 907 return result;
1019} 908}
1020 909
@@ -1048,24 +937,14 @@ static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
1048 if (result) 937 if (result)
1049 return result; 938 return result;
1050 939
1051 ctx = ib_ucm_ctx_get(cmd.id); 940 ctx = ib_ucm_ctx_get(file, cmd.id);
1052 if (!ctx) { 941 if (!IS_ERR(ctx)) {
1053 result = -ENOENT; 942 result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len);
1054 goto done; 943 ib_ucm_ctx_put(ctx);
1055 } 944 } else
945 result = PTR_ERR(ctx);
1056 946
1057 down(&ctx->file->mutex);
1058 if (ctx->file != file)
1059 result = -EINVAL;
1060 else
1061 result = ib_send_cm_mra(ctx->cm_id, cmd.timeout,
1062 data, cmd.len);
1063
1064 up(&ctx->file->mutex);
1065 ib_ucm_ctx_put(ctx); /* func reference */
1066done:
1067 kfree(data); 947 kfree(data);
1068
1069 return result; 948 return result;
1070} 949}
1071 950
@@ -1090,24 +969,16 @@ static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
1090 if (result) 969 if (result)
1091 goto done; 970 goto done;
1092 971
1093 ctx = ib_ucm_ctx_get(cmd.id); 972 ctx = ib_ucm_ctx_get(file, cmd.id);
1094 if (!ctx) { 973 if (!IS_ERR(ctx)) {
1095 result = -ENOENT;
1096 goto done;
1097 }
1098
1099 down(&ctx->file->mutex);
1100 if (ctx->file != file)
1101 result = -EINVAL;
1102 else
1103 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len); 974 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
975 ib_ucm_ctx_put(ctx);
976 } else
977 result = PTR_ERR(ctx);
1104 978
1105 up(&ctx->file->mutex);
1106 ib_ucm_ctx_put(ctx); /* func reference */
1107done: 979done:
1108 kfree(data); 980 kfree(data);
1109 kfree(path); 981 kfree(path);
1110
1111 return result; 982 return result;
1112} 983}
1113 984
@@ -1140,24 +1011,16 @@ static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
1140 param.max_cm_retries = cmd.max_cm_retries; 1011 param.max_cm_retries = cmd.max_cm_retries;
1141 param.pkey = cmd.pkey; 1012 param.pkey = cmd.pkey;
1142 1013
1143 ctx = ib_ucm_ctx_get(cmd.id); 1014 ctx = ib_ucm_ctx_get(file, cmd.id);
1144 if (!ctx) { 1015 if (!IS_ERR(ctx)) {
1145 result = -ENOENT;
1146 goto done;
1147 }
1148
1149 down(&ctx->file->mutex);
1150 if (ctx->file != file)
1151 result = -EINVAL;
1152 else
1153 result = ib_send_cm_sidr_req(ctx->cm_id, &param); 1016 result = ib_send_cm_sidr_req(ctx->cm_id, &param);
1017 ib_ucm_ctx_put(ctx);
1018 } else
1019 result = PTR_ERR(ctx);
1154 1020
1155 up(&ctx->file->mutex);
1156 ib_ucm_ctx_put(ctx); /* func reference */
1157done: 1021done:
1158 kfree(param.private_data); 1022 kfree(param.private_data);
1159 kfree(param.path); 1023 kfree(param.path);
1160
1161 return result; 1024 return result;
1162} 1025}
1163 1026
@@ -1184,30 +1047,22 @@ static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
1184 if (result) 1047 if (result)
1185 goto done; 1048 goto done;
1186 1049
1187 param.qp_num = cmd.qpn; 1050 param.qp_num = cmd.qpn;
1188 param.qkey = cmd.qkey; 1051 param.qkey = cmd.qkey;
1189 param.status = cmd.status; 1052 param.status = cmd.status;
1190 param.info_length = cmd.info_len; 1053 param.info_length = cmd.info_len;
1191 param.private_data_len = cmd.data_len; 1054 param.private_data_len = cmd.data_len;
1192
1193 ctx = ib_ucm_ctx_get(cmd.id);
1194 if (!ctx) {
1195 result = -ENOENT;
1196 goto done;
1197 }
1198 1055
1199 down(&ctx->file->mutex); 1056 ctx = ib_ucm_ctx_get(file, cmd.id);
1200 if (ctx->file != file) 1057 if (!IS_ERR(ctx)) {
1201 result = -EINVAL;
1202 else
1203 result = ib_send_cm_sidr_rep(ctx->cm_id, &param); 1058 result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
1059 ib_ucm_ctx_put(ctx);
1060 } else
1061 result = PTR_ERR(ctx);
1204 1062
1205 up(&ctx->file->mutex);
1206 ib_ucm_ctx_put(ctx); /* func reference */
1207done: 1063done:
1208 kfree(param.private_data); 1064 kfree(param.private_data);
1209 kfree(param.info); 1065 kfree(param.info);
1210
1211 return result; 1066 return result;
1212} 1067}
1213 1068
@@ -1305,22 +1160,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
1305 struct ib_ucm_context *ctx; 1160 struct ib_ucm_context *ctx;
1306 1161
1307 down(&file->mutex); 1162 down(&file->mutex);
1308
1309 while (!list_empty(&file->ctxs)) { 1163 while (!list_empty(&file->ctxs)) {
1310 1164
1311 ctx = list_entry(file->ctxs.next, 1165 ctx = list_entry(file->ctxs.next,
1312 struct ib_ucm_context, file_list); 1166 struct ib_ucm_context, file_list);
1313 1167
1314 up(&ctx->file->mutex); 1168 up(&file->mutex);
1315 ib_ucm_ctx_put(ctx); /* user reference */ 1169 ib_ucm_destroy_ctx(file, ctx->id);
1316 down(&file->mutex); 1170 down(&file->mutex);
1317 } 1171 }
1318
1319 up(&file->mutex); 1172 up(&file->mutex);
1320
1321 kfree(file); 1173 kfree(file);
1322
1323 ucm_dbg("Deleted struct\n");
1324 return 0; 1174 return 0;
1325} 1175}
1326 1176
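The ucm rewrite converges every command handler on one lookup contract: find the context by id under the idr mutex, verify it belongs to the calling file, take a reference, and report failure as an ERR_PTR value that callers test with IS_ERR(). A userspace mock-up of that caller pattern; the ERR_PTR helpers are re-implemented here for illustration and a fixed table stands in for the idr:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct ucm_file { int owner_id; };
struct ucm_ctx  { struct ucm_file *file; int ref; };

static struct ucm_ctx table[4]; /* stand-in for the idr */

static struct ucm_ctx *ctx_get(struct ucm_file *file, int id)
{
	if (id < 0 || id >= 4 || !table[id].file)
		return ERR_PTR(-ENOENT);  /* no such id */
	if (table[id].file != file)
		return ERR_PTR(-EINVAL);  /* id owned by another fd */
	table[id].ref++;
	return &table[id];
}

static void ctx_put(struct ucm_ctx *ctx) { ctx->ref--; }

static int handle_listen(struct ucm_file *file, int id)
{
	struct ucm_ctx *ctx = ctx_get(file, id);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	/* ... ib_cm_listen(ctx->cm_id, ...) would go here ... */
	ctx_put(ctx);
	return 0;
}

int main(void)
{
	struct ucm_file f = { 1 };

	table[0].file = &f;
	printf("good id: %d, bad id: %d\n",
	       handle_listen(&f, 0), handle_listen(&f, 3));
	return 0;
}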
diff --git a/drivers/infiniband/core/ucm.h b/drivers/infiniband/core/ucm.h
index 6d36606151b2..c8819b928a1b 100644
--- a/drivers/infiniband/core/ucm.h
+++ b/drivers/infiniband/core/ucm.h
@@ -40,17 +40,15 @@
40#include <linux/cdev.h> 40#include <linux/cdev.h>
41#include <linux/idr.h> 41#include <linux/idr.h>
42 42
43#include <ib_cm.h> 43#include <rdma/ib_cm.h>
44#include <ib_user_cm.h> 44#include <rdma/ib_user_cm.h>
45 45
46#define IB_UCM_CM_ID_INVALID 0xffffffff 46#define IB_UCM_CM_ID_INVALID 0xffffffff
47 47
48struct ib_ucm_file { 48struct ib_ucm_file {
49 struct semaphore mutex; 49 struct semaphore mutex;
50 struct file *filp; 50 struct file *filp;
51 /* 51
52 * list of pending events
53 */
54 struct list_head ctxs; /* list of active connections */ 52 struct list_head ctxs; /* list of active connections */
55 struct list_head events; /* list of pending events */ 53 struct list_head events; /* list of pending events */
56 wait_queue_head_t poll_wait; 54 wait_queue_head_t poll_wait;
@@ -58,12 +56,11 @@ struct ib_ucm_file {
58 56
59struct ib_ucm_context { 57struct ib_ucm_context {
60 int id; 58 int id;
61 int ref; 59 wait_queue_head_t wait;
62 int error; 60 atomic_t ref;
63 61
64 struct ib_ucm_file *file; 62 struct ib_ucm_file *file;
65 struct ib_cm_id *cm_id; 63 struct ib_cm_id *cm_id;
66 struct semaphore mutex;
67 64
68 struct list_head events; /* list of pending events. */ 65 struct list_head events; /* list of pending events. */
69 struct list_head file_list; /* member in file ctx list */ 66 struct list_head file_list; /* member in file ctx list */
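The ucm.h change replaces the hand-rolled "int ref plus per-context mutex" with an atomic count and a wait queue: the last put wakes the destroyer, and destroy blocks until every in-flight user has dropped its reference, after which the cm_id and context can be freed safely. A userspace analogue of the pattern, assuming pthreads in place of the kernel's atomic_t/wait_event machinery (illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	atomic_int ref;
	pthread_mutex_t lock;
	pthread_cond_t  wait;
};

static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1) {    /* dropped last ref */
		pthread_mutex_lock(&c->lock);
		pthread_cond_signal(&c->wait);      /* wake_up(&ctx->wait) */
		pthread_mutex_unlock(&c->lock);
	}
}

static void ctx_destroy(struct ctx *c)
{
	ctx_put(c);                                 /* drop creator's ref */
	pthread_mutex_lock(&c->lock);
	while (atomic_load(&c->ref))                /* wait_event(ref == 0) */
		pthread_cond_wait(&c->wait, &c->lock);
	pthread_mutex_unlock(&c->lock);
	/* safe to free the context here */
}

static void *user(void *arg)
{
	ctx_put(arg);                               /* in-flight user finishes */
	return NULL;
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER,
			 .wait = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	atomic_store(&c.ref, 2);                    /* creator + one user */
	pthread_create(&t, NULL, user, &c);
	ctx_destroy(&c);                            /* blocks until user puts */
	pthread_join(&t, NULL);
	printf("context torn down\n");
	return 0;
}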
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index dc4eb1db5e96..527b23450ab3 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +35,7 @@
34 35
35#include <linux/errno.h> 36#include <linux/errno.h>
36 37
37#include <ib_pack.h> 38#include <rdma/ib_pack.h>
38 39
39#define STRUCT_FIELD(header, field) \ 40#define STRUCT_FIELD(header, field) \
40 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ 41 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
@@ -194,6 +195,7 @@ void ib_ud_header_init(int payload_bytes,
194 struct ib_ud_header *header) 195 struct ib_ud_header *header)
195{ 196{
196 int header_len; 197 int header_len;
198 u16 packet_length;
197 199
198 memset(header, 0, sizeof *header); 200 memset(header, 0, sizeof *header);
199 201
@@ -208,7 +210,7 @@ void ib_ud_header_init(int payload_bytes,
208 header->lrh.link_version = 0; 210 header->lrh.link_version = 0;
209 header->lrh.link_next_header = 211 header->lrh.link_next_header =
210 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; 212 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
211 header->lrh.packet_length = (IB_LRH_BYTES + 213 packet_length = (IB_LRH_BYTES +
212 IB_BTH_BYTES + 214 IB_BTH_BYTES +
213 IB_DETH_BYTES + 215 IB_DETH_BYTES +
214 payload_bytes + 216 payload_bytes +
@@ -217,8 +219,7 @@ void ib_ud_header_init(int payload_bytes,
217 219
218 header->grh_present = grh_present; 220 header->grh_present = grh_present;
219 if (grh_present) { 221 if (grh_present) {
220 header->lrh.packet_length += IB_GRH_BYTES / 4; 222 packet_length += IB_GRH_BYTES / 4;
221
222 header->grh.ip_version = 6; 223 header->grh.ip_version = 6;
223 header->grh.payload_length = 224 header->grh.payload_length =
224 cpu_to_be16((IB_BTH_BYTES + 225 cpu_to_be16((IB_BTH_BYTES +
@@ -229,7 +230,7 @@ void ib_ud_header_init(int payload_bytes,
229 header->grh.next_header = 0x1b; 230 header->grh.next_header = 0x1b;
230 } 231 }
231 232
232 cpu_to_be16s(&header->lrh.packet_length); 233 header->lrh.packet_length = cpu_to_be16(packet_length);
233 234
234 if (header->immediate_present) 235 if (header->immediate_present)
235 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 236 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
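A compressed view of the pattern the two ud_header.c hunks above establish: accumulate the length in a host-order temporary and convert exactly once when storing into the wire-format field. Here base_dwords stands in for the full LRH+BTH+DETH+payload arithmetic, which the hunk elides.

    u16 packet_length;                       /* host order throughout */

    packet_length = base_dwords;             /* placeholder for the sum */
    if (grh_present)
        packet_length += IB_GRH_BYTES / 4;

    header->lrh.packet_length = cpu_to_be16(packet_length);  /* one swap */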
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 2e38792df533..7c2f03057ddb 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
@@ -49,8 +49,8 @@
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50#include <asm/semaphore.h> 50#include <asm/semaphore.h>
51 51
52#include <ib_mad.h> 52#include <rdma/ib_mad.h>
53#include <ib_user_mad.h> 53#include <rdma/ib_user_mad.h>
54 54
55MODULE_AUTHOR("Roland Dreier"); 55MODULE_AUTHOR("Roland Dreier");
56MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); 56MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
@@ -271,7 +271,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
271 struct ib_send_wr *bad_wr; 271 struct ib_send_wr *bad_wr;
272 struct ib_rmpp_mad *rmpp_mad; 272 struct ib_rmpp_mad *rmpp_mad;
273 u8 method; 273 u8 method;
274 u64 *tid; 274 __be64 *tid;
275 int ret, length, hdr_len, data_len, rmpp_hdr_size; 275 int ret, length, hdr_len, data_len, rmpp_hdr_size;
276 int rmpp_active = 0; 276 int rmpp_active = 0;
277 277
@@ -316,7 +316,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
316 if (packet->mad.hdr.grh_present) { 316 if (packet->mad.hdr.grh_present) {
317 ah_attr.ah_flags = IB_AH_GRH; 317 ah_attr.ah_flags = IB_AH_GRH;
318 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); 318 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
319 ah_attr.grh.flow_label = packet->mad.hdr.flow_label; 319 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
320 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; 320 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
321 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; 321 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
322 } 322 }
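The user_mad.c changes belong to the same endianness-annotation sweep: on-the-wire fields carry __be types so sparse can verify that every host-order use goes through an explicit conversion. In sketch form, with struct and function names chosen for illustration:

    struct example_hdr {
        __be32 flow_label;                  /* big-endian on the wire */
    };

    static u32 example_flow_label(const struct example_hdr *hdr)
    {
        /* sparse warns if the __be32 is consumed directly as a u32 */
        return be32_to_cpu(hdr->flow_label);
    }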
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 7696022f9a4e..180b3d4765e4 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * 6 *
5 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -43,8 +45,8 @@
43#include <linux/kref.h> 45#include <linux/kref.h>
44#include <linux/idr.h> 46#include <linux/idr.h>
45 47
46#include <ib_verbs.h> 48#include <rdma/ib_verbs.h>
47#include <ib_user_verbs.h> 49#include <rdma/ib_user_verbs.h>
48 50
49struct ib_uverbs_device { 51struct ib_uverbs_device {
50 int devnum; 52 int devnum;
@@ -97,10 +99,12 @@ extern struct idr ib_uverbs_mw_idr;
97extern struct idr ib_uverbs_ah_idr; 99extern struct idr ib_uverbs_ah_idr;
98extern struct idr ib_uverbs_cq_idr; 100extern struct idr ib_uverbs_cq_idr;
99extern struct idr ib_uverbs_qp_idr; 101extern struct idr ib_uverbs_qp_idr;
102extern struct idr ib_uverbs_srq_idr;
100 103
101void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); 104void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
102void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); 105void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
103void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); 106void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
107void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
104 108
105int ib_umem_get(struct ib_device *dev, struct ib_umem *mem, 109int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
106 void *addr, size_t size, int write); 110 void *addr, size_t size, int write);
@@ -129,5 +133,8 @@ IB_UVERBS_DECLARE_CMD(modify_qp);
129IB_UVERBS_DECLARE_CMD(destroy_qp); 133IB_UVERBS_DECLARE_CMD(destroy_qp);
130IB_UVERBS_DECLARE_CMD(attach_mcast); 134IB_UVERBS_DECLARE_CMD(attach_mcast);
131IB_UVERBS_DECLARE_CMD(detach_mcast); 135IB_UVERBS_DECLARE_CMD(detach_mcast);
136IB_UVERBS_DECLARE_CMD(create_srq);
137IB_UVERBS_DECLARE_CMD(modify_srq);
138IB_UVERBS_DECLARE_CMD(destroy_srq);
132 139
133#endif /* UVERBS_H */ 140#endif /* UVERBS_H */
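For reference, IB_UVERBS_DECLARE_CMD (defined earlier in this header, outside the hunk) expands along these lines, so each added line above declares one command handler with the standard uverbs signature seen in uverbs_cmd.c:

    #define IB_UVERBS_DECLARE_CMD(name)                               \
            ssize_t ib_uverbs_##name(struct ib_uverbs_file *file,     \
                                     const char __user *buf,          \
                                     int in_len, int out_len)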
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5f2bbcda4c73..ebccf9f38af9 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -724,6 +724,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
724 struct ib_uobject *uobj; 724 struct ib_uobject *uobj;
725 struct ib_pd *pd; 725 struct ib_pd *pd;
726 struct ib_cq *scq, *rcq; 726 struct ib_cq *scq, *rcq;
727 struct ib_srq *srq;
727 struct ib_qp *qp; 728 struct ib_qp *qp;
728 struct ib_qp_init_attr attr; 729 struct ib_qp_init_attr attr;
729 int ret; 730 int ret;
@@ -747,10 +748,12 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
747 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 748 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
748 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); 749 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
749 rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle); 750 rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
751 srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;
750 752
751 if (!pd || pd->uobject->context != file->ucontext || 753 if (!pd || pd->uobject->context != file->ucontext ||
752 !scq || scq->uobject->context != file->ucontext || 754 !scq || scq->uobject->context != file->ucontext ||
753 !rcq || rcq->uobject->context != file->ucontext) { 755 !rcq || rcq->uobject->context != file->ucontext ||
756 (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
754 ret = -EINVAL; 757 ret = -EINVAL;
755 goto err_up; 758 goto err_up;
756 } 759 }
@@ -759,7 +762,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
759 attr.qp_context = file; 762 attr.qp_context = file;
760 attr.send_cq = scq; 763 attr.send_cq = scq;
761 attr.recv_cq = rcq; 764 attr.recv_cq = rcq;
762 attr.srq = NULL; 765 attr.srq = srq;
763 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 766 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
764 attr.qp_type = cmd.qp_type; 767 attr.qp_type = cmd.qp_type;
765 768
@@ -1004,3 +1007,178 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
1004 1007
1005 return ret ? ret : in_len; 1008 return ret ? ret : in_len;
1006} 1009}
1010
1011ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1012 const char __user *buf, int in_len,
1013 int out_len)
1014{
1015 struct ib_uverbs_create_srq cmd;
1016 struct ib_uverbs_create_srq_resp resp;
1017 struct ib_udata udata;
1018 struct ib_uobject *uobj;
1019 struct ib_pd *pd;
1020 struct ib_srq *srq;
1021 struct ib_srq_init_attr attr;
1022 int ret;
1023
1024 if (out_len < sizeof resp)
1025 return -ENOSPC;
1026
1027 if (copy_from_user(&cmd, buf, sizeof cmd))
1028 return -EFAULT;
1029
1030 INIT_UDATA(&udata, buf + sizeof cmd,
1031 (unsigned long) cmd.response + sizeof resp,
1032 in_len - sizeof cmd, out_len - sizeof resp);
1033
1034 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
1035 if (!uobj)
1036 return -ENOMEM;
1037
1038 down(&ib_uverbs_idr_mutex);
1039
1040 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
1041
1042 if (!pd || pd->uobject->context != file->ucontext) {
1043 ret = -EINVAL;
1044 goto err_up;
1045 }
1046
1047 attr.event_handler = ib_uverbs_srq_event_handler;
1048 attr.srq_context = file;
1049 attr.attr.max_wr = cmd.max_wr;
1050 attr.attr.max_sge = cmd.max_sge;
1051 attr.attr.srq_limit = cmd.srq_limit;
1052
1053 uobj->user_handle = cmd.user_handle;
1054 uobj->context = file->ucontext;
1055
1056 srq = pd->device->create_srq(pd, &attr, &udata);
1057 if (IS_ERR(srq)) {
1058 ret = PTR_ERR(srq);
1059 goto err_up;
1060 }
1061
1062 srq->device = pd->device;
1063 srq->pd = pd;
1064 srq->uobject = uobj;
1065 srq->event_handler = attr.event_handler;
1066 srq->srq_context = attr.srq_context;
1067 atomic_inc(&pd->usecnt);
1068 atomic_set(&srq->usecnt, 0);
1069
1070 memset(&resp, 0, sizeof resp);
1071
1072retry:
1073 if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
1074 ret = -ENOMEM;
1075 goto err_destroy;
1076 }
1077
1078 ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id);
1079
1080 if (ret == -EAGAIN)
1081 goto retry;
1082 if (ret)
1083 goto err_destroy;
1084
1085 resp.srq_handle = uobj->id;
1086
1087 spin_lock_irq(&file->ucontext->lock);
1088 list_add_tail(&uobj->list, &file->ucontext->srq_list);
1089 spin_unlock_irq(&file->ucontext->lock);
1090
1091 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1092 &resp, sizeof resp)) {
1093 ret = -EFAULT;
1094 goto err_list;
1095 }
1096
1097 up(&ib_uverbs_idr_mutex);
1098
1099 return in_len;
1100
1101err_list:
1102 spin_lock_irq(&file->ucontext->lock);
1103 list_del(&uobj->list);
1104 spin_unlock_irq(&file->ucontext->lock);
1105
1106err_destroy:
1107 ib_destroy_srq(srq);
1108
1109err_up:
1110 up(&ib_uverbs_idr_mutex);
1111
1112 kfree(uobj);
1113 return ret;
1114}
1115
1116ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
1117 const char __user *buf, int in_len,
1118 int out_len)
1119{
1120 struct ib_uverbs_modify_srq cmd;
1121 struct ib_srq *srq;
1122 struct ib_srq_attr attr;
1123 int ret;
1124
1125 if (copy_from_user(&cmd, buf, sizeof cmd))
1126 return -EFAULT;
1127
1128 down(&ib_uverbs_idr_mutex);
1129
1130 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1131 if (!srq || srq->uobject->context != file->ucontext) {
1132 ret = -EINVAL;
1133 goto out;
1134 }
1135
1136 attr.max_wr = cmd.max_wr;
1137 attr.max_sge = cmd.max_sge;
1138 attr.srq_limit = cmd.srq_limit;
1139
1140 ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
1141
1142out:
1143 up(&ib_uverbs_idr_mutex);
1144
1145 return ret ? ret : in_len;
1146}
1147
1148ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
1149 const char __user *buf, int in_len,
1150 int out_len)
1151{
1152 struct ib_uverbs_destroy_srq cmd;
1153 struct ib_srq *srq;
1154 struct ib_uobject *uobj;
1155 int ret = -EINVAL;
1156
1157 if (copy_from_user(&cmd, buf, sizeof cmd))
1158 return -EFAULT;
1159
1160 down(&ib_uverbs_idr_mutex);
1161
1162 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1163 if (!srq || srq->uobject->context != file->ucontext)
1164 goto out;
1165
1166 uobj = srq->uobject;
1167
1168 ret = ib_destroy_srq(srq);
1169 if (ret)
1170 goto out;
1171
1172 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
1173
1174 spin_lock_irq(&file->ucontext->lock);
1175 list_del(&uobj->list);
1176 spin_unlock_irq(&file->ucontext->lock);
1177
1178 kfree(uobj);
1179
1180out:
1181 up(&ib_uverbs_idr_mutex);
1182
1183 return ret ? ret : in_len;
1184}
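ib_uverbs_create_srq() above uses the classic two-step idr allocation idiom. Standalone, the retry loop looks like this (a sketch only; the real function also performs the error unwind shown above):

    int example_idr_alloc(struct idr *idr, void *ptr, int *id)
    {
        int ret;

        do {
            /* preload; returns 0 when memory could not be reserved */
            if (!idr_pre_get(idr, GFP_KERNEL))
                return -ENOMEM;
            /* may still race with other allocators -> -EAGAIN */
            ret = idr_get_new(idr, ptr, id);
        } while (ret == -EAGAIN);

        return ret;
    }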
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f6e9ea29cd7..09caf5b1ef36 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * 6 *
5 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +69,7 @@ DEFINE_IDR(ib_uverbs_mw_idr);
67DEFINE_IDR(ib_uverbs_ah_idr); 69DEFINE_IDR(ib_uverbs_ah_idr);
68DEFINE_IDR(ib_uverbs_cq_idr); 70DEFINE_IDR(ib_uverbs_cq_idr);
69DEFINE_IDR(ib_uverbs_qp_idr); 71DEFINE_IDR(ib_uverbs_qp_idr);
72DEFINE_IDR(ib_uverbs_srq_idr);
70 73
71static spinlock_t map_lock; 74static spinlock_t map_lock;
72static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); 75static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -91,6 +94,9 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
91 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, 94 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
92 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, 95 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
93 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, 96 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
97 [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
98 [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
99 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
94}; 100};
95 101
96static struct vfsmount *uverbs_event_mnt; 102static struct vfsmount *uverbs_event_mnt;
@@ -125,7 +131,14 @@ static int ib_dealloc_ucontext(struct ib_ucontext *context)
125 kfree(uobj); 131 kfree(uobj);
126 } 132 }
127 133
128 /* XXX Free SRQs */ 134 list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
135 struct ib_srq *srq = idr_find(&ib_uverbs_srq_idr, uobj->id);
136 idr_remove(&ib_uverbs_srq_idr, uobj->id);
137 ib_destroy_srq(srq);
138 list_del(&uobj->list);
139 kfree(uobj);
140 }
141
129 /* XXX Free MWs */ 142 /* XXX Free MWs */
130 143
131 list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { 144 list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
@@ -344,6 +357,13 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
344 event->event); 357 event->event);
345} 358}
346 359
360void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
361{
362 ib_uverbs_async_handler(context_ptr,
363 event->element.srq->uobject->user_handle,
364 event->event);
365}
366
347static void ib_uverbs_event_handler(struct ib_event_handler *handler, 367static void ib_uverbs_event_handler(struct ib_event_handler *handler,
348 struct ib_event *event) 368 struct ib_event *event)
349{ 369{
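The three table entries added above are consumed by the uverbs write() dispatcher, which sits outside this hunk. Hedged as a sketch of that dispatcher, it amounts to a bounds-checked indirect call:

    if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
        !uverbs_cmd_table[hdr.command])
        return -EINVAL;

    return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
                                         hdr.in_words * 4,
                                         hdr.out_words * 4);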
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index ed550f6595bd..36a32c315668 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 506fdf1f2a26..5081d903e561 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -4,6 +4,7 @@
4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7 * Copyright (c) 2005 Cisco Systems. All rights reserved. 8 * Copyright (c) 2005 Cisco Systems. All rights reserved.
8 * 9 *
9 * This software is available to you under a choice of one of two 10 * This software is available to you under a choice of one of two
@@ -40,8 +41,8 @@
40#include <linux/errno.h> 41#include <linux/errno.h>
41#include <linux/err.h> 42#include <linux/err.h>
42 43
43#include <ib_verbs.h> 44#include <rdma/ib_verbs.h>
44#include <ib_cache.h> 45#include <rdma/ib_cache.h>
45 46
46/* Protection domains */ 47/* Protection domains */
47 48
@@ -153,6 +154,66 @@ int ib_destroy_ah(struct ib_ah *ah)
153} 154}
154EXPORT_SYMBOL(ib_destroy_ah); 155EXPORT_SYMBOL(ib_destroy_ah);
155 156
157/* Shared receive queues */
158
159struct ib_srq *ib_create_srq(struct ib_pd *pd,
160 struct ib_srq_init_attr *srq_init_attr)
161{
162 struct ib_srq *srq;
163
164 if (!pd->device->create_srq)
165 return ERR_PTR(-ENOSYS);
166
167 srq = pd->device->create_srq(pd, srq_init_attr, NULL);
168
169 if (!IS_ERR(srq)) {
170 srq->device = pd->device;
171 srq->pd = pd;
172 srq->uobject = NULL;
173 srq->event_handler = srq_init_attr->event_handler;
174 srq->srq_context = srq_init_attr->srq_context;
175 atomic_inc(&pd->usecnt);
176 atomic_set(&srq->usecnt, 0);
177 }
178
179 return srq;
180}
181EXPORT_SYMBOL(ib_create_srq);
182
183int ib_modify_srq(struct ib_srq *srq,
184 struct ib_srq_attr *srq_attr,
185 enum ib_srq_attr_mask srq_attr_mask)
186{
187 return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
188}
189EXPORT_SYMBOL(ib_modify_srq);
190
191int ib_query_srq(struct ib_srq *srq,
192 struct ib_srq_attr *srq_attr)
193{
194 return srq->device->query_srq ?
195 srq->device->query_srq(srq, srq_attr) : -ENOSYS;
196}
197EXPORT_SYMBOL(ib_query_srq);
198
199int ib_destroy_srq(struct ib_srq *srq)
200{
201 struct ib_pd *pd;
202 int ret;
203
204 if (atomic_read(&srq->usecnt))
205 return -EBUSY;
206
207 pd = srq->pd;
208
209 ret = srq->device->destroy_srq(srq);
210 if (!ret)
211 atomic_dec(&pd->usecnt);
212
213 return ret;
214}
215EXPORT_SYMBOL(ib_destroy_srq);
216
156/* Queue pairs */ 217/* Queue pairs */
157 218
158struct ib_qp *ib_create_qp(struct ib_pd *pd, 219struct ib_qp *ib_create_qp(struct ib_pd *pd,
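A minimal kernel-side consumer of the new SRQ verbs, assuming a valid struct ib_pd *pd; my_srq_event_handler, my_ctx, and the sizing values are placeholders, not from this patch.

    struct ib_srq_init_attr init_attr = {
        .event_handler = my_srq_event_handler,  /* placeholder callback */
        .srq_context   = my_ctx,                /* placeholder cookie */
        .attr = {
            .max_wr  = 128,                     /* placeholder sizing */
            .max_sge = 1,
        },
    };
    struct ib_srq_attr limit_attr = { .srq_limit = 16 };
    struct ib_srq *srq;
    int ret;

    srq = ib_create_srq(pd, &init_attr);
    if (IS_ERR(srq))
        return PTR_ERR(srq);

    /* Arm a one-shot SRQ limit event for when fewer than 16 receives
     * remain posted. */
    ret = ib_modify_srq(srq, &limit_attr, IB_SRQ_LIMIT);

    ib_destroy_srq(srq);
    return ret;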
diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile
index 5dcbd43073e2..c44f7bae5424 100644
--- a/drivers/infiniband/hw/mthca/Makefile
+++ b/drivers/infiniband/hw/mthca/Makefile
@@ -1,5 +1,3 @@
1EXTRA_CFLAGS += -Idrivers/infiniband/include
2
3ifdef CONFIG_INFINIBAND_MTHCA_DEBUG 1ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
4EXTRA_CFLAGS += -DDEBUG 2EXTRA_CFLAGS += -DDEBUG
5endif 3endif
@@ -9,4 +7,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
9ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \ 7ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
10 mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \ 8 mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
11 mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \ 9 mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
12 mthca_provider.o mthca_memfree.o mthca_uar.o 10 mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index b1db48dd91d6..9ba3211cef7c 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -177,3 +177,119 @@ void mthca_array_cleanup(struct mthca_array *array, int nent)
177 177
178 kfree(array->page_list); 178 kfree(array->page_list);
179} 179}
180
181/*
182 * Handling for queue buffers -- we allocate a bunch of memory and
183 * register it in a memory region at HCA virtual address 0. If the
184 * requested size is > max_direct, we split the allocation into
185 * multiple pages, so we don't require too much contiguous memory.
186 */
187
188int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
189 union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
190 int hca_write, struct mthca_mr *mr)
191{
192 int err = -ENOMEM;
193 int npages, shift;
194 u64 *dma_list = NULL;
195 dma_addr_t t;
196 int i;
197
198 if (size <= max_direct) {
199 *is_direct = 1;
200 npages = 1;
201 shift = get_order(size) + PAGE_SHIFT;
202
203 buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
204 size, &t, GFP_KERNEL);
205 if (!buf->direct.buf)
206 return -ENOMEM;
207
208 pci_unmap_addr_set(&buf->direct, mapping, t);
209
210 memset(buf->direct.buf, 0, size);
211
212 while (t & ((1 << shift) - 1)) {
213 --shift;
214 npages *= 2;
215 }
216
217 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
218 if (!dma_list)
219 goto err_free;
220
221 for (i = 0; i < npages; ++i)
222 dma_list[i] = t + i * (1 << shift);
223 } else {
224 *is_direct = 0;
225 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
226 shift = PAGE_SHIFT;
227
228 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
229 if (!dma_list)
230 return -ENOMEM;
231
232 buf->page_list = kmalloc(npages * sizeof *buf->page_list,
233 GFP_KERNEL);
234 if (!buf->page_list)
235 goto err_out;
236
237 for (i = 0; i < npages; ++i)
238 buf->page_list[i].buf = NULL;
239
240 for (i = 0; i < npages; ++i) {
241 buf->page_list[i].buf =
242 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
243 &t, GFP_KERNEL);
244 if (!buf->page_list[i].buf)
245 goto err_free;
246
247 dma_list[i] = t;
248 pci_unmap_addr_set(&buf->page_list[i], mapping, t);
249
250 memset(buf->page_list[i].buf, 0, PAGE_SIZE);
251 }
252 }
253
254 err = mthca_mr_alloc_phys(dev, pd->pd_num,
255 dma_list, shift, npages,
256 0, size,
257 MTHCA_MPT_FLAG_LOCAL_READ |
258 (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
259 mr);
260 if (err)
261 goto err_free;
262
263 kfree(dma_list);
264
265 return 0;
266
267err_free:
268 mthca_buf_free(dev, size, buf, *is_direct, NULL);
269
270err_out:
271 kfree(dma_list);
272
273 return err;
274}
275
276void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
277 int is_direct, struct mthca_mr *mr)
278{
279 int i;
280
281 if (mr)
282 mthca_free_mr(dev, mr);
283
284 if (is_direct)
285 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
286 pci_unmap_addr(&buf->direct, mapping));
287 else {
288 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
289 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
290 buf->page_list[i].buf,
291 pci_unmap_addr(&buf->page_list[i],
292 mapping));
293 kfree(buf->page_list);
294 }
295}
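The alignment loop in mthca_buf_alloc() deserves a worked trace: the HCA must see pages whose size divides the buffer's DMA alignment, so shift shrinks (and npages doubles) until the returned address falls on a page boundary. Values below are assumed, with 4 KB system pages (PAGE_SHIFT = 12):

    /* size = 16 KB  =>  shift = get_order(16384) + PAGE_SHIFT = 14.
     * Suppose dma_alloc_coherent() returns t aligned only to 4 KB:
     *
     *   shift = 14, npages = 1:  t & 0x3fff != 0  -> halve page size
     *   shift = 13, npages = 2:  t & 0x1fff != 0  -> halve again
     *   shift = 12, npages = 4:  t & 0x0fff == 0  -> done
     *
     * The MR then registers four 4 KB "pages" at t, t+4K, t+8K, t+12K.
     */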
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index d58dcbe66488..889e85096736 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -35,22 +35,22 @@
35 35
36#include <linux/init.h> 36#include <linux/init.h>
37 37
38#include <ib_verbs.h> 38#include <rdma/ib_verbs.h>
39#include <ib_cache.h> 39#include <rdma/ib_cache.h>
40 40
41#include "mthca_dev.h" 41#include "mthca_dev.h"
42 42
43struct mthca_av { 43struct mthca_av {
44 u32 port_pd; 44 __be32 port_pd;
45 u8 reserved1; 45 u8 reserved1;
46 u8 g_slid; 46 u8 g_slid;
47 u16 dlid; 47 __be16 dlid;
48 u8 reserved2; 48 u8 reserved2;
49 u8 gid_index; 49 u8 gid_index;
50 u8 msg_sr; 50 u8 msg_sr;
51 u8 hop_limit; 51 u8 hop_limit;
52 u32 sl_tclass_flowlabel; 52 __be32 sl_tclass_flowlabel;
53 u32 dgid[4]; 53 __be32 dgid[4];
54}; 54};
55 55
56int mthca_create_ah(struct mthca_dev *dev, 56int mthca_create_ah(struct mthca_dev *dev,
@@ -128,7 +128,7 @@ on_hca_fail:
128 av, (unsigned long) ah->avdma); 128 av, (unsigned long) ah->avdma);
129 for (j = 0; j < 8; ++j) 129 for (j = 0; j < 8; ++j)
130 printk(KERN_DEBUG " [%2x] %08x\n", 130 printk(KERN_DEBUG " [%2x] %08x\n",
131 j * 4, be32_to_cpu(((u32 *) av)[j])); 131 j * 4, be32_to_cpu(((__be32 *) av)[j]));
132 } 132 }
133 133
134 if (ah->type == MTHCA_AH_ON_HCA) { 134 if (ah->type == MTHCA_AH_ON_HCA) {
@@ -169,7 +169,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
169 169
170 header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28; 170 header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
171 header->lrh.destination_lid = ah->av->dlid; 171 header->lrh.destination_lid = ah->av->dlid;
172 header->lrh.source_lid = ah->av->g_slid & 0x7f; 172 header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
173 if (ah->av->g_slid & 0x80) { 173 if (ah->av->g_slid & 0x80) {
174 header->grh_present = 1; 174 header->grh_present = 1;
175 header->grh.traffic_class = 175 header->grh.traffic_class =
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1557a522d831..cc758a2d2bc6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -36,7 +37,7 @@
36#include <linux/pci.h> 37#include <linux/pci.h>
37#include <linux/errno.h> 38#include <linux/errno.h>
38#include <asm/io.h> 39#include <asm/io.h>
39#include <ib_mad.h> 40#include <rdma/ib_mad.h>
40 41
41#include "mthca_dev.h" 42#include "mthca_dev.h"
42#include "mthca_config_reg.h" 43#include "mthca_config_reg.h"
@@ -108,6 +109,7 @@ enum {
108 CMD_SW2HW_SRQ = 0x35, 109 CMD_SW2HW_SRQ = 0x35,
109 CMD_HW2SW_SRQ = 0x36, 110 CMD_HW2SW_SRQ = 0x36,
110 CMD_QUERY_SRQ = 0x37, 111 CMD_QUERY_SRQ = 0x37,
112 CMD_ARM_SRQ = 0x40,
111 113
112 /* QP/EE commands */ 114 /* QP/EE commands */
113 CMD_RST2INIT_QPEE = 0x19, 115 CMD_RST2INIT_QPEE = 0x19,
@@ -219,20 +221,20 @@ static int mthca_cmd_post(struct mthca_dev *dev,
219 * (and some architectures such as ia64 implement memcpy_toio 221 * (and some architectures such as ia64 implement memcpy_toio
220 * in terms of writeb). 222 * in terms of writeb).
221 */ 223 */
222 __raw_writel(cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); 224 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4);
223 __raw_writel(cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); 225 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4);
224 __raw_writel(cpu_to_be32(in_modifier), dev->hcr + 2 * 4); 226 __raw_writel((__force u32) cpu_to_be32(in_modifier), dev->hcr + 2 * 4);
225 __raw_writel(cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); 227 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4);
226 __raw_writel(cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); 228 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
227 __raw_writel(cpu_to_be32(token << 16), dev->hcr + 5 * 4); 229 __raw_writel((__force u32) cpu_to_be32(token << 16), dev->hcr + 5 * 4);
228 230
229 /* __raw_writel may not order writes. */ 231 /* __raw_writel may not order writes. */
230 wmb(); 232 wmb();
231 233
232 __raw_writel(cpu_to_be32((1 << HCR_GO_BIT) | 234 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
233 (event ? (1 << HCA_E_BIT) : 0) | 235 (event ? (1 << HCA_E_BIT) : 0) |
234 (op_modifier << HCR_OPMOD_SHIFT) | 236 (op_modifier << HCR_OPMOD_SHIFT) |
235 op), dev->hcr + 6 * 4); 237 op), dev->hcr + 6 * 4);
236 238
237out: 239out:
238 up(&dev->cmd.hcr_sem); 240 up(&dev->cmd.hcr_sem);
@@ -273,12 +275,14 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
273 goto out; 275 goto out;
274 } 276 }
275 277
276 if (out_is_imm) { 278 if (out_is_imm)
277 memcpy_fromio(out_param, dev->hcr + HCR_OUT_PARAM_OFFSET, sizeof (u64)); 279 *out_param =
278 be64_to_cpus(out_param); 280 (u64) be32_to_cpu((__force __be32)
279 } 281 __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
282 (u64) be32_to_cpu((__force __be32)
283 __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
280 284
281 *status = be32_to_cpu(__raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; 285 *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
282 286
283out: 287out:
284 up(&dev->cmd.poll_sem); 288 up(&dev->cmd.poll_sem);
@@ -1029,6 +1033,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
1029 1033
1030 mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", 1034 mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1031 dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); 1035 dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
1036 mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1037 dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
1032 mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", 1038 mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1033 dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); 1039 dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
1034 mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", 1040 mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
@@ -1082,6 +1088,34 @@ out:
1082 return err; 1088 return err;
1083} 1089}
1084 1090
1091static void get_board_id(void *vsd, char *board_id)
1092{
1093 int i;
1094
1095#define VSD_OFFSET_SIG1 0x00
1096#define VSD_OFFSET_SIG2 0xde
1097#define VSD_OFFSET_MLX_BOARD_ID 0xd0
1098#define VSD_OFFSET_TS_BOARD_ID 0x20
1099
1100#define VSD_SIGNATURE_TOPSPIN 0x5ad
1101
1102 memset(board_id, 0, MTHCA_BOARD_ID_LEN);
1103
1104 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1105 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1106 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
1107 } else {
1108 /*
1109 * The board ID is a string but the firmware byte
1110 * swaps each 4-byte word before passing it back to
1111 * us. Therefore we need to swab it before printing.
1112 */
1113 for (i = 0; i < 4; ++i)
1114 ((u32 *) board_id)[i] =
1115 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1116 }
1117}
1118
1085int mthca_QUERY_ADAPTER(struct mthca_dev *dev, 1119int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1086 struct mthca_adapter *adapter, u8 *status) 1120 struct mthca_adapter *adapter, u8 *status)
1087{ 1121{
@@ -1094,6 +1128,7 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1094#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 1128#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
1095#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 1129#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
1096#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1130#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1131#define QUERY_ADAPTER_VSD_OFFSET 0x20
1097 1132
1098 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1133 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1099 if (IS_ERR(mailbox)) 1134 if (IS_ERR(mailbox))
@@ -1111,6 +1146,9 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1111 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); 1146 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
1112 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1147 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1113 1148
1149 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1150 adapter->board_id);
1151
1114out: 1152out:
1115 mthca_free_mailbox(dev, mailbox); 1153 mthca_free_mailbox(dev, mailbox);
1116 return err; 1154 return err;
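A worked example of the swab path in get_board_id(): reading four bytes into a u32 and storing swab32() of it back reverses those bytes in memory regardless of host endianness, so a VSD that holds the ID with each 4-byte word reversed comes out readable. With a hypothetical ID:

    /* Hypothetical ID "MT23108X": the VSD holds each 4-byte word
     * byte-reversed, i.e. the bytes "32TM" "X801".  swab32() on each
     * word restores "MT23" "108X", so board_id reads "MT23108X".
     */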
@@ -1121,7 +1159,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1121 u8 *status) 1159 u8 *status)
1122{ 1160{
1123 struct mthca_mailbox *mailbox; 1161 struct mthca_mailbox *mailbox;
1124 u32 *inbox; 1162 __be32 *inbox;
1125 int err; 1163 int err;
1126 1164
1127#define INIT_HCA_IN_SIZE 0x200 1165#define INIT_HCA_IN_SIZE 0x200
@@ -1247,10 +1285,8 @@ int mthca_INIT_IB(struct mthca_dev *dev,
1247#define INIT_IB_FLAG_SIG (1 << 18) 1285#define INIT_IB_FLAG_SIG (1 << 18)
1248#define INIT_IB_FLAG_NG (1 << 17) 1286#define INIT_IB_FLAG_NG (1 << 17)
1249#define INIT_IB_FLAG_G0 (1 << 16) 1287#define INIT_IB_FLAG_G0 (1 << 16)
1250#define INIT_IB_FLAG_1X (1 << 8)
1251#define INIT_IB_FLAG_4X (1 << 9)
1252#define INIT_IB_FLAG_12X (1 << 11)
1253#define INIT_IB_VL_SHIFT 4 1288#define INIT_IB_VL_SHIFT 4
1289#define INIT_IB_PORT_WIDTH_SHIFT 8
1254#define INIT_IB_MTU_SHIFT 12 1290#define INIT_IB_MTU_SHIFT 12
1255#define INIT_IB_MAX_GID_OFFSET 0x06 1291#define INIT_IB_MAX_GID_OFFSET 0x06
1256#define INIT_IB_MAX_PKEY_OFFSET 0x0a 1292#define INIT_IB_MAX_PKEY_OFFSET 0x0a
@@ -1266,12 +1302,11 @@ int mthca_INIT_IB(struct mthca_dev *dev,
1266 memset(inbox, 0, INIT_IB_IN_SIZE); 1302 memset(inbox, 0, INIT_IB_IN_SIZE);
1267 1303
1268 flags = 0; 1304 flags = 0;
1269 flags |= param->enable_1x ? INIT_IB_FLAG_1X : 0;
1270 flags |= param->enable_4x ? INIT_IB_FLAG_4X : 0;
1271 flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; 1305 flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0;
1272 flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0; 1306 flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0;
1273 flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0; 1307 flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0;
1274 flags |= param->vl_cap << INIT_IB_VL_SHIFT; 1308 flags |= param->vl_cap << INIT_IB_VL_SHIFT;
1309 flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
1275 flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; 1310 flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
1276 MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); 1311 MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
1277 1312
@@ -1342,7 +1377,7 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st
1342int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) 1377int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
1343{ 1378{
1344 struct mthca_mailbox *mailbox; 1379 struct mthca_mailbox *mailbox;
1345 u64 *inbox; 1380 __be64 *inbox;
1346 int err; 1381 int err;
1347 1382
1348 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1383 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -1468,6 +1503,27 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1468 CMD_TIME_CLASS_A, status); 1503 CMD_TIME_CLASS_A, status);
1469} 1504}
1470 1505
1506int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1507 int srq_num, u8 *status)
1508{
1509 return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
1510 CMD_TIME_CLASS_A, status);
1511}
1512
1513int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1514 int srq_num, u8 *status)
1515{
1516 return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
1517 CMD_HW2SW_SRQ,
1518 CMD_TIME_CLASS_A, status);
1519}
1520
1521int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
1522{
1523 return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
1524 CMD_TIME_CLASS_B, status);
1525}
1526
1471int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 1527int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1472 int is_ee, struct mthca_mailbox *mailbox, u32 optmask, 1528 int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
1473 u8 *status) 1529 u8 *status)
@@ -1513,7 +1569,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1513 if (i % 8 == 0) 1569 if (i % 8 == 0)
1514 printk(" [%02x] ", i * 4); 1570 printk(" [%02x] ", i * 4);
1515 printk(" %08x", 1571 printk(" %08x",
1516 be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); 1572 be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
1517 if ((i + 1) % 8 == 0) 1573 if ((i + 1) % 8 == 0)
1518 printk("\n"); 1574 printk("\n");
1519 } 1575 }
@@ -1533,7 +1589,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1533 if (i % 8 == 0) 1589 if (i % 8 == 0)
1534 printk("[%02x] ", i * 4); 1590 printk("[%02x] ", i * 4);
1535 printk(" %08x", 1591 printk(" %08x",
1536 be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); 1592 be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
1537 if ((i + 1) % 8 == 0) 1593 if ((i + 1) % 8 == 0)
1538 printk("\n"); 1594 printk("\n");
1539 } 1595 }
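Taken together, the three SRQ firmware commands added above cover an SRQ's whole hardware lifecycle. A hedged sketch of the call order a driver would use (limit_watermark is a placeholder; error and status checks abbreviated to comments):

    /* create: hand the software-built context to the HCA */
    err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

    /* runtime: request a limit event once the number of posted
     * receives drops below the watermark */
    err = mthca_ARM_SRQ(dev, srq->srqn, limit_watermark, &status);

    /* destroy: pull the context back so software may free it */
    err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);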
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index ed517f175dd6..65f976a13e02 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +36,7 @@
35#ifndef MTHCA_CMD_H 36#ifndef MTHCA_CMD_H
36#define MTHCA_CMD_H 37#define MTHCA_CMD_H
37 38
38#include <ib_verbs.h> 39#include <rdma/ib_verbs.h>
39 40
40#define MTHCA_MAILBOX_SIZE 4096 41#define MTHCA_MAILBOX_SIZE 4096
41 42
@@ -183,10 +184,11 @@ struct mthca_dev_lim {
183}; 184};
184 185
185struct mthca_adapter { 186struct mthca_adapter {
186 u32 vendor_id; 187 u32 vendor_id;
187 u32 device_id; 188 u32 device_id;
188 u32 revision_id; 189 u32 revision_id;
189 u8 inta_pin; 190 char board_id[MTHCA_BOARD_ID_LEN];
191 u8 inta_pin;
190}; 192};
191 193
192struct mthca_init_hca_param { 194struct mthca_init_hca_param {
@@ -218,8 +220,7 @@ struct mthca_init_hca_param {
218}; 220};
219 221
220struct mthca_init_ib_param { 222struct mthca_init_ib_param {
221 int enable_1x; 223 int port_width;
222 int enable_4x;
223 int vl_cap; 224 int vl_cap;
224 int mtu_cap; 225 int mtu_cap;
225 u16 gid_cap; 226 u16 gid_cap;
@@ -297,6 +298,11 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
297 int cq_num, u8 *status); 298 int cq_num, u8 *status);
298int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 299int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
299 int cq_num, u8 *status); 300 int cq_num, u8 *status);
301int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
302 int srq_num, u8 *status);
303int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
304 int srq_num, u8 *status);
305int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
300int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 306int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
301 int is_ee, struct mthca_mailbox *mailbox, u32 optmask, 307 int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
302 u8 *status); 308 u8 *status);
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h
index b4bfbbfe2c3d..afa56bfaab2e 100644
--- a/drivers/infiniband/hw/mthca/mthca_config_reg.h
+++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 5687c3014522..8600b6c3e0c2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -2,6 +2,8 @@
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5 * 7 *
6 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
@@ -37,7 +39,7 @@
37#include <linux/init.h> 39#include <linux/init.h>
38#include <linux/hardirq.h> 40#include <linux/hardirq.h>
39 41
40#include <ib_pack.h> 42#include <rdma/ib_pack.h>
41 43
42#include "mthca_dev.h" 44#include "mthca_dev.h"
43#include "mthca_cmd.h" 45#include "mthca_cmd.h"
@@ -55,21 +57,21 @@ enum {
55 * Must be packed because start is 64 bits but only aligned to 32 bits. 57 * Must be packed because start is 64 bits but only aligned to 32 bits.
56 */ 58 */
57struct mthca_cq_context { 59struct mthca_cq_context {
58 u32 flags; 60 __be32 flags;
59 u64 start; 61 __be64 start;
60 u32 logsize_usrpage; 62 __be32 logsize_usrpage;
61 u32 error_eqn; /* Tavor only */ 63 __be32 error_eqn; /* Tavor only */
62 u32 comp_eqn; 64 __be32 comp_eqn;
63 u32 pd; 65 __be32 pd;
64 u32 lkey; 66 __be32 lkey;
65 u32 last_notified_index; 67 __be32 last_notified_index;
66 u32 solicit_producer_index; 68 __be32 solicit_producer_index;
67 u32 consumer_index; 69 __be32 consumer_index;
68 u32 producer_index; 70 __be32 producer_index;
69 u32 cqn; 71 __be32 cqn;
70 u32 ci_db; /* Arbel only */ 72 __be32 ci_db; /* Arbel only */
71 u32 state_db; /* Arbel only */ 73 __be32 state_db; /* Arbel only */
72 u32 reserved; 74 u32 reserved;
73} __attribute__((packed)); 75} __attribute__((packed));
74 76
75#define MTHCA_CQ_STATUS_OK ( 0 << 28) 77#define MTHCA_CQ_STATUS_OK ( 0 << 28)
@@ -108,31 +110,31 @@ enum {
108}; 110};
109 111
110struct mthca_cqe { 112struct mthca_cqe {
111 u32 my_qpn; 113 __be32 my_qpn;
112 u32 my_ee; 114 __be32 my_ee;
113 u32 rqpn; 115 __be32 rqpn;
114 u16 sl_g_mlpath; 116 __be16 sl_g_mlpath;
115 u16 rlid; 117 __be16 rlid;
116 u32 imm_etype_pkey_eec; 118 __be32 imm_etype_pkey_eec;
117 u32 byte_cnt; 119 __be32 byte_cnt;
118 u32 wqe; 120 __be32 wqe;
119 u8 opcode; 121 u8 opcode;
120 u8 is_send; 122 u8 is_send;
121 u8 reserved; 123 u8 reserved;
122 u8 owner; 124 u8 owner;
123}; 125};
124 126
125struct mthca_err_cqe { 127struct mthca_err_cqe {
126 u32 my_qpn; 128 __be32 my_qpn;
127 u32 reserved1[3]; 129 u32 reserved1[3];
128 u8 syndrome; 130 u8 syndrome;
129 u8 reserved2; 131 u8 reserved2;
130 u16 db_cnt; 132 __be16 db_cnt;
131 u32 reserved3; 133 u32 reserved3;
132 u32 wqe; 134 __be32 wqe;
133 u8 opcode; 135 u8 opcode;
134 u8 reserved4[2]; 136 u8 reserved4[2];
135 u8 owner; 137 u8 owner;
136}; 138};
137 139
138#define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7) 140#define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7)
@@ -191,7 +193,7 @@ static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
191static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, 193static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
192 int incr) 194 int incr)
193{ 195{
194 u32 doorbell[2]; 196 __be32 doorbell[2];
195 197
196 if (mthca_is_memfree(dev)) { 198 if (mthca_is_memfree(dev)) {
197 *cq->set_ci_db = cpu_to_be32(cq->cons_index); 199 *cq->set_ci_db = cpu_to_be32(cq->cons_index);
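The Tavor branch of update_cons_index(), just past this hunk, is why the doorbell array is now __be32: both halves are built in wire order before mthca_write64() posts them to the UAR page. A sketch of that idiom, under the assumption that the constant and helper names match this file's Tavor path:

    __be32 doorbell[2];

    doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
    doorbell[1] = cpu_to_be32(incr - 1);

    mthca_write64(doorbell,
                  dev->kar + MTHCA_CQ_DOORBELL,
                  MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));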
@@ -222,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
222 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); 224 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
223} 225}
224 226
225void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) 227void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
228 struct mthca_srq *srq)
226{ 229{
227 struct mthca_cq *cq; 230 struct mthca_cq *cq;
228 struct mthca_cqe *cqe; 231 struct mthca_cqe *cqe;
@@ -263,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
263 */ 266 */
264 while (prod_index > cq->cons_index) { 267 while (prod_index > cq->cons_index) {
265 cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); 268 cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
266 if (cqe->my_qpn == cpu_to_be32(qpn)) 269 if (cqe->my_qpn == cpu_to_be32(qpn)) {
270 if (srq)
271 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
267 ++nfreed; 272 ++nfreed;
273 }
268 else if (nfreed) 274 else if (nfreed)
269 memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & 275 memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
270 cq->ibcq.cqe), 276 cq->ibcq.cqe),
@@ -291,7 +297,7 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
291{ 297{
292 int err; 298 int err;
293 int dbd; 299 int dbd;
294 u32 new_wqe; 300 __be32 new_wqe;
295 301
296 if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { 302 if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
297 mthca_dbg(dev, "local QP operation err " 303 mthca_dbg(dev, "local QP operation err "
@@ -365,6 +371,13 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
365 break; 371 break;
366 } 372 }
367 373
374 /*
375 * Mem-free HCAs always generate one CQE per WQE, even in the
376 * error case, so we don't have to check the doorbell count, etc.
377 */
378 if (mthca_is_memfree(dev))
379 return 0;
380
368 err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); 381 err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
369 if (err) 382 if (err)
370 return err; 383 return err;
@@ -373,12 +386,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
373 * If we're at the end of the WQE chain, or we've used up our 386 * If we're at the end of the WQE chain, or we've used up our
374 * doorbell count, free the CQE. Otherwise just update it for 387 * doorbell count, free the CQE. Otherwise just update it for
375 * the next poll operation. 388 * the next poll operation.
376 *
377 * This does not apply to mem-free HCAs: they don't use the
378 * doorbell count field, and so we should always free the CQE.
379 */ 389 */
380 if (mthca_is_memfree(dev) || 390 if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
381 !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
382 return 0; 391 return 0;
383 392
384 cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd); 393 cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
@@ -450,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
450 >> wq->wqe_shift); 459 >> wq->wqe_shift);
451 entry->wr_id = (*cur_qp)->wrid[wqe_index + 460 entry->wr_id = (*cur_qp)->wrid[wqe_index +
452 (*cur_qp)->rq.max]; 461 (*cur_qp)->rq.max];
462 } else if ((*cur_qp)->ibqp.srq) {
463 struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
464 u32 wqe = be32_to_cpu(cqe->wqe);
465 wq = NULL;
466 wqe_index = wqe >> srq->wqe_shift;
467 entry->wr_id = srq->wrid[wqe_index];
468 mthca_free_srq_wqe(srq, wqe);
453 } else { 469 } else {
454 wq = &(*cur_qp)->rq; 470 wq = &(*cur_qp)->rq;
455 wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift; 471 wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
456 entry->wr_id = (*cur_qp)->wrid[wqe_index]; 472 entry->wr_id = (*cur_qp)->wrid[wqe_index];
457 } 473 }
458 474
459 if (wq->last_comp < wqe_index) 475 if (wq) {
460 wq->tail += wqe_index - wq->last_comp; 476 if (wq->last_comp < wqe_index)
461 else 477 wq->tail += wqe_index - wq->last_comp;
462 wq->tail += wqe_index + wq->max - wq->last_comp; 478 else
463 479 wq->tail += wqe_index + wq->max - wq->last_comp;
464 wq->last_comp = wqe_index;
465 480
466 if (0) 481 wq->last_comp = wqe_index;
467 mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n", 482 }
468 is_send ? "Send" : "Receive",
469 (*cur_qp)->qpn, wqe_index, wq->max);
470 483
471 if (is_error) { 484 if (is_error) {
472 err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, 485 err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
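In the new SRQ branch above, wq stays NULL because SRQ WQEs are recycled through a free list rather than head/tail counters; the WQE address in the CQE maps straight to an index. A quick numeric check of that shift, with assumed values:

    /* wqe_shift = 6 (64-byte WQEs, assumed), cqe->wqe = 0x1c0:
     *
     *   wqe_index = 0x1c0 >> 6 = 7
     *
     * so entry->wr_id = srq->wrid[7], and WQE 0x1c0 is returned to
     * the SRQ free list via mthca_free_srq_wqe().
     */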
@@ -584,13 +597,13 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
584 597
585int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) 598int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
586{ 599{
587 u32 doorbell[2]; 600 __be32 doorbell[2];
588 601
589 doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? 602 doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
590 MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : 603 MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
591 MTHCA_TAVOR_CQ_DB_REQ_NOT) | 604 MTHCA_TAVOR_CQ_DB_REQ_NOT) |
592 to_mcq(cq)->cqn); 605 to_mcq(cq)->cqn);
593 doorbell[1] = 0xffffffff; 606 doorbell[1] = (__force __be32) 0xffffffff;
594 607
595 mthca_write64(doorbell, 608 mthca_write64(doorbell,
596 to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, 609 to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
@@ -602,9 +615,9 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
602int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) 615int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
603{ 616{
604 struct mthca_cq *cq = to_mcq(ibcq); 617 struct mthca_cq *cq = to_mcq(ibcq);
605 u32 doorbell[2]; 618 __be32 doorbell[2];
606 u32 sn; 619 u32 sn;
607 u32 ci; 620 __be32 ci;
608 621
609 sn = cq->arm_sn & 3; 622 sn = cq->arm_sn & 3;
610 ci = cpu_to_be32(cq->cons_index); 623 ci = cpu_to_be32(cq->cons_index);
@@ -637,113 +650,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
637 650
638static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq) 651static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
639{ 652{
640 int i; 653 mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
641 int size; 654 &cq->queue, cq->is_direct, &cq->mr);
642
643 if (cq->is_direct)
644 dma_free_coherent(&dev->pdev->dev,
645 (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
646 cq->queue.direct.buf,
647 pci_unmap_addr(&cq->queue.direct,
648 mapping));
649 else {
650 size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
651 for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
652 if (cq->queue.page_list[i].buf)
653 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
654 cq->queue.page_list[i].buf,
655 pci_unmap_addr(&cq->queue.page_list[i],
656 mapping));
657
658 kfree(cq->queue.page_list);
659 }
660}
661
662static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
663 struct mthca_cq *cq)
664{
665 int err = -ENOMEM;
666 int npages, shift;
667 u64 *dma_list = NULL;
668 dma_addr_t t;
669 int i;
670
671 if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
672 cq->is_direct = 1;
673 npages = 1;
674 shift = get_order(size) + PAGE_SHIFT;
675
676 cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
677 size, &t, GFP_KERNEL);
678 if (!cq->queue.direct.buf)
679 return -ENOMEM;
680
681 pci_unmap_addr_set(&cq->queue.direct, mapping, t);
682
683 memset(cq->queue.direct.buf, 0, size);
684
685 while (t & ((1 << shift) - 1)) {
686 --shift;
687 npages *= 2;
688 }
689
690 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
691 if (!dma_list)
692 goto err_free;
693
694 for (i = 0; i < npages; ++i)
695 dma_list[i] = t + i * (1 << shift);
696 } else {
697 cq->is_direct = 0;
698 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
699 shift = PAGE_SHIFT;
700
701 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
702 if (!dma_list)
703 return -ENOMEM;
704
705 cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
706 GFP_KERNEL);
707 if (!cq->queue.page_list)
708 goto err_out;
709
710 for (i = 0; i < npages; ++i)
711 cq->queue.page_list[i].buf = NULL;
712
713 for (i = 0; i < npages; ++i) {
714 cq->queue.page_list[i].buf =
715 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
716 &t, GFP_KERNEL);
717 if (!cq->queue.page_list[i].buf)
718 goto err_free;
719
720 dma_list[i] = t;
721 pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);
722
723 memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
724 }
725 }
726
727 err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
728 dma_list, shift, npages,
729 0, size,
730 MTHCA_MPT_FLAG_LOCAL_WRITE |
731 MTHCA_MPT_FLAG_LOCAL_READ,
732 &cq->mr);
733 if (err)
734 goto err_free;
735
736 kfree(dma_list);
737
738 return 0;
739
740err_free:
741 mthca_free_cq_buf(dev, cq);
742
743err_out:
744 kfree(dma_list);
745
746 return err;
747} 655}
748 656
749int mthca_init_cq(struct mthca_dev *dev, int nent, 657int mthca_init_cq(struct mthca_dev *dev, int nent,
@@ -795,7 +703,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
795 cq_context = mailbox->buf; 703 cq_context = mailbox->buf;
796 704
797 if (cq->is_kernel) { 705 if (cq->is_kernel) {
798 err = mthca_alloc_cq_buf(dev, size, cq); 706 err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
707 &cq->queue, &cq->is_direct,
708 &dev->driver_pd, 1, &cq->mr);
799 if (err) 709 if (err)
800 goto err_out_mailbox; 710 goto err_out_mailbox;
801 711
@@ -811,7 +721,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
811 cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK | 721 cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK |
812 MTHCA_CQ_STATE_DISARMED | 722 MTHCA_CQ_STATE_DISARMED |
813 MTHCA_CQ_FLAG_TR); 723 MTHCA_CQ_FLAG_TR);
814 cq_context->start = cpu_to_be64(0);
815 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); 724 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
816 if (ctx) 725 if (ctx)
817 cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index); 726 cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
@@ -857,10 +766,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
857 return 0; 766 return 0;
858 767
859err_out_free_mr: 768err_out_free_mr:
860 if (cq->is_kernel) { 769 if (cq->is_kernel)
861 mthca_free_mr(dev, &cq->mr);
862 mthca_free_cq_buf(dev, cq); 770 mthca_free_cq_buf(dev, cq);
863 }
864 771
865err_out_mailbox: 772err_out_mailbox:
866 mthca_free_mailbox(dev, mailbox); 773 mthca_free_mailbox(dev, mailbox);
@@ -904,7 +811,7 @@ void mthca_free_cq(struct mthca_dev *dev,
904 mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); 811 mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
905 812
906 if (0) { 813 if (0) {
907 u32 *ctx = mailbox->buf; 814 __be32 *ctx = mailbox->buf;
908 int j; 815 int j;
909 816
910 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", 817 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
@@ -928,7 +835,6 @@ void mthca_free_cq(struct mthca_dev *dev,
928 wait_event(cq->wait, !atomic_read(&cq->refcount)); 835 wait_event(cq->wait, !atomic_read(&cq->refcount));
929 836
930 if (cq->is_kernel) { 837 if (cq->is_kernel) {
931 mthca_free_mr(dev, &cq->mr);
932 mthca_free_cq_buf(dev, cq); 838 mthca_free_cq_buf(dev, cq);
933 if (mthca_is_memfree(dev)) { 839 if (mthca_is_memfree(dev)) {
934 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); 840 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
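Note on the mthca_cq.c hunks above: the removed allocation code is exactly what the new mthca_buf_alloc() helper encapsulates, which is also why the paired mthca_free_mr() calls disappear (the buffer and its MR are now torn down together through mthca_free_cq_buf()). A minimal sketch of the policy the helper applies, where alloc_direct() and alloc_paged() are hypothetical stand-ins for the real internals:

	/* Sketch only: alloc_direct()/alloc_paged() are illustrative names,
	 * not functions in this driver. */
	static int buf_alloc_sketch(struct mthca_dev *dev, int size, int max_direct,
				    union mthca_buf *buf, int *is_direct)
	{
		if (size <= max_direct) {
			*is_direct = 1;
			return alloc_direct(dev, size, buf);	/* one dma_alloc_coherent() */
		}
		*is_direct = 0;
		return alloc_paged(dev, size, buf);	/* PAGE_SIZE chunks plus a DMA address list */
	}

For CQs the cutoff is MTHCA_MAX_DIRECT_CQ_SIZE, as the mthca_init_cq() hunk shows.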
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 5ecdd2eeeb0f..7bff5a8425f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -2,6 +2,8 @@
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5 * 7 *
6 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +69,10 @@ enum {
67}; 69};
68 70
69enum { 71enum {
72 MTHCA_BOARD_ID_LEN = 64
73};
74
75enum {
70 MTHCA_EQ_CONTEXT_SIZE = 0x40, 76 MTHCA_EQ_CONTEXT_SIZE = 0x40,
71 MTHCA_CQ_CONTEXT_SIZE = 0x40, 77 MTHCA_CQ_CONTEXT_SIZE = 0x40,
72 MTHCA_QP_CONTEXT_SIZE = 0x200, 78 MTHCA_QP_CONTEXT_SIZE = 0x200,
@@ -142,6 +148,7 @@ struct mthca_limits {
142 int reserved_mcgs; 148 int reserved_mcgs;
143 int num_pds; 149 int num_pds;
144 int reserved_pds; 150 int reserved_pds;
151 u8 port_width_cap;
145}; 152};
146 153
147struct mthca_alloc { 154struct mthca_alloc {
@@ -211,6 +218,13 @@ struct mthca_cq_table {
211 struct mthca_icm_table *table; 218 struct mthca_icm_table *table;
212}; 219};
213 220
221struct mthca_srq_table {
222 struct mthca_alloc alloc;
223 spinlock_t lock;
224 struct mthca_array srq;
225 struct mthca_icm_table *table;
226};
227
214struct mthca_qp_table { 228struct mthca_qp_table {
215 struct mthca_alloc alloc; 229 struct mthca_alloc alloc;
216 u32 rdb_base; 230 u32 rdb_base;
@@ -246,6 +260,7 @@ struct mthca_dev {
246 unsigned long device_cap_flags; 260 unsigned long device_cap_flags;
247 261
248 u32 rev_id; 262 u32 rev_id;
263 char board_id[MTHCA_BOARD_ID_LEN];
249 264
250 /* firmware info */ 265 /* firmware info */
251 u64 fw_ver; 266 u64 fw_ver;
@@ -291,6 +306,7 @@ struct mthca_dev {
291 struct mthca_mr_table mr_table; 306 struct mthca_mr_table mr_table;
292 struct mthca_eq_table eq_table; 307 struct mthca_eq_table eq_table;
293 struct mthca_cq_table cq_table; 308 struct mthca_cq_table cq_table;
309 struct mthca_srq_table srq_table;
294 struct mthca_qp_table qp_table; 310 struct mthca_qp_table qp_table;
295 struct mthca_av_table av_table; 311 struct mthca_av_table av_table;
296 struct mthca_mcg_table mcg_table; 312 struct mthca_mcg_table mcg_table;
@@ -331,14 +347,13 @@ extern void __buggy_use_of_MTHCA_PUT(void);
331 347
332#define MTHCA_PUT(dest, source, offset) \ 348#define MTHCA_PUT(dest, source, offset) \
333 do { \ 349 do { \
334 __typeof__(source) *__p = \ 350 void *__d = ((char *) (dest) + (offset)); \
335 (__typeof__(source) *) ((char *) (dest) + (offset)); \
336 switch (sizeof(source)) { \ 351 switch (sizeof(source)) { \
337 case 1: *__p = (source); break; \ 352 case 1: *(u8 *) __d = (source); break; \
338 case 2: *__p = cpu_to_be16(source); break; \ 353 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
339 case 4: *__p = cpu_to_be32(source); break; \ 354 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
340 case 8: *__p = cpu_to_be64(source); break; \ 355 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
341 default: __buggy_use_of_MTHCA_PUT(); \ 356 default: __buggy_use_of_MTHCA_PUT(); \
342 } \ 357 } \
343 } while (0) 358 } while (0)
344 359
@@ -354,12 +369,18 @@ int mthca_array_set(struct mthca_array *array, int index, void *value);
354void mthca_array_clear(struct mthca_array *array, int index); 369void mthca_array_clear(struct mthca_array *array, int index);
355int mthca_array_init(struct mthca_array *array, int nent); 370int mthca_array_init(struct mthca_array *array, int nent);
356void mthca_array_cleanup(struct mthca_array *array, int nent); 371void mthca_array_cleanup(struct mthca_array *array, int nent);
372int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
373 union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
374 int hca_write, struct mthca_mr *mr);
375void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
376 int is_direct, struct mthca_mr *mr);
357 377
358int mthca_init_uar_table(struct mthca_dev *dev); 378int mthca_init_uar_table(struct mthca_dev *dev);
359int mthca_init_pd_table(struct mthca_dev *dev); 379int mthca_init_pd_table(struct mthca_dev *dev);
360int mthca_init_mr_table(struct mthca_dev *dev); 380int mthca_init_mr_table(struct mthca_dev *dev);
361int mthca_init_eq_table(struct mthca_dev *dev); 381int mthca_init_eq_table(struct mthca_dev *dev);
362int mthca_init_cq_table(struct mthca_dev *dev); 382int mthca_init_cq_table(struct mthca_dev *dev);
383int mthca_init_srq_table(struct mthca_dev *dev);
363int mthca_init_qp_table(struct mthca_dev *dev); 384int mthca_init_qp_table(struct mthca_dev *dev);
364int mthca_init_av_table(struct mthca_dev *dev); 385int mthca_init_av_table(struct mthca_dev *dev);
365int mthca_init_mcg_table(struct mthca_dev *dev); 386int mthca_init_mcg_table(struct mthca_dev *dev);
@@ -369,6 +390,7 @@ void mthca_cleanup_pd_table(struct mthca_dev *dev);
369void mthca_cleanup_mr_table(struct mthca_dev *dev); 390void mthca_cleanup_mr_table(struct mthca_dev *dev);
370void mthca_cleanup_eq_table(struct mthca_dev *dev); 391void mthca_cleanup_eq_table(struct mthca_dev *dev);
371void mthca_cleanup_cq_table(struct mthca_dev *dev); 392void mthca_cleanup_cq_table(struct mthca_dev *dev);
393void mthca_cleanup_srq_table(struct mthca_dev *dev);
372void mthca_cleanup_qp_table(struct mthca_dev *dev); 394void mthca_cleanup_qp_table(struct mthca_dev *dev);
373void mthca_cleanup_av_table(struct mthca_dev *dev); 395void mthca_cleanup_av_table(struct mthca_dev *dev);
374void mthca_cleanup_mcg_table(struct mthca_dev *dev); 396void mthca_cleanup_mcg_table(struct mthca_dev *dev);
@@ -419,7 +441,19 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
419void mthca_free_cq(struct mthca_dev *dev, 441void mthca_free_cq(struct mthca_dev *dev,
420 struct mthca_cq *cq); 442 struct mthca_cq *cq);
421void mthca_cq_event(struct mthca_dev *dev, u32 cqn); 443void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
422void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn); 444void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
445 struct mthca_srq *srq);
446
447int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
448 struct ib_srq_attr *attr, struct mthca_srq *srq);
449void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
450void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
451 enum ib_event_type event_type);
452void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
453int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
454 struct ib_recv_wr **bad_wr);
455int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
456 struct ib_recv_wr **bad_wr);
423 457
424void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 458void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
425 enum ib_event_type event_type); 459 enum ib_event_type event_type);
@@ -433,7 +467,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
433int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 467int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
434 struct ib_recv_wr **bad_wr); 468 struct ib_recv_wr **bad_wr);
435int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 469int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
436 int index, int *dbd, u32 *new_wqe); 470 int index, int *dbd, __be32 *new_wqe);
437int mthca_alloc_qp(struct mthca_dev *dev, 471int mthca_alloc_qp(struct mthca_dev *dev,
438 struct mthca_pd *pd, 472 struct mthca_pd *pd,
439 struct mthca_cq *send_cq, 473 struct mthca_cq *send_cq,
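The MTHCA_PUT rewrite in mthca_dev.h changes annotation, not behavior: storing through __typeof__(source) hid the byte order from sparse, while casting the destination to __be16/__be32/__be64 makes the big-endian store part of the type. A two-line illustration of what sparse can now flag (x standing for any host-order u32):

	*(u32 *)    __d = cpu_to_be32(x);	/* warning: __be32 stored as plain u32 */
	*(__be32 *) __d = cpu_to_be32(x);	/* clean: byte order carried in the type */

The header also gains the SRQ plumbing (struct mthca_srq_table, the mthca_buf_alloc()/mthca_buf_free() helpers, and the SRQ entry points) consumed by the new mthca_srq.c.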
diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h
index 535fad7710fb..dd9a44d170c9 100644
--- a/drivers/infiniband/hw/mthca/mthca_doorbell.h
+++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -57,13 +58,13 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
57 __raw_writeq((__force u64) val, dest); 58 __raw_writeq((__force u64) val, dest);
58} 59}
59 60
60static inline void mthca_write64(u32 val[2], void __iomem *dest, 61static inline void mthca_write64(__be32 val[2], void __iomem *dest,
61 spinlock_t *doorbell_lock) 62 spinlock_t *doorbell_lock)
62{ 63{
63 __raw_writeq(*(u64 *) val, dest); 64 __raw_writeq(*(u64 *) val, dest);
64} 65}
65 66
66static inline void mthca_write_db_rec(u32 val[2], u32 *db) 67static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
67{ 68{
68 *(u64 *) db = *(u64 *) val; 69 *(u64 *) db = *(u64 *) val;
69} 70}
@@ -86,18 +87,18 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
86 __raw_writel(((__force u32 *) &val)[1], dest + 4); 87 __raw_writel(((__force u32 *) &val)[1], dest + 4);
87} 88}
88 89
89static inline void mthca_write64(u32 val[2], void __iomem *dest, 90static inline void mthca_write64(__be32 val[2], void __iomem *dest,
90 spinlock_t *doorbell_lock) 91 spinlock_t *doorbell_lock)
91{ 92{
92 unsigned long flags; 93 unsigned long flags;
93 94
94 spin_lock_irqsave(doorbell_lock, flags); 95 spin_lock_irqsave(doorbell_lock, flags);
95 __raw_writel(val[0], dest); 96 __raw_writel((__force u32) val[0], dest);
96 __raw_writel(val[1], dest + 4); 97 __raw_writel((__force u32) val[1], dest + 4);
97 spin_unlock_irqrestore(doorbell_lock, flags); 98 spin_unlock_irqrestore(doorbell_lock, flags);
98} 99}
99 100
100static inline void mthca_write_db_rec(u32 val[2], u32 *db) 101static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
101{ 102{
102 db[0] = val[0]; 103 db[0] = val[0];
103 wmb(); 104 wmb();
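In mthca_doorbell.h the doorbell payload is now typed as two big-endian words. The 64-bit path still posts the pair atomically with one __raw_writeq(); the 32-bit path writes two words under doorbell_lock, and the (__force u32) casts tell sparse the reinterpretation is deliberate, since the raw MMIO writers take host-order integers while the payload is already big-endian:

	/* From the 32-bit path above: intentional __be32 to u32 reinterpretation. */
	__raw_writel((__force u32) val[0], dest);
	__raw_writel((__force u32) val[1], dest + 4);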
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index cbcf2b4722e4..18f0981eb0c1 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -51,18 +52,18 @@ enum {
51 * Must be packed because start is 64 bits but only aligned to 32 bits. 52 * Must be packed because start is 64 bits but only aligned to 32 bits.
52 */ 53 */
53struct mthca_eq_context { 54struct mthca_eq_context {
54 u32 flags; 55 __be32 flags;
55 u64 start; 56 __be64 start;
56 u32 logsize_usrpage; 57 __be32 logsize_usrpage;
57 u32 tavor_pd; /* reserved for Arbel */ 58 __be32 tavor_pd; /* reserved for Arbel */
58 u8 reserved1[3]; 59 u8 reserved1[3];
59 u8 intr; 60 u8 intr;
60 u32 arbel_pd; /* lost_count for Tavor */ 61 __be32 arbel_pd; /* lost_count for Tavor */
61 u32 lkey; 62 __be32 lkey;
62 u32 reserved2[2]; 63 u32 reserved2[2];
63 u32 consumer_index; 64 __be32 consumer_index;
64 u32 producer_index; 65 __be32 producer_index;
65 u32 reserved3[4]; 66 u32 reserved3[4];
66} __attribute__((packed)); 67} __attribute__((packed));
67 68
68#define MTHCA_EQ_STATUS_OK ( 0 << 28) 69#define MTHCA_EQ_STATUS_OK ( 0 << 28)
@@ -127,28 +128,28 @@ struct mthca_eqe {
127 union { 128 union {
128 u32 raw[6]; 129 u32 raw[6];
129 struct { 130 struct {
130 u32 cqn; 131 __be32 cqn;
131 } __attribute__((packed)) comp; 132 } __attribute__((packed)) comp;
132 struct { 133 struct {
133 u16 reserved1; 134 u16 reserved1;
134 u16 token; 135 __be16 token;
135 u32 reserved2; 136 u32 reserved2;
136 u8 reserved3[3]; 137 u8 reserved3[3];
137 u8 status; 138 u8 status;
138 u64 out_param; 139 __be64 out_param;
139 } __attribute__((packed)) cmd; 140 } __attribute__((packed)) cmd;
140 struct { 141 struct {
141 u32 qpn; 142 __be32 qpn;
142 } __attribute__((packed)) qp; 143 } __attribute__((packed)) qp;
143 struct { 144 struct {
144 u32 cqn; 145 __be32 cqn;
145 u32 reserved1; 146 u32 reserved1;
146 u8 reserved2[3]; 147 u8 reserved2[3];
147 u8 syndrome; 148 u8 syndrome;
148 } __attribute__((packed)) cq_err; 149 } __attribute__((packed)) cq_err;
149 struct { 150 struct {
150 u32 reserved1[2]; 151 u32 reserved1[2];
151 u32 port; 152 __be32 port;
152 } __attribute__((packed)) port_change; 153 } __attribute__((packed)) port_change;
153 } event; 154 } event;
154 u8 reserved3[3]; 155 u8 reserved3[3];
@@ -167,7 +168,7 @@ static inline u64 async_mask(struct mthca_dev *dev)
167 168
168static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) 169static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
169{ 170{
170 u32 doorbell[2]; 171 __be32 doorbell[2];
171 172
172 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn); 173 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
173 doorbell[1] = cpu_to_be32(ci & (eq->nent - 1)); 174 doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));
@@ -190,8 +191,8 @@ static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u
190{ 191{
191 /* See comment in tavor_set_eq_ci() above. */ 192 /* See comment in tavor_set_eq_ci() above. */
192 wmb(); 193 wmb();
193 __raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base + 194 __raw_writel((__force u32) cpu_to_be32(ci),
194 eq->eqn * 8); 195 dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
195 /* We still want ordering, just not swabbing, so add a barrier */ 196 /* We still want ordering, just not swabbing, so add a barrier */
196 mb(); 197 mb();
197} 198}
@@ -206,7 +207,7 @@ static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
206 207
207static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) 208static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
208{ 209{
209 u32 doorbell[2]; 210 __be32 doorbell[2];
210 211
211 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn); 212 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
212 doorbell[1] = 0; 213 doorbell[1] = 0;
@@ -224,7 +225,7 @@ static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
224static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) 225static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
225{ 226{
226 if (!mthca_is_memfree(dev)) { 227 if (!mthca_is_memfree(dev)) {
227 u32 doorbell[2]; 228 __be32 doorbell[2];
228 229
229 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn); 230 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
230 doorbell[1] = cpu_to_be32(cqn); 231 doorbell[1] = cpu_to_be32(cqn);
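The mthca_eq.c hunks continue the same annotation pass: every field the HCA exchanges in big-endian order (EQ context, EQE payloads, doorbell words) is declared __be16/__be32/__be64, so a missing conversion becomes a type mismatch under sparse's endian checking (conventionally run as make C=2 CF="-D__CHECK_ENDIAN__", assuming that flag is available in this kernel generation). Reading an annotated field therefore always goes through an explicit conversion, for example for a completion event:

	/* cqn is __be32 in the EQE; the top byte is reserved, hence the mask. */
	u32 cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;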
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7df223642015..9804174f7f3c 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
3 * 5 *
4 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,9 +34,9 @@
32 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $ 34 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
33 */ 35 */
34 36
35#include <ib_verbs.h> 37#include <rdma/ib_verbs.h>
36#include <ib_mad.h> 38#include <rdma/ib_mad.h>
37#include <ib_smi.h> 39#include <rdma/ib_smi.h>
38 40
39#include "mthca_dev.h" 41#include "mthca_dev.h"
40#include "mthca_cmd.h" 42#include "mthca_cmd.h"
@@ -192,7 +194,7 @@ int mthca_process_mad(struct ib_device *ibdev,
192{ 194{
193 int err; 195 int err;
194 u8 status; 196 u8 status;
195 u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE; 197 u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
196 198
197 /* Forward locally generated traps to the SM */ 199 /* Forward locally generated traps to the SM */
198 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && 200 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
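mthca_mad.c picks up the relocated headers (the InfiniBand includes now live under include/rdma/) and one subtle type fix: assuming IB_LID_PERMISSIVE is now defined in network byte order as cpu_to_be16(0xFFFF), assigning it to the host-order slid needs a conversion. The value is unchanged, 0xFFFF being byte-order symmetric, but the types now agree for sparse:

	/* in_wc->slid is host order; IB_LID_PERMISSIVE is __be16. */
	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);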
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 2ef916859e17..3241d6c9dc11 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +35,6 @@
34 */ 35 */
35 36
36#include <linux/config.h> 37#include <linux/config.h>
37#include <linux/version.h>
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/errno.h> 40#include <linux/errno.h>
@@ -171,6 +171,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
171 mdev->limits.reserved_mrws = dev_lim->reserved_mrws; 171 mdev->limits.reserved_mrws = dev_lim->reserved_mrws;
172 mdev->limits.reserved_uars = dev_lim->reserved_uars; 172 mdev->limits.reserved_uars = dev_lim->reserved_uars;
173 mdev->limits.reserved_pds = dev_lim->reserved_pds; 173 mdev->limits.reserved_pds = dev_lim->reserved_pds;
174 mdev->limits.port_width_cap = dev_lim->max_port_width;
174 175
175 /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. 176 /* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
176 May be doable since hardware supports it for SRQ. 177 May be doable since hardware supports it for SRQ.
@@ -212,7 +213,6 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
212 struct mthca_dev_lim dev_lim; 213 struct mthca_dev_lim dev_lim;
213 struct mthca_profile profile; 214 struct mthca_profile profile;
214 struct mthca_init_hca_param init_hca; 215 struct mthca_init_hca_param init_hca;
215 struct mthca_adapter adapter;
216 216
217 err = mthca_SYS_EN(mdev, &status); 217 err = mthca_SYS_EN(mdev, &status);
218 if (err) { 218 if (err) {
@@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
253 profile = default_profile; 253 profile = default_profile;
254 profile.num_uar = dev_lim.uar_size / PAGE_SIZE; 254 profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
255 profile.uarc_size = 0; 255 profile.uarc_size = 0;
256 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
257 profile.num_srq = dev_lim.max_srqs;
256 258
257 err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 259 err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
258 if (err < 0) 260 if (err < 0)
@@ -270,26 +272,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
270 goto err_disable; 272 goto err_disable;
271 } 273 }
272 274
273 err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
274 if (err) {
275 mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
276 goto err_close;
277 }
278 if (status) {
279 mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
280 "aborting.\n", status);
281 err = -EINVAL;
282 goto err_close;
283 }
284
285 mdev->eq_table.inta_pin = adapter.inta_pin;
286 mdev->rev_id = adapter.revision_id;
287
288 return 0; 275 return 0;
289 276
290err_close:
291 mthca_CLOSE_HCA(mdev, 0, &status);
292
293err_disable: 277err_disable:
294 mthca_SYS_DIS(mdev, &status); 278 mthca_SYS_DIS(mdev, &status);
295 279
@@ -442,15 +426,29 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
442 } 426 }
443 427
444 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, 428 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
445 dev_lim->cqc_entry_sz, 429 dev_lim->cqc_entry_sz,
446 mdev->limits.num_cqs, 430 mdev->limits.num_cqs,
447 mdev->limits.reserved_cqs, 0); 431 mdev->limits.reserved_cqs, 0);
448 if (!mdev->cq_table.table) { 432 if (!mdev->cq_table.table) {
449 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); 433 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
450 err = -ENOMEM; 434 err = -ENOMEM;
451 goto err_unmap_rdb; 435 goto err_unmap_rdb;
452 } 436 }
453 437
438 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
439 mdev->srq_table.table =
440 mthca_alloc_icm_table(mdev, init_hca->srqc_base,
441 dev_lim->srq_entry_sz,
442 mdev->limits.num_srqs,
443 mdev->limits.reserved_srqs, 0);
444 if (!mdev->srq_table.table) {
445 mthca_err(mdev, "Failed to map SRQ context memory, "
446 "aborting.\n");
447 err = -ENOMEM;
448 goto err_unmap_cq;
449 }
450 }
451
454 /* 452 /*
455 * It's not strictly required, but for simplicity just map the 453 * It's not strictly required, but for simplicity just map the
456 * whole multicast group table now. The table isn't very big 454 * whole multicast group table now. The table isn't very big
@@ -466,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
466 if (!mdev->mcg_table.table) { 464 if (!mdev->mcg_table.table) {
467 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); 465 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
468 err = -ENOMEM; 466 err = -ENOMEM;
469 goto err_unmap_cq; 467 goto err_unmap_srq;
470 } 468 }
471 469
472 return 0; 470 return 0;
473 471
472err_unmap_srq:
473 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
474 mthca_free_icm_table(mdev, mdev->srq_table.table);
475
474err_unmap_cq: 476err_unmap_cq:
475 mthca_free_icm_table(mdev, mdev->cq_table.table); 477 mthca_free_icm_table(mdev, mdev->cq_table.table);
476 478
@@ -506,7 +508,6 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
506 struct mthca_dev_lim dev_lim; 508 struct mthca_dev_lim dev_lim;
507 struct mthca_profile profile; 509 struct mthca_profile profile;
508 struct mthca_init_hca_param init_hca; 510 struct mthca_init_hca_param init_hca;
509 struct mthca_adapter adapter;
510 u64 icm_size; 511 u64 icm_size;
511 u8 status; 512 u8 status;
512 int err; 513 int err;
@@ -551,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
551 profile = default_profile; 552 profile = default_profile;
552 profile.num_uar = dev_lim.uar_size / PAGE_SIZE; 553 profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
553 profile.num_udav = 0; 554 profile.num_udav = 0;
555 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
556 profile.num_srq = dev_lim.max_srqs;
554 557
555 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 558 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
556 if ((int) icm_size < 0) { 559 if ((int) icm_size < 0) {
@@ -574,24 +577,11 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
574 goto err_free_icm; 577 goto err_free_icm;
575 } 578 }
576 579
577 err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
578 if (err) {
579 mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
580 goto err_free_icm;
581 }
582 if (status) {
583 mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
584 "aborting.\n", status);
585 err = -EINVAL;
586 goto err_free_icm;
587 }
588
589 mdev->eq_table.inta_pin = adapter.inta_pin;
590 mdev->rev_id = adapter.revision_id;
591
592 return 0; 580 return 0;
593 581
594err_free_icm: 582err_free_icm:
583 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
584 mthca_free_icm_table(mdev, mdev->srq_table.table);
595 mthca_free_icm_table(mdev, mdev->cq_table.table); 585 mthca_free_icm_table(mdev, mdev->cq_table.table);
596 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); 586 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
597 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); 587 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
@@ -614,12 +604,70 @@ err_disable:
614 return err; 604 return err;
615} 605}
616 606
607static void mthca_close_hca(struct mthca_dev *mdev)
608{
609 u8 status;
610
611 mthca_CLOSE_HCA(mdev, 0, &status);
612
613 if (mthca_is_memfree(mdev)) {
614 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
615 mthca_free_icm_table(mdev, mdev->srq_table.table);
616 mthca_free_icm_table(mdev, mdev->cq_table.table);
617 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
618 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
619 mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
620 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
621 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
622 mthca_unmap_eq_icm(mdev);
623
624 mthca_UNMAP_ICM_AUX(mdev, &status);
625 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
626
627 mthca_UNMAP_FA(mdev, &status);
628 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
629
630 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
631 mthca_DISABLE_LAM(mdev, &status);
632 } else
633 mthca_SYS_DIS(mdev, &status);
634}
635
617static int __devinit mthca_init_hca(struct mthca_dev *mdev) 636static int __devinit mthca_init_hca(struct mthca_dev *mdev)
618{ 637{
638 u8 status;
639 int err;
640 struct mthca_adapter adapter;
641
619 if (mthca_is_memfree(mdev)) 642 if (mthca_is_memfree(mdev))
620 return mthca_init_arbel(mdev); 643 err = mthca_init_arbel(mdev);
621 else 644 else
622 return mthca_init_tavor(mdev); 645 err = mthca_init_tavor(mdev);
646
647 if (err)
648 return err;
649
650 err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
651 if (err) {
652 mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
653 goto err_close;
654 }
655 if (status) {
656 mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
657 "aborting.\n", status);
658 err = -EINVAL;
659 goto err_close;
660 }
661
662 mdev->eq_table.inta_pin = adapter.inta_pin;
663 mdev->rev_id = adapter.revision_id;
664 memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
665
666 return 0;
667
668err_close:
669 mthca_close_hca(mdev);
670 return err;
623} 671}
624 672
625static int __devinit mthca_setup_hca(struct mthca_dev *dev) 673static int __devinit mthca_setup_hca(struct mthca_dev *dev)
@@ -709,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev)
709 goto err_cmd_poll; 757 goto err_cmd_poll;
710 } 758 }
711 759
760 err = mthca_init_srq_table(dev);
761 if (err) {
762 mthca_err(dev, "Failed to initialize "
763 "shared receive queue table, aborting.\n");
764 goto err_cq_table_free;
765 }
766
712 err = mthca_init_qp_table(dev); 767 err = mthca_init_qp_table(dev);
713 if (err) { 768 if (err) {
714 mthca_err(dev, "Failed to initialize " 769 mthca_err(dev, "Failed to initialize "
715 "queue pair table, aborting.\n"); 770 "queue pair table, aborting.\n");
716 goto err_cq_table_free; 771 goto err_srq_table_free;
717 } 772 }
718 773
719 err = mthca_init_av_table(dev); 774 err = mthca_init_av_table(dev);
@@ -738,6 +793,9 @@ err_av_table_free:
738err_qp_table_free: 793err_qp_table_free:
739 mthca_cleanup_qp_table(dev); 794 mthca_cleanup_qp_table(dev);
740 795
796err_srq_table_free:
797 mthca_cleanup_srq_table(dev);
798
741err_cq_table_free: 799err_cq_table_free:
742 mthca_cleanup_cq_table(dev); 800 mthca_cleanup_cq_table(dev);
743 801
@@ -844,33 +902,6 @@ static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev)
844 return 0; 902 return 0;
845} 903}
846 904
847static void mthca_close_hca(struct mthca_dev *mdev)
848{
849 u8 status;
850
851 mthca_CLOSE_HCA(mdev, 0, &status);
852
853 if (mthca_is_memfree(mdev)) {
854 mthca_free_icm_table(mdev, mdev->cq_table.table);
855 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
856 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
857 mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
858 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
859 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
860 mthca_unmap_eq_icm(mdev);
861
862 mthca_UNMAP_ICM_AUX(mdev, &status);
863 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
864
865 mthca_UNMAP_FA(mdev, &status);
866 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
867
868 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
869 mthca_DISABLE_LAM(mdev, &status);
870 } else
871 mthca_SYS_DIS(mdev, &status);
872}
873
874/* Types of supported HCA */ 905/* Types of supported HCA */
875enum { 906enum {
876 TAVOR, /* MT23108 */ 907 TAVOR, /* MT23108 */
@@ -887,9 +918,9 @@ static struct {
887 int is_memfree; 918 int is_memfree;
888 int is_pcie; 919 int is_pcie;
889} mthca_hca_table[] = { 920} mthca_hca_table[] = {
890 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 2), .is_memfree = 0, .is_pcie = 0 }, 921 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 3), .is_memfree = 0, .is_pcie = 0 },
891 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 6, 2), .is_memfree = 0, .is_pcie = 1 }, 922 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 0), .is_memfree = 0, .is_pcie = 1 },
892 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 0, 1), .is_memfree = 1, .is_pcie = 1 }, 923 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), .is_memfree = 1, .is_pcie = 1 },
893 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 } 924 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 }
894}; 925};
895 926
@@ -1051,6 +1082,7 @@ err_cleanup:
1051 mthca_cleanup_mcg_table(mdev); 1082 mthca_cleanup_mcg_table(mdev);
1052 mthca_cleanup_av_table(mdev); 1083 mthca_cleanup_av_table(mdev);
1053 mthca_cleanup_qp_table(mdev); 1084 mthca_cleanup_qp_table(mdev);
1085 mthca_cleanup_srq_table(mdev);
1054 mthca_cleanup_cq_table(mdev); 1086 mthca_cleanup_cq_table(mdev);
1055 mthca_cmd_use_polling(mdev); 1087 mthca_cmd_use_polling(mdev);
1056 mthca_cleanup_eq_table(mdev); 1088 mthca_cleanup_eq_table(mdev);
@@ -1100,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
1100 mthca_cleanup_mcg_table(mdev); 1132 mthca_cleanup_mcg_table(mdev);
1101 mthca_cleanup_av_table(mdev); 1133 mthca_cleanup_av_table(mdev);
1102 mthca_cleanup_qp_table(mdev); 1134 mthca_cleanup_qp_table(mdev);
1135 mthca_cleanup_srq_table(mdev);
1103 mthca_cleanup_cq_table(mdev); 1136 mthca_cleanup_cq_table(mdev);
1104 mthca_cmd_use_polling(mdev); 1137 mthca_cmd_use_polling(mdev);
1105 mthca_cleanup_eq_table(mdev); 1138 mthca_cleanup_eq_table(mdev);
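Three threads run through the mthca_main.c hunks: QUERY_ADAPTER is hoisted out of the Tavor and Arbel init paths into the common mthca_init_hca(), which also records the new board_id; mthca_close_hca() moves above its new caller and learns to free the SRQ context table; and the SRQ table slots between the CQ and QP tables in both directions, keeping setup and teardown strictly last-in, first-out. Condensed from the hunks above, the pairing reads:

	err = mthca_init_cq_table(dev);
	if (err)
		goto err_cmd_poll;

	err = mthca_init_srq_table(dev);
	if (err)
		goto err_cq_table_free;

	err = mthca_init_qp_table(dev);
	if (err)
		goto err_srq_table_free;
	...
err_srq_table_free:
	mthca_cleanup_srq_table(dev);

err_cq_table_free:
	mthca_cleanup_cq_table(dev);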
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 5be7d949dbf6..a2707605f4c8 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -42,10 +42,10 @@ enum {
42}; 42};
43 43
44struct mthca_mgm { 44struct mthca_mgm {
45 u32 next_gid_index; 45 __be32 next_gid_index;
46 u32 reserved[3]; 46 u32 reserved[3];
47 u8 gid[16]; 47 u8 gid[16];
48 u32 qp[MTHCA_QP_PER_MGM]; 48 __be32 qp[MTHCA_QP_PER_MGM];
49}; 49};
50 50
51static const u8 zero_gid[16]; /* automatically initialized to 0 */ 51static const u8 zero_gid[16]; /* automatically initialized to 0 */
@@ -94,10 +94,14 @@ static int find_mgm(struct mthca_dev *dev,
94 if (0) 94 if (0)
95 mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:" 95 mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:"
96 "%04x:%04x:%04x:%04x is %04x\n", 96 "%04x:%04x:%04x:%04x is %04x\n",
97 be16_to_cpu(((u16 *) gid)[0]), be16_to_cpu(((u16 *) gid)[1]), 97 be16_to_cpu(((__be16 *) gid)[0]),
98 be16_to_cpu(((u16 *) gid)[2]), be16_to_cpu(((u16 *) gid)[3]), 98 be16_to_cpu(((__be16 *) gid)[1]),
99 be16_to_cpu(((u16 *) gid)[4]), be16_to_cpu(((u16 *) gid)[5]), 99 be16_to_cpu(((__be16 *) gid)[2]),
100 be16_to_cpu(((u16 *) gid)[6]), be16_to_cpu(((u16 *) gid)[7]), 100 be16_to_cpu(((__be16 *) gid)[3]),
101 be16_to_cpu(((__be16 *) gid)[4]),
102 be16_to_cpu(((__be16 *) gid)[5]),
103 be16_to_cpu(((__be16 *) gid)[6]),
104 be16_to_cpu(((__be16 *) gid)[7]),
101 *hash); 105 *hash);
102 106
103 *index = *hash; 107 *index = *hash;
@@ -258,14 +262,14 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
258 if (index == -1) { 262 if (index == -1) {
259 mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " 263 mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
260 "not found\n", 264 "not found\n",
261 be16_to_cpu(((u16 *) gid->raw)[0]), 265 be16_to_cpu(((__be16 *) gid->raw)[0]),
262 be16_to_cpu(((u16 *) gid->raw)[1]), 266 be16_to_cpu(((__be16 *) gid->raw)[1]),
263 be16_to_cpu(((u16 *) gid->raw)[2]), 267 be16_to_cpu(((__be16 *) gid->raw)[2]),
264 be16_to_cpu(((u16 *) gid->raw)[3]), 268 be16_to_cpu(((__be16 *) gid->raw)[3]),
265 be16_to_cpu(((u16 *) gid->raw)[4]), 269 be16_to_cpu(((__be16 *) gid->raw)[4]),
266 be16_to_cpu(((u16 *) gid->raw)[5]), 270 be16_to_cpu(((__be16 *) gid->raw)[5]),
267 be16_to_cpu(((u16 *) gid->raw)[6]), 271 be16_to_cpu(((__be16 *) gid->raw)[6]),
268 be16_to_cpu(((u16 *) gid->raw)[7])); 272 be16_to_cpu(((__be16 *) gid->raw)[7]));
269 err = -EINVAL; 273 err = -EINVAL;
270 goto out; 274 goto out;
271 } 275 }
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 2a8646150355..1827400f189b 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -285,6 +286,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
285{ 286{
286 struct mthca_icm_table *table; 287 struct mthca_icm_table *table;
287 int num_icm; 288 int num_icm;
289 unsigned chunk_size;
288 int i; 290 int i;
289 u8 status; 291 u8 status;
290 292
@@ -305,7 +307,11 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
305 table->icm[i] = NULL; 307 table->icm[i] = NULL;
306 308
307 for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { 309 for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
308 table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, 310 chunk_size = MTHCA_TABLE_CHUNK_SIZE;
311 if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
312 chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;
313
314 table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
309 (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | 315 (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
310 __GFP_NOWARN); 316 __GFP_NOWARN);
311 if (!table->icm[i]) 317 if (!table->icm[i])
@@ -481,7 +487,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
481 } 487 }
482} 488}
483 489
484int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db) 490int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
485{ 491{
486 int group; 492 int group;
487 int start, end, dir; 493 int start, end, dir;
@@ -564,7 +570,7 @@ found:
564 570
565 page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)); 571 page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));
566 572
567 *db = (u32 *) &page->db_rec[j]; 573 *db = (__be32 *) &page->db_rec[j];
568 574
569out: 575out:
570 up(&dev->db_tab->mutex); 576 up(&dev->db_tab->mutex);
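The chunk_size clamp in mthca_alloc_icm_table() stops the final ICM chunk from over-allocating when the table size is not a multiple of MTHCA_TABLE_CHUNK_SIZE. With made-up numbers, obj_size = 32 and nobj = 10,000 give a 320,000-byte table; taking MTHCA_TABLE_CHUNK_SIZE as 256 KiB (1 << 18 = 262,144, per mthca_memfree.h), chunk 0 stays full-sized while chunk 1 shrinks to 320,000 - 262,144 = 57,856 bytes instead of another full 256 KiB:

	chunk_size = MTHCA_TABLE_CHUNK_SIZE;
	if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
		chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;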
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index 4761d844cb5f..bafa51544aa3 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -137,7 +138,7 @@ enum {
137 138
138struct mthca_db_page { 139struct mthca_db_page {
139 DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE); 140 DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
140 u64 *db_rec; 141 __be64 *db_rec;
141 dma_addr_t mapping; 142 dma_addr_t mapping;
142}; 143};
143 144
@@ -172,7 +173,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
172 173
173int mthca_init_db_tab(struct mthca_dev *dev); 174int mthca_init_db_tab(struct mthca_dev *dev);
174void mthca_cleanup_db_tab(struct mthca_dev *dev); 175void mthca_cleanup_db_tab(struct mthca_dev *dev);
175int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db); 176int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db);
176void mthca_free_db(struct mthca_dev *dev, int type, int db_index); 177void mthca_free_db(struct mthca_dev *dev, int type, int db_index);
177 178
178#endif /* MTHCA_MEMFREE_H */ 179#endif /* MTHCA_MEMFREE_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index cbe50feaf680..1f97a44477f5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -50,18 +51,18 @@ struct mthca_mtt {
50 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 51 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
51 */ 52 */
52struct mthca_mpt_entry { 53struct mthca_mpt_entry {
53 u32 flags; 54 __be32 flags;
54 u32 page_size; 55 __be32 page_size;
55 u32 key; 56 __be32 key;
56 u32 pd; 57 __be32 pd;
57 u64 start; 58 __be64 start;
58 u64 length; 59 __be64 length;
59 u32 lkey; 60 __be32 lkey;
60 u32 window_count; 61 __be32 window_count;
61 u32 window_count_limit; 62 __be32 window_count_limit;
62 u64 mtt_seg; 63 __be64 mtt_seg;
63 u32 mtt_sz; /* Arbel only */ 64 __be32 mtt_sz; /* Arbel only */
64 u32 reserved[2]; 65 u32 reserved[2];
65} __attribute__((packed)); 66} __attribute__((packed));
66 67
67#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) 68#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
@@ -247,7 +248,7 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
247 int start_index, u64 *buffer_list, int list_len) 248 int start_index, u64 *buffer_list, int list_len)
248{ 249{
249 struct mthca_mailbox *mailbox; 250 struct mthca_mailbox *mailbox;
250 u64 *mtt_entry; 251 __be64 *mtt_entry;
251 int err = 0; 252 int err = 0;
252 u8 status; 253 u8 status;
253 int i; 254 int i;
@@ -389,7 +390,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
389 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { 390 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
390 if (i % 4 == 0) 391 if (i % 4 == 0)
391 printk("[%02x] ", i * 4); 392 printk("[%02x] ", i * 4);
392 printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); 393 printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
393 if ((i + 1) % 4 == 0) 394 if ((i + 1) % 4 == 0)
394 printk("\n"); 395 printk("\n");
395 } 396 }
@@ -458,7 +459,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
458static void mthca_free_region(struct mthca_dev *dev, u32 lkey) 459static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
459{ 460{
460 mthca_table_put(dev, dev->mr_table.mpt_table, 461 mthca_table_put(dev, dev->mr_table.mpt_table,
461 arbel_key_to_hw_index(lkey)); 462 key_to_hw_index(dev, lkey));
462 463
463 mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); 464 mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
464} 465}
@@ -562,7 +563,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
562 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { 563 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
563 if (i % 4 == 0) 564 if (i % 4 == 0)
564 printk("[%02x] ", i * 4); 565 printk("[%02x] ", i * 4);
565 printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); 566 printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
566 if ((i + 1) % 4 == 0) 567 if ((i + 1) % 4 == 0)
567 printk("\n"); 568 printk("\n");
568 } 569 }
@@ -669,7 +670,7 @@ int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
669 mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size)); 670 mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
670 mpt_entry.start = cpu_to_be64(iova); 671 mpt_entry.start = cpu_to_be64(iova);
671 672
672 writel(mpt_entry.lkey, &fmr->mem.tavor.mpt->key); 673 __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
673 memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start, 674 memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
674 offsetof(struct mthca_mpt_entry, window_count) - 675 offsetof(struct mthca_mpt_entry, window_count) -
675 offsetof(struct mthca_mpt_entry, start)); 676 offsetof(struct mthca_mpt_entry, start));
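The one functional fix in mthca_mr.c sits in mthca_free_region(): it applied the Arbel-only key swizzle unconditionally, so on Tavor-family HCAs the wrong MPT table entry could be released. Going through key_to_hw_index() selects the translation per HCA family; the dispatch in this file is roughly as follows (reproduced from memory, so treat the rotation details as indicative):

	static u32 tavor_key_to_hw_index(u32 key)
	{
		return key;				/* Tavor: the key is the index */
	}

	static u32 arbel_key_to_hw_index(u32 key)
	{
		return (key << 24) | (key >> 8);	/* undo Arbel's byte rotation */
	}

	static u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
	{
		return mthca_is_memfree(dev) ?
			arbel_key_to_hw_index(key) : tavor_key_to_hw_index(key);
	}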
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index c2c899844e98..3dbf06a6e6f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 4fedc32d5871..0576056b34f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -101,6 +102,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
101 profile[MTHCA_RES_UARC].size = request->uarc_size; 102 profile[MTHCA_RES_UARC].size = request->uarc_size;
102 103
103 profile[MTHCA_RES_QP].num = request->num_qp; 104 profile[MTHCA_RES_QP].num = request->num_qp;
105 profile[MTHCA_RES_SRQ].num = request->num_srq;
104 profile[MTHCA_RES_EQP].num = request->num_qp; 106 profile[MTHCA_RES_EQP].num = request->num_qp;
105 profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; 107 profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp;
106 profile[MTHCA_RES_CQ].num = request->num_cq; 108 profile[MTHCA_RES_CQ].num = request->num_cq;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h
index 17aef3357661..94641808f97f 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.h
+++ b/drivers/infiniband/hw/mthca/mthca_profile.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +42,7 @@
41struct mthca_profile { 42struct mthca_profile {
42 int num_qp; 43 int num_qp;
43 int rdb_per_qp; 44 int rdb_per_qp;
45 int num_srq;
44 int num_cq; 46 int num_cq;
45 int num_mcg; 47 int num_mcg;
46 int num_mpt; 48 int num_mpt;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 81919a7b4935..1c1c2e230871 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -2,6 +2,8 @@
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5 * 7 *
6 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 9 * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,7 @@
34 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $ 36 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
35 */ 37 */
36 38
37#include <ib_smi.h> 39#include <rdma/ib_smi.h>
38#include <linux/mm.h> 40#include <linux/mm.h>
39 41
40#include "mthca_dev.h" 42#include "mthca_dev.h"
@@ -79,10 +81,10 @@ static int mthca_query_device(struct ib_device *ibdev,
79 } 81 }
80 82
81 props->device_cap_flags = mdev->device_cap_flags; 83 props->device_cap_flags = mdev->device_cap_flags;
82 props->vendor_id = be32_to_cpup((u32 *) (out_mad->data + 36)) & 84 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
83 0xffffff; 85 0xffffff;
84 props->vendor_part_id = be16_to_cpup((u16 *) (out_mad->data + 30)); 86 props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
85 props->hw_ver = be16_to_cpup((u16 *) (out_mad->data + 32)); 87 props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32));
86 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 88 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
87 memcpy(&props->node_guid, out_mad->data + 12, 8); 89 memcpy(&props->node_guid, out_mad->data + 12, 8);
88 90
@@ -118,6 +120,8 @@ static int mthca_query_port(struct ib_device *ibdev,
118 if (!in_mad || !out_mad) 120 if (!in_mad || !out_mad)
119 goto out; 121 goto out;
120 122
123 memset(props, 0, sizeof *props);
124
121 memset(in_mad, 0, sizeof *in_mad); 125 memset(in_mad, 0, sizeof *in_mad);
122 in_mad->base_version = 1; 126 in_mad->base_version = 1;
123 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 127 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
@@ -136,16 +140,17 @@ static int mthca_query_port(struct ib_device *ibdev,
136 goto out; 140 goto out;
137 } 141 }
138 142
139 props->lid = be16_to_cpup((u16 *) (out_mad->data + 16)); 143 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
140 props->lmc = out_mad->data[34] & 0x7; 144 props->lmc = out_mad->data[34] & 0x7;
141 props->sm_lid = be16_to_cpup((u16 *) (out_mad->data + 18)); 145 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
142 props->sm_sl = out_mad->data[36] & 0xf; 146 props->sm_sl = out_mad->data[36] & 0xf;
143 props->state = out_mad->data[32] & 0xf; 147 props->state = out_mad->data[32] & 0xf;
144 props->phys_state = out_mad->data[33] >> 4; 148 props->phys_state = out_mad->data[33] >> 4;
145 props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20)); 149 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
146 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; 150 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
151 props->max_msg_sz = 0x80000000;
147 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; 152 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
148 props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48)); 153 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
149 props->active_width = out_mad->data[31] & 0xf; 154 props->active_width = out_mad->data[31] & 0xf;
150 props->active_speed = out_mad->data[35] >> 4; 155 props->active_speed = out_mad->data[35] >> 4;
151 156
@@ -221,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
221 goto out; 226 goto out;
222 } 227 }
223 228
224 *pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]); 229 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
225 230
226 out: 231 out:
227 kfree(in_mad); 232 kfree(in_mad);
@@ -420,6 +425,77 @@ static int mthca_ah_destroy(struct ib_ah *ah)
420 return 0; 425 return 0;
421} 426}
422 427
428static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
429 struct ib_srq_init_attr *init_attr,
430 struct ib_udata *udata)
431{
432 struct mthca_create_srq ucmd;
433 struct mthca_ucontext *context = NULL;
434 struct mthca_srq *srq;
435 int err;
436
437 srq = kmalloc(sizeof *srq, GFP_KERNEL);
438 if (!srq)
439 return ERR_PTR(-ENOMEM);
440
441 if (pd->uobject) {
442 context = to_mucontext(pd->uobject->context);
443
444 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
445 return ERR_PTR(-EFAULT);
446
447 err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
448 context->db_tab, ucmd.db_index,
449 ucmd.db_page);
450
451 if (err)
452 goto err_free;
453
454 srq->mr.ibmr.lkey = ucmd.lkey;
455 srq->db_index = ucmd.db_index;
456 }
457
458 err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
459 &init_attr->attr, srq);
460
461 if (err && pd->uobject)
462 mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
463 context->db_tab, ucmd.db_index);
464
465 if (err)
466 goto err_free;
467
468 if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
469 mthca_free_srq(to_mdev(pd->device), srq);
470 err = -EFAULT;
471 goto err_free;
472 }
473
474 return &srq->ibsrq;
475
476err_free:
477 kfree(srq);
478
479 return ERR_PTR(err);
480}
481
482static int mthca_destroy_srq(struct ib_srq *srq)
483{
484 struct mthca_ucontext *context;
485
486 if (srq->uobject) {
487 context = to_mucontext(srq->uobject->context);
488
489 mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
490 context->db_tab, to_msrq(srq)->db_index);
491 }
492
493 mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
494 kfree(srq);
495
496 return 0;
497}
498
423static struct ib_qp *mthca_create_qp(struct ib_pd *pd, 499static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
424 struct ib_qp_init_attr *init_attr, 500 struct ib_qp_init_attr *init_attr,
425 struct ib_udata *udata) 501 struct ib_udata *udata)
@@ -956,14 +1032,22 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
956 } 1032 }
957} 1033}
958 1034
1035static ssize_t show_board(struct class_device *cdev, char *buf)
1036{
1037 struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
1038 return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
1039}
1040
959static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 1041static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
960static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 1042static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
961static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 1043static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1044static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
962 1045
963static struct class_device_attribute *mthca_class_attributes[] = { 1046static struct class_device_attribute *mthca_class_attributes[] = {
964 &class_device_attr_hw_rev, 1047 &class_device_attr_hw_rev,
965 &class_device_attr_fw_ver, 1048 &class_device_attr_fw_ver,
966 &class_device_attr_hca_type 1049 &class_device_attr_hca_type,
1050 &class_device_attr_board_id
967}; 1051};
968 1052
969int mthca_register_device(struct mthca_dev *dev) 1053int mthca_register_device(struct mthca_dev *dev)
@@ -990,6 +1074,17 @@ int mthca_register_device(struct mthca_dev *dev)
990 dev->ib_dev.dealloc_pd = mthca_dealloc_pd; 1074 dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
991 dev->ib_dev.create_ah = mthca_ah_create; 1075 dev->ib_dev.create_ah = mthca_ah_create;
992 dev->ib_dev.destroy_ah = mthca_ah_destroy; 1076 dev->ib_dev.destroy_ah = mthca_ah_destroy;
1077
1078 if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
1079 dev->ib_dev.create_srq = mthca_create_srq;
1080 dev->ib_dev.destroy_srq = mthca_destroy_srq;
1081
1082 if (mthca_is_memfree(dev))
1083 dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
1084 else
1085 dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
1086 }
1087
993 dev->ib_dev.create_qp = mthca_create_qp; 1088 dev->ib_dev.create_qp = mthca_create_qp;
994 dev->ib_dev.modify_qp = mthca_modify_qp; 1089 dev->ib_dev.modify_qp = mthca_modify_qp;
995 dev->ib_dev.destroy_qp = mthca_destroy_qp; 1090 dev->ib_dev.destroy_qp = mthca_destroy_qp;
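mthca_provider.c registers the SRQ verbs only when the HCA reports MTHCA_FLAG_SRQ, choosing the Tavor or Arbel receive path once at registration time; mthca_create_srq() additionally maps the userspace doorbell page when invoked through uverbs. From a kernel consumer the new verb is reached via the generic API; a hedged sketch, with ib_create_srq() and struct ib_srq_init_attr as added to ib_verbs.h by this merge and pd assumed to be an existing struct ib_pd pointer:

	struct ib_srq_init_attr init_attr = {
		.attr = {
			.max_wr	 = 128,	/* illustrative sizes */
			.max_sge = 1,
		},
	};
	struct ib_srq *srq = ib_create_srq(pd, &init_attr);

	if (IS_ERR(srq))
		return PTR_ERR(srq);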
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 1d032791cc8b..bcd4b01a339c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -36,8 +37,8 @@
36#ifndef MTHCA_PROVIDER_H 37#ifndef MTHCA_PROVIDER_H
37#define MTHCA_PROVIDER_H 38#define MTHCA_PROVIDER_H
38 39
39#include <ib_verbs.h> 40#include <rdma/ib_verbs.h>
40#include <ib_pack.h> 41#include <rdma/ib_pack.h>
41 42
42#define MTHCA_MPT_FLAG_ATOMIC (1 << 14) 43#define MTHCA_MPT_FLAG_ATOMIC (1 << 14)
43#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) 44#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13)
@@ -50,6 +51,11 @@ struct mthca_buf_list {
50 DECLARE_PCI_UNMAP_ADDR(mapping) 51 DECLARE_PCI_UNMAP_ADDR(mapping)
51}; 52};
52 53
54union mthca_buf {
55 struct mthca_buf_list direct;
56 struct mthca_buf_list *page_list;
57};
58
53struct mthca_uar { 59struct mthca_uar {
54 unsigned long pfn; 60 unsigned long pfn;
55 int index; 61 int index;
@@ -181,19 +187,39 @@ struct mthca_cq {
181 187
182 /* Next fields are Arbel only */ 188 /* Next fields are Arbel only */
183 int set_ci_db_index; 189 int set_ci_db_index;
184 u32 *set_ci_db; 190 __be32 *set_ci_db;
185 int arm_db_index; 191 int arm_db_index;
186 u32 *arm_db; 192 __be32 *arm_db;
187 int arm_sn; 193 int arm_sn;
188 194
189 union { 195 union mthca_buf queue;
190 struct mthca_buf_list direct;
191 struct mthca_buf_list *page_list;
192 } queue;
193 struct mthca_mr mr; 196 struct mthca_mr mr;
194 wait_queue_head_t wait; 197 wait_queue_head_t wait;
195}; 198};
196 199
200struct mthca_srq {
201 struct ib_srq ibsrq;
202 spinlock_t lock;
203 atomic_t refcount;
204 int srqn;
205 int max;
206 int max_gs;
207 int wqe_shift;
208 int first_free;
209 int last_free;
210 u16 counter; /* Arbel only */
211 int db_index; /* Arbel only */
212 __be32 *db; /* Arbel only */
213 void *last;
214
215 int is_direct;
216 u64 *wrid;
217 union mthca_buf queue;
218 struct mthca_mr mr;
219
220 wait_queue_head_t wait;
221};
222
197struct mthca_wq { 223struct mthca_wq {
198 spinlock_t lock; 224 spinlock_t lock;
199 int max; 225 int max;
@@ -206,7 +232,7 @@ struct mthca_wq {
206 int wqe_shift; 232 int wqe_shift;
207 233
208 int db_index; /* Arbel only */ 234 int db_index; /* Arbel only */
209 u32 *db; 235 __be32 *db;
210}; 236};
211 237
212struct mthca_qp { 238struct mthca_qp {
@@ -227,10 +253,7 @@ struct mthca_qp {
227 int send_wqe_offset; 253 int send_wqe_offset;
228 254
229 u64 *wrid; 255 u64 *wrid;
230 union { 256 union mthca_buf queue;
231 struct mthca_buf_list direct;
232 struct mthca_buf_list *page_list;
233 } queue;
234 257
235 wait_queue_head_t wait; 258 wait_queue_head_t wait;
236}; 259};
@@ -277,6 +300,11 @@ static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
277 return container_of(ibcq, struct mthca_cq, ibcq); 300 return container_of(ibcq, struct mthca_cq, ibcq);
278} 301}
279 302
303static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
304{
305 return container_of(ibsrq, struct mthca_srq, ibsrq);
306}
307
280static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp) 308static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
281{ 309{
282 return container_of(ibqp, struct mthca_qp, ibqp); 310 return container_of(ibqp, struct mthca_qp, ibqp);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f7126b14d5ae..0164b84d4ec6 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
4 * 6 *
5 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -35,13 +37,14 @@
35 37
36#include <linux/init.h> 38#include <linux/init.h>
37 39
38#include <ib_verbs.h> 40#include <rdma/ib_verbs.h>
39#include <ib_cache.h> 41#include <rdma/ib_cache.h>
40#include <ib_pack.h> 42#include <rdma/ib_pack.h>
41 43
42#include "mthca_dev.h" 44#include "mthca_dev.h"
43#include "mthca_cmd.h" 45#include "mthca_cmd.h"
44#include "mthca_memfree.h" 46#include "mthca_memfree.h"
47#include "mthca_wqe.h"
45 48
46enum { 49enum {
47 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, 50 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
@@ -95,62 +98,62 @@ enum {
95}; 98};
96 99
97struct mthca_qp_path { 100struct mthca_qp_path {
98 u32 port_pkey; 101 __be32 port_pkey;
99 u8 rnr_retry; 102 u8 rnr_retry;
100 u8 g_mylmc; 103 u8 g_mylmc;
101 u16 rlid; 104 __be16 rlid;
102 u8 ackto; 105 u8 ackto;
103 u8 mgid_index; 106 u8 mgid_index;
104 u8 static_rate; 107 u8 static_rate;
105 u8 hop_limit; 108 u8 hop_limit;
106 u32 sl_tclass_flowlabel; 109 __be32 sl_tclass_flowlabel;
107 u8 rgid[16]; 110 u8 rgid[16];
108} __attribute__((packed)); 111} __attribute__((packed));
109 112
110struct mthca_qp_context { 113struct mthca_qp_context {
111 u32 flags; 114 __be32 flags;
112 u32 tavor_sched_queue; /* Reserved on Arbel */ 115 __be32 tavor_sched_queue; /* Reserved on Arbel */
113 u8 mtu_msgmax; 116 u8 mtu_msgmax;
114 u8 rq_size_stride; /* Reserved on Tavor */ 117 u8 rq_size_stride; /* Reserved on Tavor */
115 u8 sq_size_stride; /* Reserved on Tavor */ 118 u8 sq_size_stride; /* Reserved on Tavor */
116 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ 119 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */
117 u32 usr_page; 120 __be32 usr_page;
118 u32 local_qpn; 121 __be32 local_qpn;
119 u32 remote_qpn; 122 __be32 remote_qpn;
120 u32 reserved1[2]; 123 u32 reserved1[2];
121 struct mthca_qp_path pri_path; 124 struct mthca_qp_path pri_path;
122 struct mthca_qp_path alt_path; 125 struct mthca_qp_path alt_path;
123 u32 rdd; 126 __be32 rdd;
124 u32 pd; 127 __be32 pd;
125 u32 wqe_base; 128 __be32 wqe_base;
126 u32 wqe_lkey; 129 __be32 wqe_lkey;
127 u32 params1; 130 __be32 params1;
128 u32 reserved2; 131 __be32 reserved2;
129 u32 next_send_psn; 132 __be32 next_send_psn;
130 u32 cqn_snd; 133 __be32 cqn_snd;
131 u32 snd_wqe_base_l; /* Next send WQE on Tavor */ 134 __be32 snd_wqe_base_l; /* Next send WQE on Tavor */
132 u32 snd_db_index; /* (debugging only entries) */ 135 __be32 snd_db_index; /* (debugging only entries) */
133 u32 last_acked_psn; 136 __be32 last_acked_psn;
134 u32 ssn; 137 __be32 ssn;
135 u32 params2; 138 __be32 params2;
136 u32 rnr_nextrecvpsn; 139 __be32 rnr_nextrecvpsn;
137 u32 ra_buff_indx; 140 __be32 ra_buff_indx;
138 u32 cqn_rcv; 141 __be32 cqn_rcv;
139 u32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ 142 __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */
140 u32 rcv_db_index; /* (debugging only entries) */ 143 __be32 rcv_db_index; /* (debugging only entries) */
141 u32 qkey; 144 __be32 qkey;
142 u32 srqn; 145 __be32 srqn;
143 u32 rmsn; 146 __be32 rmsn;
144 u16 rq_wqe_counter; /* reserved on Tavor */ 147 __be16 rq_wqe_counter; /* reserved on Tavor */
145 u16 sq_wqe_counter; /* reserved on Tavor */ 148 __be16 sq_wqe_counter; /* reserved on Tavor */
146 u32 reserved3[18]; 149 u32 reserved3[18];
147} __attribute__((packed)); 150} __attribute__((packed));
148 151
149struct mthca_qp_param { 152struct mthca_qp_param {
150 u32 opt_param_mask; 153 __be32 opt_param_mask;
151 u32 reserved1; 154 u32 reserved1;
152 struct mthca_qp_context context; 155 struct mthca_qp_context context;
153 u32 reserved2[62]; 156 u32 reserved2[62];
154} __attribute__((packed)); 157} __attribute__((packed));
155 158
156enum { 159enum {
@@ -173,80 +176,6 @@ enum {
173 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 176 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16
174}; 177};
175 178
176enum {
177 MTHCA_NEXT_DBD = 1 << 7,
178 MTHCA_NEXT_FENCE = 1 << 6,
179 MTHCA_NEXT_CQ_UPDATE = 1 << 3,
180 MTHCA_NEXT_EVENT_GEN = 1 << 2,
181 MTHCA_NEXT_SOLICIT = 1 << 1,
182
183 MTHCA_MLX_VL15 = 1 << 17,
184 MTHCA_MLX_SLR = 1 << 16
185};
186
187enum {
188 MTHCA_INVAL_LKEY = 0x100
189};
190
191struct mthca_next_seg {
192 u32 nda_op; /* [31:6] next WQE [4:0] next opcode */
193 u32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
194 u32 flags; /* [3] CQ [2] Event [1] Solicit */
195 u32 imm; /* immediate data */
196};
197
198struct mthca_tavor_ud_seg {
199 u32 reserved1;
200 u32 lkey;
201 u64 av_addr;
202 u32 reserved2[4];
203 u32 dqpn;
204 u32 qkey;
205 u32 reserved3[2];
206};
207
208struct mthca_arbel_ud_seg {
209 u32 av[8];
210 u32 dqpn;
211 u32 qkey;
212 u32 reserved[2];
213};
214
215struct mthca_bind_seg {
216 u32 flags; /* [31] Atomic [30] rem write [29] rem read */
217 u32 reserved;
218 u32 new_rkey;
219 u32 lkey;
220 u64 addr;
221 u64 length;
222};
223
224struct mthca_raddr_seg {
225 u64 raddr;
226 u32 rkey;
227 u32 reserved;
228};
229
230struct mthca_atomic_seg {
231 u64 swap_add;
232 u64 compare;
233};
234
235struct mthca_data_seg {
236 u32 byte_count;
237 u32 lkey;
238 u64 addr;
239};
240
241struct mthca_mlx_seg {
242 u32 nda_op;
243 u32 nds;
244 u32 flags; /* [17] VL15 [16] SLR [14:12] static rate
245 [11:8] SL [3] C [2] E */
246 u16 rlid;
247 u16 vcrc;
248};
249
250static const u8 mthca_opcode[] = { 179static const u8 mthca_opcode[] = {
251 [IB_WR_SEND] = MTHCA_OPCODE_SEND, 180 [IB_WR_SEND] = MTHCA_OPCODE_SEND,
252 [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, 181 [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM,
@@ -573,12 +502,11 @@ static void init_port(struct mthca_dev *dev, int port)
573 502
574 memset(&param, 0, sizeof param); 503 memset(&param, 0, sizeof param);
575 504
576 param.enable_1x = 1; 505 param.port_width = dev->limits.port_width_cap;
577 param.enable_4x = 1; 506 param.vl_cap = dev->limits.vl_cap;
578 param.vl_cap = dev->limits.vl_cap; 507 param.mtu_cap = dev->limits.mtu_cap;
579 param.mtu_cap = dev->limits.mtu_cap; 508 param.gid_cap = dev->limits.gid_table_len;
580 param.gid_cap = dev->limits.gid_table_len; 509 param.pkey_cap = dev->limits.pkey_table_len;
581 param.pkey_cap = dev->limits.pkey_table_len;
582 510
583 err = mthca_INIT_IB(dev, &param, port, &status); 511 err = mthca_INIT_IB(dev, &param, port, &status);
584 if (err) 512 if (err)
@@ -684,10 +612,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
684 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; 612 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
685 613
686 if (mthca_is_memfree(dev)) { 614 if (mthca_is_memfree(dev)) {
687 qp_context->rq_size_stride = 615 if (qp->rq.max)
688 ((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4); 616 qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
689 qp_context->sq_size_stride = 617 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
690 ((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4); 618
619 if (qp->sq.max)
620 qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
621 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
691 } 622 }
692 623
693 /* leave arbel_sched_queue as 0 */ 624 /* leave arbel_sched_queue as 0 */
@@ -856,6 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
856 787
857 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); 788 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
858 789
790 if (ibqp->srq)
791 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
792
859 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 793 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
860 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 794 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
861 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); 795 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
@@ -878,6 +812,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
878 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); 812 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
879 } 813 }
880 814
815 if (ibqp->srq)
816 qp_context->srqn = cpu_to_be32(1 << 24 |
817 to_msrq(ibqp->srq)->srqn);
818
881 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, 819 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
882 qp->qpn, 0, mailbox, 0, &status); 820 qp->qpn, 0, mailbox, 0, &status);
883 if (status) { 821 if (status) {
@@ -925,10 +863,6 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
925 struct mthca_qp *qp) 863 struct mthca_qp *qp)
926{ 864{
927 int size; 865 int size;
928 int i;
929 int npages, shift;
930 dma_addr_t t;
931 u64 *dma_list = NULL;
932 int err = -ENOMEM; 866 int err = -ENOMEM;
933 867
934 size = sizeof (struct mthca_next_seg) + 868 size = sizeof (struct mthca_next_seg) +
@@ -978,116 +912,24 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
978 if (!qp->wrid) 912 if (!qp->wrid)
979 goto err_out; 913 goto err_out;
980 914
981 if (size <= MTHCA_MAX_DIRECT_QP_SIZE) { 915 err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
982 qp->is_direct = 1; 916 &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
983 npages = 1;
984 shift = get_order(size) + PAGE_SHIFT;
985
986 if (0)
987 mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
988 size, shift);
989
990 qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
991 &t, GFP_KERNEL);
992 if (!qp->queue.direct.buf)
993 goto err_out;
994
995 pci_unmap_addr_set(&qp->queue.direct, mapping, t);
996
997 memset(qp->queue.direct.buf, 0, size);
998
999 while (t & ((1 << shift) - 1)) {
1000 --shift;
1001 npages *= 2;
1002 }
1003
1004 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
1005 if (!dma_list)
1006 goto err_out_free;
1007
1008 for (i = 0; i < npages; ++i)
1009 dma_list[i] = t + i * (1 << shift);
1010 } else {
1011 qp->is_direct = 0;
1012 npages = size / PAGE_SIZE;
1013 shift = PAGE_SHIFT;
1014
1015 if (0)
1016 mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);
1017
1018 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
1019 if (!dma_list)
1020 goto err_out;
1021
1022 qp->queue.page_list = kmalloc(npages *
1023 sizeof *qp->queue.page_list,
1024 GFP_KERNEL);
1025 if (!qp->queue.page_list)
1026 goto err_out;
1027
1028 for (i = 0; i < npages; ++i) {
1029 qp->queue.page_list[i].buf =
1030 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
1031 &t, GFP_KERNEL);
1032 if (!qp->queue.page_list[i].buf)
1033 goto err_out_free;
1034
1035 memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);
1036
1037 pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
1038 dma_list[i] = t;
1039 }
1040 }
1041
1042 err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
1043 npages, 0, size,
1044 MTHCA_MPT_FLAG_LOCAL_READ,
1045 &qp->mr);
1046 if (err) 917 if (err)
1047 goto err_out_free; 918 goto err_out;
1048 919
1049 kfree(dma_list);
1050 return 0; 920 return 0;
1051 921
1052 err_out_free: 922err_out:
1053 if (qp->is_direct) {
1054 dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
1055 pci_unmap_addr(&qp->queue.direct, mapping));
1056 } else
1057 for (i = 0; i < npages; ++i) {
1058 if (qp->queue.page_list[i].buf)
1059 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
1060 qp->queue.page_list[i].buf,
1061 pci_unmap_addr(&qp->queue.page_list[i],
1062 mapping));
1063
1064 }
1065
1066 err_out:
1067 kfree(qp->wrid); 923 kfree(qp->wrid);
1068 kfree(dma_list);
1069 return err; 924 return err;
1070} 925}
1071 926
1072static void mthca_free_wqe_buf(struct mthca_dev *dev, 927static void mthca_free_wqe_buf(struct mthca_dev *dev,
1073 struct mthca_qp *qp) 928 struct mthca_qp *qp)
1074{ 929{
1075 int i; 930 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
1076 int size = PAGE_ALIGN(qp->send_wqe_offset + 931 (qp->sq.max << qp->sq.wqe_shift)),
1077 (qp->sq.max << qp->sq.wqe_shift)); 932 &qp->queue, qp->is_direct, &qp->mr);
1078
1079 if (qp->is_direct) {
1080 dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
1081 pci_unmap_addr(&qp->queue.direct, mapping));
1082 } else {
1083 for (i = 0; i < size / PAGE_SIZE; ++i) {
1084 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
1085 qp->queue.page_list[i].buf,
1086 pci_unmap_addr(&qp->queue.page_list[i],
1087 mapping));
1088 }
1089 }
1090
1091 kfree(qp->wrid); 933 kfree(qp->wrid);
1092} 934}
1093 935
@@ -1428,11 +1270,12 @@ void mthca_free_qp(struct mthca_dev *dev,
1428 * unref the mem-free tables and free the QPN in our table. 1270 * unref the mem-free tables and free the QPN in our table.
1429 */ 1271 */
1430 if (!qp->ibqp.uobject) { 1272 if (!qp->ibqp.uobject) {
1431 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn); 1273 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
1274 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1432 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 1275 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
1433 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn); 1276 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
1277 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1434 1278
1435 mthca_free_mr(dev, &qp->mr);
1436 mthca_free_memfree(dev, qp); 1279 mthca_free_memfree(dev, qp);
1437 mthca_free_wqe_buf(dev, qp); 1280 mthca_free_wqe_buf(dev, qp);
1438 } 1281 }
@@ -1457,6 +1300,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1457{ 1300{
1458 int header_size; 1301 int header_size;
1459 int err; 1302 int err;
1303 u16 pkey;
1460 1304
1461 ib_ud_header_init(256, /* assume a MAD */ 1305 ib_ud_header_init(256, /* assume a MAD */
1462 sqp->ud_header.grh_present, 1306 sqp->ud_header.grh_present,
@@ -1467,8 +1311,8 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1467 return err; 1311 return err;
1468 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); 1312 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
1469 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | 1313 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1470 (sqp->ud_header.lrh.destination_lid == 0xffff ? 1314 (sqp->ud_header.lrh.destination_lid ==
1471 MTHCA_MLX_SLR : 0) | 1315 IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
1472 (sqp->ud_header.lrh.service_level << 8)); 1316 (sqp->ud_header.lrh.service_level << 8));
1473 mlx->rlid = sqp->ud_header.lrh.destination_lid; 1317 mlx->rlid = sqp->ud_header.lrh.destination_lid;
1474 mlx->vcrc = 0; 1318 mlx->vcrc = 0;
@@ -1488,18 +1332,16 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1488 } 1332 }
1489 1333
1490 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; 1334 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
1491 if (sqp->ud_header.lrh.destination_lid == 0xffff) 1335 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1492 sqp->ud_header.lrh.source_lid = 0xffff; 1336 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1493 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 1337 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
1494 if (!sqp->qp.ibqp.qp_num) 1338 if (!sqp->qp.ibqp.qp_num)
1495 ib_get_cached_pkey(&dev->ib_dev, sqp->port, 1339 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
1496 sqp->pkey_index, 1340 sqp->pkey_index, &pkey);
1497 &sqp->ud_header.bth.pkey);
1498 else 1341 else
1499 ib_get_cached_pkey(&dev->ib_dev, sqp->port, 1342 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
1500 wr->wr.ud.pkey_index, 1343 wr->wr.ud.pkey_index, &pkey);
1501 &sqp->ud_header.bth.pkey); 1344 sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1502 cpu_to_be16s(&sqp->ud_header.bth.pkey);
1503 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1345 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1504 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 1346 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1505 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 1347 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
@@ -1742,7 +1584,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1742 1584
1743out: 1585out:
1744 if (likely(nreq)) { 1586 if (likely(nreq)) {
1745 u32 doorbell[2]; 1587 __be32 doorbell[2];
1746 1588
1747 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + 1589 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
1748 qp->send_wqe_offset) | f0 | op0); 1590 qp->send_wqe_offset) | f0 | op0);
@@ -1843,7 +1685,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1843 1685
1844out: 1686out:
1845 if (likely(nreq)) { 1687 if (likely(nreq)) {
1846 u32 doorbell[2]; 1688 __be32 doorbell[2];
1847 1689
1848 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); 1690 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
1849 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); 1691 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
@@ -2064,7 +1906,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2064 1906
2065out: 1907out:
2066 if (likely(nreq)) { 1908 if (likely(nreq)) {
2067 u32 doorbell[2]; 1909 __be32 doorbell[2];
2068 1910
2069 doorbell[0] = cpu_to_be32((nreq << 24) | 1911 doorbell[0] = cpu_to_be32((nreq << 24) |
2070 ((qp->sq.head & 0xffff) << 8) | 1912 ((qp->sq.head & 0xffff) << 8) |
@@ -2174,19 +2016,25 @@ out:
2174} 2016}
2175 2017
2176int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 2018int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2177 int index, int *dbd, u32 *new_wqe) 2019 int index, int *dbd, __be32 *new_wqe)
2178{ 2020{
2179 struct mthca_next_seg *next; 2021 struct mthca_next_seg *next;
2180 2022
2023 /*
2024 * For SRQs, all WQEs generate a CQE, so we're always at the
2025 * end of the doorbell chain.
2026 */
2027 if (qp->ibqp.srq) {
2028 *new_wqe = 0;
2029 return 0;
2030 }
2031
2181 if (is_send) 2032 if (is_send)
2182 next = get_send_wqe(qp, index); 2033 next = get_send_wqe(qp, index);
2183 else 2034 else
2184 next = get_recv_wqe(qp, index); 2035 next = get_recv_wqe(qp, index);
2185 2036
2186 if (mthca_is_memfree(dev)) 2037 *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2187 *dbd = 1;
2188 else
2189 *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2190 if (next->ee_nds & cpu_to_be32(0x3f)) 2038 if (next->ee_nds & cpu_to_be32(0x3f))
2191 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | 2039 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
2192 (next->ee_nds & cpu_to_be32(0x3f)); 2040 (next->ee_nds & cpu_to_be32(0x3f));
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
new file mode 100644
index 000000000000..75cd2d84ef12
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -0,0 +1,591 @@
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
33 */
34
35#include "mthca_dev.h"
36#include "mthca_cmd.h"
37#include "mthca_memfree.h"
38#include "mthca_wqe.h"
39
40enum {
41 MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
42};
43
44struct mthca_tavor_srq_context {
45 __be64 wqe_base_ds; /* low 6 bits is descriptor size */
46 __be32 state_pd;
47 __be32 lkey;
48 __be32 uar;
49 __be32 wqe_cnt;
50 u32 reserved[2];
51};
52
53struct mthca_arbel_srq_context {
54 __be32 state_logsize_srqn;
55 __be32 lkey;
56 __be32 db_index;
57 __be32 logstride_usrpage;
58 __be64 wqe_base;
59 __be32 eq_pd;
60 __be16 limit_watermark;
61 __be16 wqe_cnt;
62 u16 reserved1;
63 __be16 wqe_counter;
64 u32 reserved2[3];
65};
66
67static void *get_wqe(struct mthca_srq *srq, int n)
68{
69 if (srq->is_direct)
70 return srq->queue.direct.buf + (n << srq->wqe_shift);
71 else
72 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
73 ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
74}
75
76/*
77 * Return a pointer to the location within a WQE that we're using as a
78 * link when the WQE is in the free list. We use an offset of 4
79 * because in the Tavor case, posting a WQE may overwrite the first
80 * four bytes of the previous WQE. The offset avoids corrupting our
81 * free list if the WQE has already completed and been put on the free
82 * list when we post the next WQE.
83 */
84static inline int *wqe_to_link(void *wqe)
85{
86 return (int *) (wqe + 4);
87}
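Stand-alone sketch (not driver code; the 32-byte WQE size is invented) of the
layout this helper assumes: because the link lives at byte offset 4, a write
that clobbers bytes 0-3 of a WQE (as posting the next WQE on Tavor may do)
cannot corrupt the free list.

	#include <assert.h>
	#include <string.h>

	#define WQE_SHIFT 5			/* hypothetical 32-byte WQEs */
	#define NUM_WQES  8

	static unsigned char srq_buf[NUM_WQES << WQE_SHIFT];

	static int *link_of(int i)
	{
		return (int *) (srq_buf + (i << WQE_SHIFT) + 4);
	}

	int main(void)
	{
		int i;

		/* Link each WQE to its successor, as the SRQ init loop does. */
		for (i = 0; i < NUM_WQES; ++i)
			*link_of(i) = i < NUM_WQES - 1 ? i + 1 : -1;

		/* Overwrite bytes 0-3 of WQE 0... */
		memset(srq_buf, 0xff, 4);

		/* ...and its free-list link is untouched. */
		assert(*link_of(0) == 1);
		return 0;
	}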
88
89static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
90 struct mthca_pd *pd,
91 struct mthca_srq *srq,
92 struct mthca_tavor_srq_context *context)
93{
94 memset(context, 0, sizeof *context);
95
96 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
97 context->state_pd = cpu_to_be32(pd->pd_num);
98 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
99
100 if (pd->ibpd.uobject)
101 context->uar =
102 cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
103 else
104 context->uar = cpu_to_be32(dev->driver_uar.index);
105}
106
107static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
108 struct mthca_pd *pd,
109 struct mthca_srq *srq,
110 struct mthca_arbel_srq_context *context)
111{
112 int logsize;
113
114 memset(context, 0, sizeof *context);
115
116 logsize = long_log2(srq->max) + srq->wqe_shift;
117 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
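	/*
	 * Illustrative numbers: a 64-entry SRQ with 128-byte WQEs gives
	 * logsize = 6 + 7 = 13, i.e. the log2 of the buffer size in
	 * bytes, packed into the top byte alongside the SRQ number.
	 */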
118 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
119 context->db_index = cpu_to_be32(srq->db_index);
120 context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
121 if (pd->ibpd.uobject)
122 context->logstride_usrpage |=
123 cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
124 else
125 context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
126 context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
127}
128
129static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
130{
131 mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
132 srq->is_direct, &srq->mr);
133 kfree(srq->wrid);
134}
135
136static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
137 struct mthca_srq *srq)
138{
139 struct mthca_data_seg *scatter;
140 void *wqe;
141 int err;
142 int i;
143
144 if (pd->ibpd.uobject)
145 return 0;
146
147 srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
148 if (!srq->wrid)
149 return -ENOMEM;
150
151 err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
152 MTHCA_MAX_DIRECT_SRQ_SIZE,
153 &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
154 if (err) {
155 kfree(srq->wrid);
156 return err;
157 }
158
159 /*
160 * Now initialize the SRQ buffer so that all of the WQEs are
161 * linked into the list of free WQEs. In addition, set the
162 * scatter list L_Keys to the sentry value of 0x100.
163 */
164 for (i = 0; i < srq->max; ++i) {
165 wqe = get_wqe(srq, i);
166
167 *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
168
169 for (scatter = wqe + sizeof (struct mthca_next_seg);
170 (void *) scatter < wqe + (1 << srq->wqe_shift);
171 ++scatter)
172 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
173 }
174
175 return 0;
176}
177
178int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
179 struct ib_srq_attr *attr, struct mthca_srq *srq)
180{
181 struct mthca_mailbox *mailbox;
182 u8 status;
183 int ds;
184 int err;
185
186 /* Sanity check SRQ size before proceeding */
187 if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
188 return -EINVAL;
189
190 srq->max = attr->max_wr;
191 srq->max_gs = attr->max_sge;
192 srq->last = NULL;
193 srq->counter = 0;
194
195 if (mthca_is_memfree(dev))
196 srq->max = roundup_pow_of_two(srq->max + 1);
197
198 ds = min(64UL,
199 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
200 srq->max_gs * sizeof (struct mthca_data_seg)));
201 srq->wqe_shift = long_log2(ds);
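	/*
	 * Worked example (illustrative): with max_gs = 4, ds =
	 * roundup_pow_of_two(16 + 4 * 16) = 128 bytes, so wqe_shift = 7
	 * and each receive WQE occupies a 128-byte slot in the buffer.
	 */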
202
203 srq->srqn = mthca_alloc(&dev->srq_table.alloc);
204 if (srq->srqn == -1)
205 return -ENOMEM;
206
207 if (mthca_is_memfree(dev)) {
208 err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
209 if (err)
210 goto err_out;
211
212 if (!pd->ibpd.uobject) {
213 srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
214 srq->srqn, &srq->db);
215 if (srq->db_index < 0) {
216 err = -ENOMEM;
217 goto err_out_icm;
218 }
219 }
220 }
221
222 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
223 if (IS_ERR(mailbox)) {
224 err = PTR_ERR(mailbox);
225 goto err_out_db;
226 }
227
228 err = mthca_alloc_srq_buf(dev, pd, srq);
229 if (err)
230 goto err_out_mailbox;
231
232 spin_lock_init(&srq->lock);
233 atomic_set(&srq->refcount, 1);
234 init_waitqueue_head(&srq->wait);
235
236 if (mthca_is_memfree(dev))
237 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
238 else
239 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
240
241 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
242
243 if (err) {
244 mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
245 goto err_out_free_buf;
246 }
247 if (status) {
248 mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
249 status);
250 err = -EINVAL;
251 goto err_out_free_buf;
252 }
253
254 spin_lock_irq(&dev->srq_table.lock);
255 if (mthca_array_set(&dev->srq_table.srq,
256 srq->srqn & (dev->limits.num_srqs - 1),
257 srq)) {
258 spin_unlock_irq(&dev->srq_table.lock);
259 goto err_out_free_srq;
260 }
261 spin_unlock_irq(&dev->srq_table.lock);
262
263 mthca_free_mailbox(dev, mailbox);
264
265 srq->first_free = 0;
266 srq->last_free = srq->max - 1;
267
268 return 0;
269
270err_out_free_srq:
271 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
272 if (err)
273 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
274 else if (status)
275 mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
276
277err_out_free_buf:
278 if (!pd->ibpd.uobject)
279 mthca_free_srq_buf(dev, srq);
280
281err_out_mailbox:
282 mthca_free_mailbox(dev, mailbox);
283
284err_out_db:
285 if (!pd->ibpd.uobject && mthca_is_memfree(dev))
286 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
287
288err_out_icm:
289 mthca_table_put(dev, dev->srq_table.table, srq->srqn);
290
291err_out:
292 mthca_free(&dev->srq_table.alloc, srq->srqn);
293
294 return err;
295}
296
297void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
298{
299 struct mthca_mailbox *mailbox;
300 int err;
301 u8 status;
302
303 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
304 if (IS_ERR(mailbox)) {
305 mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
306 return;
307 }
308
309 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
310 if (err)
311 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
312 else if (status)
313 mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
314
315 spin_lock_irq(&dev->srq_table.lock);
316 mthca_array_clear(&dev->srq_table.srq,
317 srq->srqn & (dev->limits.num_srqs - 1));
318 spin_unlock_irq(&dev->srq_table.lock);
319
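	/*
	 * Drop the reference taken in mthca_alloc_srq; waiting for the
	 * count to reach zero ensures that any mthca_srq_event() handler
	 * that looked this SRQ up before the array was cleared has
	 * finished with it.
	 */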
320 atomic_dec(&srq->refcount);
321 wait_event(srq->wait, !atomic_read(&srq->refcount));
322
323 if (!srq->ibsrq.uobject) {
324 mthca_free_srq_buf(dev, srq);
325 if (mthca_is_memfree(dev))
326 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
327 }
328
329 mthca_table_put(dev, dev->srq_table.table, srq->srqn);
330 mthca_free(&dev->srq_table.alloc, srq->srqn);
331 mthca_free_mailbox(dev, mailbox);
332}
333
334void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
335 enum ib_event_type event_type)
336{
337 struct mthca_srq *srq;
338 struct ib_event event;
339
340 spin_lock(&dev->srq_table.lock);
341 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
342 if (srq)
343 atomic_inc(&srq->refcount);
344 spin_unlock(&dev->srq_table.lock);
345
346 if (!srq) {
347 mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
348 return;
349 }
350
351 if (!srq->ibsrq.event_handler)
352 goto out;
353
354 event.device = &dev->ib_dev;
355 event.event = event_type;
356 event.element.srq = &srq->ibsrq;
357 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
358
359out:
360 if (atomic_dec_and_test(&srq->refcount))
361 wake_up(&srq->wait);
362}
363
364/*
365 * This function must be called with IRQs disabled.
366 */
367void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
368{
369 int ind;
370
371 ind = wqe_addr >> srq->wqe_shift;
372
373 spin_lock(&srq->lock);
374
375 if (likely(srq->first_free >= 0))
376 *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
377 else
378 srq->first_free = ind;
379
380 *wqe_to_link(get_wqe(srq, ind)) = -1;
381 srq->last_free = ind;
382
383 spin_unlock(&srq->lock);
384}
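A minimal user-space model (sizes invented) of the allocate-from-head,
free-to-tail discipline shared by mthca_free_srq_wqe() and the post functions
below, showing that slots are recycled in FIFO order:

	#include <assert.h>

	#define MAX 4

	static int link[MAX];
	static int first_free, last_free;

	static int srq_alloc(void)
	{
		int ind = first_free;
		if (ind >= 0)
			first_free = link[ind];
		return ind;
	}

	static void srq_free(int ind)
	{
		if (first_free >= 0)
			link[last_free] = ind;	/* append at the tail */
		else
			first_free = ind;	/* list was empty */
		link[ind] = -1;
		last_free = ind;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < MAX; ++i)
			link[i] = i < MAX - 1 ? i + 1 : -1;
		first_free = 0;
		last_free  = MAX - 1;

		assert(srq_alloc() == 0);
		assert(srq_alloc() == 1);
		srq_free(0);			/* 0 goes to the tail... */
		assert(srq_alloc() == 2);	/* ...so 2 is handed out next */
		return 0;
	}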
385
386int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
387 struct ib_recv_wr **bad_wr)
388{
389 struct mthca_dev *dev = to_mdev(ibsrq->device);
390 struct mthca_srq *srq = to_msrq(ibsrq);
391 unsigned long flags;
392 int err = 0;
393 int first_ind;
394 int ind;
395 int next_ind;
396 int nreq;
397 int i;
398 void *wqe;
399 void *prev_wqe;
400
401 spin_lock_irqsave(&srq->lock, flags);
402
403 first_ind = srq->first_free;
404
405 for (nreq = 0; wr; ++nreq, wr = wr->next) {
406 ind = srq->first_free;
407
408 if (ind < 0) {
409 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
410 err = -ENOMEM;
411 *bad_wr = wr;
412 break;
413 }
414
415 wqe = get_wqe(srq, ind);
416 next_ind = *wqe_to_link(wqe);
417 prev_wqe = srq->last;
418 srq->last = wqe;
419
420 ((struct mthca_next_seg *) wqe)->nda_op = 0;
421 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
422 /* flags field will always remain 0 */
423
424 wqe += sizeof (struct mthca_next_seg);
425
426 if (unlikely(wr->num_sge > srq->max_gs)) {
427 err = -EINVAL;
428 *bad_wr = wr;
429 srq->last = prev_wqe;
430 break;
431 }
432
433 for (i = 0; i < wr->num_sge; ++i) {
434 ((struct mthca_data_seg *) wqe)->byte_count =
435 cpu_to_be32(wr->sg_list[i].length);
436 ((struct mthca_data_seg *) wqe)->lkey =
437 cpu_to_be32(wr->sg_list[i].lkey);
438 ((struct mthca_data_seg *) wqe)->addr =
439 cpu_to_be64(wr->sg_list[i].addr);
440 wqe += sizeof (struct mthca_data_seg);
441 }
442
443 if (i < srq->max_gs) {
444 ((struct mthca_data_seg *) wqe)->byte_count = 0;
445 ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
446 ((struct mthca_data_seg *) wqe)->addr = 0;
447 }
448
449 if (likely(prev_wqe)) {
450 ((struct mthca_next_seg *) prev_wqe)->nda_op =
451 cpu_to_be32((ind << srq->wqe_shift) | 1);
452 wmb();
453 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
454 cpu_to_be32(MTHCA_NEXT_DBD);
455 }
456
457 srq->wrid[ind] = wr->wr_id;
458 srq->first_free = next_ind;
459 }
460
463 if (likely(nreq)) {
464 __be32 doorbell[2];
465
466 doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
467 doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
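		/*
		 * Illustrative values: posting 3 WQEs starting at slot 2
		 * of SRQ number 0x12, with 128-byte WQEs, gives
		 * doorbell[0] = 0x100 and doorbell[1] = 0x1203.
		 */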
468
469 /*
470 * Make sure that descriptors are written before
471 * doorbell is rung.
472 */
473 wmb();
474
475 mthca_write64(doorbell,
476 dev->kar + MTHCA_RECEIVE_DOORBELL,
477 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
478 }
479
480 spin_unlock_irqrestore(&srq->lock, flags);
481 return err;
482}
483
484int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
485 struct ib_recv_wr **bad_wr)
486{
487 struct mthca_dev *dev = to_mdev(ibsrq->device);
488 struct mthca_srq *srq = to_msrq(ibsrq);
489 unsigned long flags;
490 int err = 0;
491 int ind;
492 int next_ind;
493 int nreq;
494 int i;
495 void *wqe;
496
497 spin_lock_irqsave(&srq->lock, flags);
498
499 for (nreq = 0; wr; ++nreq, wr = wr->next) {
500 ind = srq->first_free;
501
502 if (ind < 0) {
503 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
504 err = -ENOMEM;
505 *bad_wr = wr;
506 break;
507 }
508
509 wqe = get_wqe(srq, ind);
510 next_ind = *wqe_to_link(wqe);
511
512 ((struct mthca_next_seg *) wqe)->nda_op =
513 cpu_to_be32((next_ind << srq->wqe_shift) | 1);
514 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
515 /* flags field will always remain 0 */
516
517 wqe += sizeof (struct mthca_next_seg);
518
519 if (unlikely(wr->num_sge > srq->max_gs)) {
520 err = -EINVAL;
521 *bad_wr = wr;
522 break;
523 }
524
525 for (i = 0; i < wr->num_sge; ++i) {
526 ((struct mthca_data_seg *) wqe)->byte_count =
527 cpu_to_be32(wr->sg_list[i].length);
528 ((struct mthca_data_seg *) wqe)->lkey =
529 cpu_to_be32(wr->sg_list[i].lkey);
530 ((struct mthca_data_seg *) wqe)->addr =
531 cpu_to_be64(wr->sg_list[i].addr);
532 wqe += sizeof (struct mthca_data_seg);
533 }
534
535 if (i < srq->max_gs) {
536 ((struct mthca_data_seg *) wqe)->byte_count = 0;
537 ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
538 ((struct mthca_data_seg *) wqe)->addr = 0;
539 }
540
541 srq->wrid[ind] = wr->wr_id;
542 srq->first_free = next_ind;
543 }
544
545 if (likely(nreq)) {
546 srq->counter += nreq;
547
548 /*
549 * Make sure that descriptors are written before
550 * we write doorbell record.
551 */
552 wmb();
553 *srq->db = cpu_to_be32(srq->counter);
554 }
555
556 spin_unlock_irqrestore(&srq->lock, flags);
557 return err;
558}
559
560int __devinit mthca_init_srq_table(struct mthca_dev *dev)
561{
562 int err;
563
564 if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
565 return 0;
566
567 spin_lock_init(&dev->srq_table.lock);
568
569 err = mthca_alloc_init(&dev->srq_table.alloc,
570 dev->limits.num_srqs,
571 dev->limits.num_srqs - 1,
572 dev->limits.reserved_srqs);
573 if (err)
574 return err;
575
576 err = mthca_array_init(&dev->srq_table.srq,
577 dev->limits.num_srqs);
578 if (err)
579 mthca_alloc_cleanup(&dev->srq_table.alloc);
580
581 return err;
582}
583
584void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
585{
586 if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
587 return;
588
589 mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
590 mthca_alloc_cleanup(&dev->srq_table.alloc);
591}
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 3024c1b4547d..41613ec8a04e 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -69,6 +69,17 @@ struct mthca_create_cq_resp {
69 __u32 reserved; 69 __u32 reserved;
70}; 70};
71 71
72struct mthca_create_srq {
73 __u32 lkey;
74 __u32 db_index;
75 __u64 db_page;
76};
77
78struct mthca_create_srq_resp {
79 __u32 srqn;
80 __u32 reserved;
81};
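A hedged sketch (not the provider code, which is outside this hunk; the
function name is hypothetical) of how the kernel side would consume these
structures, assuming the ib_copy_from_udata()/ib_copy_to_udata() helpers
from ib_verbs.h:

	static int example_srq_udata(struct ib_udata *udata, int srqn)
	{
		struct mthca_create_srq      ucmd;
		struct mthca_create_srq_resp resp;

		/* lkey, db_index and db_page describe the buffer and
		 * doorbell page that userspace allocated. */
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
			return -EFAULT;

		resp.srqn     = srqn;
		resp.reserved = 0;
		return ib_copy_to_udata(udata, &resp, sizeof resp);
	}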
82
72struct mthca_create_qp { 83struct mthca_create_qp {
73 __u32 lkey; 84 __u32 lkey;
74 __u32 reserved; 85 __u32 reserved;
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
new file mode 100644
index 000000000000..1f4c0ff28f79
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $
33 */
34
35#ifndef MTHCA_WQE_H
36#define MTHCA_WQE_H
37
38#include <linux/types.h>
39
40enum {
41 MTHCA_NEXT_DBD = 1 << 7,
42 MTHCA_NEXT_FENCE = 1 << 6,
43 MTHCA_NEXT_CQ_UPDATE = 1 << 3,
44 MTHCA_NEXT_EVENT_GEN = 1 << 2,
45 MTHCA_NEXT_SOLICIT = 1 << 1,
46
47 MTHCA_MLX_VL15 = 1 << 17,
48 MTHCA_MLX_SLR = 1 << 16
49};
50
51enum {
52 MTHCA_INVAL_LKEY = 0x100
53};
54
55struct mthca_next_seg {
56 __be32 nda_op; /* [31:6] next WQE [4:0] next opcode */
57 __be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
58 __be32 flags; /* [3] CQ [2] Event [1] Solicit */
59 __be32 imm; /* immediate data */
60};
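/*
 * Illustrative encoding (values invented): chaining to a WQE at buffer
 * offset 0x200 that is 48 bytes (three 16-byte chunks) long, with the
 * doorbell-chain bit set, per the bit layouts above:
 *
 *	next->nda_op = cpu_to_be32(0x200);              [31:6] = 0x200, opcode 0
 *	next->ee_nds = cpu_to_be32(MTHCA_NEXT_DBD | 3); DBD plus size in 16-byte units
 */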
61
62struct mthca_tavor_ud_seg {
63 u32 reserved1;
64 __be32 lkey;
65 __be64 av_addr;
66 u32 reserved2[4];
67 __be32 dqpn;
68 __be32 qkey;
69 u32 reserved3[2];
70};
71
72struct mthca_arbel_ud_seg {
73 __be32 av[8];
74 __be32 dqpn;
75 __be32 qkey;
76 u32 reserved[2];
77};
78
79struct mthca_bind_seg {
80 __be32 flags; /* [31] Atomic [30] rem write [29] rem read */
81 u32 reserved;
82 __be32 new_rkey;
83 __be32 lkey;
84 __be64 addr;
85 __be64 length;
86};
87
88struct mthca_raddr_seg {
89 __be64 raddr;
90 __be32 rkey;
91 u32 reserved;
92};
93
94struct mthca_atomic_seg {
95 __be64 swap_add;
96 __be64 compare;
97};
98
99struct mthca_data_seg {
100 __be32 byte_count;
101 __be32 lkey;
102 __be64 addr;
103};
104
105struct mthca_mlx_seg {
106 __be32 nda_op;
107 __be32 nds;
108 __be32 flags; /* [17] VL15 [16] SLR [14:12] static rate
109 [11:8] SL [3] C [2] E */
110 __be16 rlid;
111 __be16 vcrc;
112};
113
114#endif /* MTHCA_WQE_H */
diff --git a/drivers/infiniband/include/ib_cache.h b/drivers/infiniband/include/ib_cache.h
deleted file mode 100644
index 44ef6bb9b9df..000000000000
--- a/drivers/infiniband/include/ib_cache.h
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_cache.h 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#ifndef _IB_CACHE_H
36#define _IB_CACHE_H
37
38#include <ib_verbs.h>
39
40/**
41 * ib_get_cached_gid - Returns a cached GID table entry
42 * @device: The device to query.
43 * @port_num: The port number of the device to query.
44 * @index: The index into the cached GID table to query.
45 * @gid: The GID value found at the specified index.
46 *
47 * ib_get_cached_gid() fetches the specified GID table entry stored in
48 * the local software cache.
49 */
50int ib_get_cached_gid(struct ib_device *device,
51 u8 port_num,
52 int index,
53 union ib_gid *gid);
54
55/**
56 * ib_find_cached_gid - Returns the port number and GID table index where
57 * a specified GID value occurs.
58 * @device: The device to query.
59 * @gid: The GID value to search for.
60 * @port_num: The port number of the device where the GID value was found.
61 * @index: The index into the cached GID table where the GID was found. This
62 * parameter may be NULL.
63 *
64 * ib_find_cached_gid() searches for the specified GID value in
65 * the local software cache.
66 */
67int ib_find_cached_gid(struct ib_device *device,
68 union ib_gid *gid,
69 u8 *port_num,
70 u16 *index);
71
72/**
73 * ib_get_cached_pkey - Returns a cached PKey table entry
74 * @device: The device to query.
75 * @port_num: The port number of the device to query.
76 * @index: The index into the cached PKey table to query.
77 * @pkey: The PKey value found at the specified index.
78 *
79 * ib_get_cached_pkey() fetches the specified PKey table entry stored in
80 * the local software cache.
81 */
82int ib_get_cached_pkey(struct ib_device *device_handle,
83 u8 port_num,
84 int index,
85 u16 *pkey);
86
87/**
88 * ib_find_cached_pkey - Returns the PKey table index where a specified
89 * PKey value occurs.
90 * @device: The device to query.
91 * @port_num: The port number of the device to search for the PKey.
92 * @pkey: The PKey value to search for.
93 * @index: The index into the cached PKey table where the PKey was found.
94 *
95 * ib_find_cached_pkey() searches the specified PKey table in
96 * the local software cache.
97 */
98int ib_find_cached_pkey(struct ib_device *device,
99 u8 port_num,
100 u16 pkey,
101 u16 *index);
102
103#endif /* _IB_CACHE_H */
diff --git a/drivers/infiniband/include/ib_cm.h b/drivers/infiniband/include/ib_cm.h
deleted file mode 100644
index da650115e79a..000000000000
--- a/drivers/infiniband/include/ib_cm.h
+++ /dev/null
@@ -1,569 +0,0 @@
1/*
2 * Copyright (c) 2004 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: ib_cm.h 2730 2005-06-28 16:43:03Z sean.hefty $
36 */
37#if !defined(IB_CM_H)
38#define IB_CM_H
39
40#include <ib_mad.h>
41#include <ib_sa.h>
42
43enum ib_cm_state {
44 IB_CM_IDLE,
45 IB_CM_LISTEN,
46 IB_CM_REQ_SENT,
47 IB_CM_REQ_RCVD,
48 IB_CM_MRA_REQ_SENT,
49 IB_CM_MRA_REQ_RCVD,
50 IB_CM_REP_SENT,
51 IB_CM_REP_RCVD,
52 IB_CM_MRA_REP_SENT,
53 IB_CM_MRA_REP_RCVD,
54 IB_CM_ESTABLISHED,
55 IB_CM_DREQ_SENT,
56 IB_CM_DREQ_RCVD,
57 IB_CM_TIMEWAIT,
58 IB_CM_SIDR_REQ_SENT,
59 IB_CM_SIDR_REQ_RCVD
60};
61
62enum ib_cm_lap_state {
63 IB_CM_LAP_IDLE,
64 IB_CM_LAP_SENT,
65 IB_CM_LAP_RCVD,
66 IB_CM_MRA_LAP_SENT,
67 IB_CM_MRA_LAP_RCVD,
68};
69
70enum ib_cm_event_type {
71 IB_CM_REQ_ERROR,
72 IB_CM_REQ_RECEIVED,
73 IB_CM_REP_ERROR,
74 IB_CM_REP_RECEIVED,
75 IB_CM_RTU_RECEIVED,
76 IB_CM_USER_ESTABLISHED,
77 IB_CM_DREQ_ERROR,
78 IB_CM_DREQ_RECEIVED,
79 IB_CM_DREP_RECEIVED,
80 IB_CM_TIMEWAIT_EXIT,
81 IB_CM_MRA_RECEIVED,
82 IB_CM_REJ_RECEIVED,
83 IB_CM_LAP_ERROR,
84 IB_CM_LAP_RECEIVED,
85 IB_CM_APR_RECEIVED,
86 IB_CM_SIDR_REQ_ERROR,
87 IB_CM_SIDR_REQ_RECEIVED,
88 IB_CM_SIDR_REP_RECEIVED
89};
90
91enum ib_cm_data_size {
92 IB_CM_REQ_PRIVATE_DATA_SIZE = 92,
93 IB_CM_MRA_PRIVATE_DATA_SIZE = 222,
94 IB_CM_REJ_PRIVATE_DATA_SIZE = 148,
95 IB_CM_REP_PRIVATE_DATA_SIZE = 196,
96 IB_CM_RTU_PRIVATE_DATA_SIZE = 224,
97 IB_CM_DREQ_PRIVATE_DATA_SIZE = 220,
98 IB_CM_DREP_PRIVATE_DATA_SIZE = 224,
99 IB_CM_REJ_ARI_LENGTH = 72,
100 IB_CM_LAP_PRIVATE_DATA_SIZE = 168,
101 IB_CM_APR_PRIVATE_DATA_SIZE = 148,
102 IB_CM_APR_INFO_LENGTH = 72,
103 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
104 IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
105 IB_CM_SIDR_REP_INFO_LENGTH = 72
106};
107
108struct ib_cm_id;
109
110struct ib_cm_req_event_param {
111 struct ib_cm_id *listen_id;
112 struct ib_device *device;
113 u8 port;
114
115 struct ib_sa_path_rec *primary_path;
116 struct ib_sa_path_rec *alternate_path;
117
118 u64 remote_ca_guid;
119 u32 remote_qkey;
120 u32 remote_qpn;
121 enum ib_qp_type qp_type;
122
123 u32 starting_psn;
124 u8 responder_resources;
125 u8 initiator_depth;
126 unsigned int local_cm_response_timeout:5;
127 unsigned int flow_control:1;
128 unsigned int remote_cm_response_timeout:5;
129 unsigned int retry_count:3;
130 unsigned int rnr_retry_count:3;
131 unsigned int srq:1;
132};
133
134struct ib_cm_rep_event_param {
135 u64 remote_ca_guid;
136 u32 remote_qkey;
137 u32 remote_qpn;
138 u32 starting_psn;
139 u8 responder_resources;
140 u8 initiator_depth;
141 unsigned int target_ack_delay:5;
142 unsigned int failover_accepted:2;
143 unsigned int flow_control:1;
144 unsigned int rnr_retry_count:3;
145 unsigned int srq:1;
146};
147
148enum ib_cm_rej_reason {
149 IB_CM_REJ_NO_QP = __constant_htons(1),
150 IB_CM_REJ_NO_EEC = __constant_htons(2),
151 IB_CM_REJ_NO_RESOURCES = __constant_htons(3),
152 IB_CM_REJ_TIMEOUT = __constant_htons(4),
153 IB_CM_REJ_UNSUPPORTED = __constant_htons(5),
154 IB_CM_REJ_INVALID_COMM_ID = __constant_htons(6),
155 IB_CM_REJ_INVALID_COMM_INSTANCE = __constant_htons(7),
156 IB_CM_REJ_INVALID_SERVICE_ID = __constant_htons(8),
157 IB_CM_REJ_INVALID_TRANSPORT_TYPE = __constant_htons(9),
158 IB_CM_REJ_STALE_CONN = __constant_htons(10),
159 IB_CM_REJ_RDC_NOT_EXIST = __constant_htons(11),
160 IB_CM_REJ_INVALID_GID = __constant_htons(12),
161 IB_CM_REJ_INVALID_LID = __constant_htons(13),
162 IB_CM_REJ_INVALID_SL = __constant_htons(14),
163 IB_CM_REJ_INVALID_TRAFFIC_CLASS = __constant_htons(15),
164 IB_CM_REJ_INVALID_HOP_LIMIT = __constant_htons(16),
165 IB_CM_REJ_INVALID_PACKET_RATE = __constant_htons(17),
166 IB_CM_REJ_INVALID_ALT_GID = __constant_htons(18),
167 IB_CM_REJ_INVALID_ALT_LID = __constant_htons(19),
168 IB_CM_REJ_INVALID_ALT_SL = __constant_htons(20),
169 IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = __constant_htons(21),
170 IB_CM_REJ_INVALID_ALT_HOP_LIMIT = __constant_htons(22),
171 IB_CM_REJ_INVALID_ALT_PACKET_RATE = __constant_htons(23),
172 IB_CM_REJ_PORT_CM_REDIRECT = __constant_htons(24),
173 IB_CM_REJ_PORT_REDIRECT = __constant_htons(25),
174 IB_CM_REJ_INVALID_MTU = __constant_htons(26),
175 IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = __constant_htons(27),
176 IB_CM_REJ_CONSUMER_DEFINED = __constant_htons(28),
177 IB_CM_REJ_INVALID_RNR_RETRY = __constant_htons(29),
178 IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = __constant_htons(30),
179 IB_CM_REJ_INVALID_CLASS_VERSION = __constant_htons(31),
180 IB_CM_REJ_INVALID_FLOW_LABEL = __constant_htons(32),
181 IB_CM_REJ_INVALID_ALT_FLOW_LABEL = __constant_htons(33)
182};
183
184struct ib_cm_rej_event_param {
185 enum ib_cm_rej_reason reason;
186 void *ari;
187 u8 ari_length;
188};
189
190struct ib_cm_mra_event_param {
191 u8 service_timeout;
192};
193
194struct ib_cm_lap_event_param {
195 struct ib_sa_path_rec *alternate_path;
196};
197
198enum ib_cm_apr_status {
199 IB_CM_APR_SUCCESS,
200 IB_CM_APR_INVALID_COMM_ID,
201 IB_CM_APR_UNSUPPORTED,
202 IB_CM_APR_REJECT,
203 IB_CM_APR_REDIRECT,
204 IB_CM_APR_IS_CURRENT,
205 IB_CM_APR_INVALID_QPN_EECN,
206 IB_CM_APR_INVALID_LID,
207 IB_CM_APR_INVALID_GID,
208 IB_CM_APR_INVALID_FLOW_LABEL,
209 IB_CM_APR_INVALID_TCLASS,
210 IB_CM_APR_INVALID_HOP_LIMIT,
211 IB_CM_APR_INVALID_PACKET_RATE,
212 IB_CM_APR_INVALID_SL
213};
214
215struct ib_cm_apr_event_param {
216 enum ib_cm_apr_status ap_status;
217 void *apr_info;
218 u8 info_len;
219};
220
221struct ib_cm_sidr_req_event_param {
222 struct ib_cm_id *listen_id;
223 struct ib_device *device;
224 u8 port;
225
226 u16 pkey;
227};
228
229enum ib_cm_sidr_status {
230 IB_SIDR_SUCCESS,
231 IB_SIDR_UNSUPPORTED,
232 IB_SIDR_REJECT,
233 IB_SIDR_NO_QP,
234 IB_SIDR_REDIRECT,
235 IB_SIDR_UNSUPPORTED_VERSION
236};
237
238struct ib_cm_sidr_rep_event_param {
239 enum ib_cm_sidr_status status;
240 u32 qkey;
241 u32 qpn;
242 void *info;
243 u8 info_len;
244
245};
246
247struct ib_cm_event {
248 enum ib_cm_event_type event;
249 union {
250 struct ib_cm_req_event_param req_rcvd;
251 struct ib_cm_rep_event_param rep_rcvd;
252 /* No data for RTU received events. */
253 struct ib_cm_rej_event_param rej_rcvd;
254 struct ib_cm_mra_event_param mra_rcvd;
255 struct ib_cm_lap_event_param lap_rcvd;
256 struct ib_cm_apr_event_param apr_rcvd;
257 /* No data for DREQ/DREP received events. */
258 struct ib_cm_sidr_req_event_param sidr_req_rcvd;
259 struct ib_cm_sidr_rep_event_param sidr_rep_rcvd;
260 enum ib_wc_status send_status;
261 } param;
262
263 void *private_data;
264};
265
266/**
267 * ib_cm_handler - User-defined callback to process communication events.
268 * @cm_id: Communication identifier associated with the reported event.
269 * @event: Information about the communication event.
270 *
271 * IB_CM_REQ_RECEIVED and IB_CM_SIDR_REQ_RECEIVED communication events
272 * generated as a result of listen requests result in the allocation of a
273 * new @cm_id. The new @cm_id is returned to the user through this callback.
274 * Clients are responsible for destroying the new @cm_id. For peer-to-peer
275 * IB_CM_REQ_RECEIVED and all other events, the returned @cm_id corresponds
276 * to a user's existing communication identifier.
277 *
278 * Users may not call ib_destroy_cm_id while in the context of this callback;
279 * however, returning a non-zero value instructs the communication manager to
280 * destroy the @cm_id after the callback completes.
281 */
282typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
283 struct ib_cm_event *event);
284
285struct ib_cm_id {
286 ib_cm_handler cm_handler;
287 void *context;
288 u64 service_id;
289 u64 service_mask;
290 enum ib_cm_state state; /* internal CM/debug use */
291 enum ib_cm_lap_state lap_state; /* internal CM/debug use */
292 u32 local_id;
293 u32 remote_id;
294};
295
296/**
297 * ib_create_cm_id - Allocate a communication identifier.
298 * @cm_handler: Callback invoked to notify the user of CM events.
299 * @context: User specified context associated with the communication
300 * identifier.
301 *
302 * Communication identifiers are used to track connection states, service
303 * ID resolution requests, and listen requests.
304 */
305struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
306 void *context);
307
308/**
309 * ib_destroy_cm_id - Destroy a connection identifier.
310 * @cm_id: Connection identifier to destroy.
311 *
312 * This call blocks until the connection identifier is destroyed.
313 */
314void ib_destroy_cm_id(struct ib_cm_id *cm_id);
315
316#define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL)
317#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL)
318
319/**
320 * ib_cm_listen - Initiates listening on the specified service ID for
321 * connection and service ID resolution requests.
322 * @cm_id: Connection identifier associated with the listen request.
323 * @service_id: Service identifier matched against incoming connection
324 * and service ID resolution requests. The service ID should be specified
325 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
326 * assign a service ID to the caller.
327 * @service_mask: Mask applied to service ID used to listen across a
328 * range of service IDs. If set to 0, the service ID is matched
329 * exactly. This parameter is ignored if %service_id is set to
330 * IB_CM_ASSIGN_SERVICE_ID.
331 */
332int ib_cm_listen(struct ib_cm_id *cm_id,
333 u64 service_id,
334 u64 service_mask);
335
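Putting the two calls above together, a server might bind to a fixed service ID as sketched below. MY_SERVICE_ID and my_context are placeholders, and the sketch assumes ib_create_cm_id() reports failure through ERR_PTR():

static struct ib_cm_id *my_start_listening(void *my_context)
{
	struct ib_cm_id *cm_id;

	cm_id = ib_create_cm_id(my_cm_handler, my_context);
	if (IS_ERR(cm_id))
		return cm_id;

	/* A zero mask requests an exact match on the service ID. */
	if (ib_cm_listen(cm_id, MY_SERVICE_ID, 0)) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(-EINVAL);
	}
	return cm_id;
}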
336struct ib_cm_req_param {
337 struct ib_sa_path_rec *primary_path;
338 struct ib_sa_path_rec *alternate_path;
339 u64 service_id;
340 u32 qp_num;
341 enum ib_qp_type qp_type;
342 u32 starting_psn;
343 const void *private_data;
344 u8 private_data_len;
345 u8 peer_to_peer;
346 u8 responder_resources;
347 u8 initiator_depth;
348 u8 remote_cm_response_timeout;
349 u8 flow_control;
350 u8 local_cm_response_timeout;
351 u8 retry_count;
352 u8 rnr_retry_count;
353 u8 max_cm_retries;
354 u8 srq;
355};
356
357/**
358 * ib_send_cm_req - Sends a connection request to the remote node.
359 * @cm_id: Connection identifier that will be associated with the
360 * connection request.
361 * @param: Connection request information needed to establish the
362 * connection.
363 */
364int ib_send_cm_req(struct ib_cm_id *cm_id,
365 struct ib_cm_req_param *param);
366
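A sketch of the active side, assuming the caller has already resolved a path record and created an RC QP; the timeout and retry values are illustrative, not recommendations:

static int my_send_req(struct ib_cm_id *cm_id, struct ib_qp *qp,
		       struct ib_sa_path_rec *path)
{
	struct ib_cm_req_param req = {
		.primary_path		= path,
		.service_id		= MY_SERVICE_ID,	/* placeholder */
		.qp_num			= qp->qp_num,
		.qp_type		= IB_QPT_RC,
		.starting_psn		= 0,
		.responder_resources	= 4,
		.initiator_depth	= 4,
		.remote_cm_response_timeout = 20,
		.local_cm_response_timeout  = 20,
		.retry_count		= 7,
		.rnr_retry_count	= 7,
		.max_cm_retries		= 15,
	};

	return ib_send_cm_req(cm_id, &req);
}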
367struct ib_cm_rep_param {
368 u32 qp_num;
369 u32 starting_psn;
370 const void *private_data;
371 u8 private_data_len;
372 u8 responder_resources;
373 u8 initiator_depth;
374 u8 target_ack_delay;
375 u8 failover_accepted;
376 u8 flow_control;
377 u8 rnr_retry_count;
378 u8 srq;
379};
380
381/**
382 * ib_send_cm_rep - Sends a connection reply in response to a connection
383 * request.
384 * @cm_id: Connection identifier that will be associated with the
385 * connection request.
386 * @param: Connection reply information needed to establish the
387 * connection.
388 */
389int ib_send_cm_rep(struct ib_cm_id *cm_id,
390 struct ib_cm_rep_param *param);
391
392/**
393 * ib_send_cm_rtu - Sends a connection ready to use message in response
394 * to a connection reply message.
395 * @cm_id: Connection identifier associated with the connection request.
396 * @private_data: Optional user-defined private data sent with the
397 * ready to use message.
398 * @private_data_len: Size of the private data buffer, in bytes.
399 */
400int ib_send_cm_rtu(struct ib_cm_id *cm_id,
401 const void *private_data,
402 u8 private_data_len);
403
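On the passive side these calls are typically issued from the cm_handler: a REP answers IB_CM_REQ_RECEIVED, and the active side answers the REP with an RTU. A hedged sketch, assuming the QP was stashed in cm_id->context:

static int my_connect_handler(struct ib_cm_id *cm_id,
			      struct ib_cm_event *event)
{
	struct ib_qp *qp = cm_id->context;
	struct ib_cm_rep_param rep;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		memset(&rep, 0, sizeof rep);
		rep.qp_num		= qp->qp_num;
		rep.starting_psn	= 0;
		rep.responder_resources	= 4;	/* illustrative values */
		rep.initiator_depth	= 4;
		return ib_send_cm_rep(cm_id, &rep);
	case IB_CM_REP_RECEIVED:
		/* No private data is needed for a bare RTU. */
		return ib_send_cm_rtu(cm_id, NULL, 0);
	default:
		return 0;
	}
}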
404/**
405 * ib_send_cm_dreq - Sends a disconnection request for an existing
406 * connection.
407 * @cm_id: Connection identifier associated with the connection being
408 * released.
409 * @private_data: Optional user-defined private data sent with the
410 * disconnection request message.
411 * @private_data_len: Size of the private data buffer, in bytes.
412 */
413int ib_send_cm_dreq(struct ib_cm_id *cm_id,
414 const void *private_data,
415 u8 private_data_len);
416
417/**
418 * ib_send_cm_drep - Sends a disconnection reply to a disconnection request.
419 * @cm_id: Connection identifier associated with the connection being
420 * released.
421 * @private_data: Optional user-defined private data sent with the
422 * disconnection reply message.
423 * @private_data_len: Size of the private data buffer, in bytes.
424 *
425 * If the cm_id is in the correct state, the CM will transition the connection
426 * to the timewait state, even if an error occurs sending the DREP message.
427 */
428int ib_send_cm_drep(struct ib_cm_id *cm_id,
429 const void *private_data,
430 u8 private_data_len);
431
432/**
433 * ib_cm_establish - Forces a connection state to established.
434 * @cm_id: Connection identifier to transition to established.
435 *
436 * This routine should be invoked by users who receive messages on a
437 * connected QP before an RTU has been received.
438 */
439int ib_cm_establish(struct ib_cm_id *cm_id);
440
441/**
442 * ib_send_cm_rej - Sends a connection rejection message to the
443 * remote node.
444 * @cm_id: Connection identifier associated with the connection being
445 * rejected.
446 * @reason: Reason for the connection request rejection.
447 * @ari: Optional additional rejection information.
448 * @ari_length: Size of the additional rejection information, in bytes.
449 * @private_data: Optional user-defined private data sent with the
450 * rejection message.
451 * @private_data_len: Size of the private data buffer, in bytes.
452 */
453int ib_send_cm_rej(struct ib_cm_id *cm_id,
454 enum ib_cm_rej_reason reason,
455 void *ari,
456 u8 ari_length,
457 const void *private_data,
458 u8 private_data_len);
459
460/**
461 * ib_send_cm_mra - Sends a message receipt acknowledgement to a connection
462 * message.
463 * @cm_id: Connection identifier associated with the connection message.
464 * @service_timeout: The maximum time required for the sender to reply to
465 * the connection message.
466 * @private_data: Optional user-defined private data sent with the
467 * message receipt acknowledgement.
468 * @private_data_len: Size of the private data buffer, in bytes.
469 */
470int ib_send_cm_mra(struct ib_cm_id *cm_id,
471 u8 service_timeout,
472 const void *private_data,
473 u8 private_data_len);
474
475/**
476 * ib_send_cm_lap - Sends a load alternate path request.
477 * @cm_id: Connection identifier associated with the load alternate path
478 * message.
479 * @alternate_path: A path record that identifies the alternate path to
480 * load.
481 * @private_data: Optional user-defined private data sent with the
482 * load alternate path message.
483 * @private_data_len: Size of the private data buffer, in bytes.
484 */
485int ib_send_cm_lap(struct ib_cm_id *cm_id,
486 struct ib_sa_path_rec *alternate_path,
487 const void *private_data,
488 u8 private_data_len);
489
490/**
491 * ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
492 * to a specified QP state.
493 * @cm_id: Communication identifier associated with the QP attributes to
494 * initialize.
495 * @qp_attr: On input, specifies the desired QP state. On output, the
496 * mandatory and desired optional attributes will be set in order to
497 * modify the QP to the specified state.
498 * @qp_attr_mask: The QP attribute mask that may be used to transition the
499 * QP to the specified state.
500 *
501 * Users must set the @qp_attr->qp_state to the desired QP state. This call
502 * will set all required attributes for the given transition, along with
503 * known optional attributes. Users may override the attributes returned from
504 * this call before calling ib_modify_qp.
505 */
506int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
507 struct ib_qp_attr *qp_attr,
508 int *qp_attr_mask);
509
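For example, to move a connection's QP to RTR, the user asks the CM for the attribute set and then applies it; a minimal sketch:

static int my_move_qp_to_rtr(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	/* Attributes may be overridden here before the modify. */
	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}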
510/**
511 * ib_send_cm_apr - Sends an alternate path response message in response to
512 * a load alternate path request.
513 * @cm_id: Connection identifier associated with the alternate path response.
514 * @status: Reply status sent with the alternate path response.
515 * @info: Optional additional information sent with the alternate path
516 * response.
517 * @info_length: Size of the additional information, in bytes.
518 * @private_data: Optional user-defined private data sent with the
519 * alternate path response message.
520 * @private_data_len: Size of the private data buffer, in bytes.
521 */
522int ib_send_cm_apr(struct ib_cm_id *cm_id,
523 enum ib_cm_apr_status status,
524 void *info,
525 u8 info_length,
526 const void *private_data,
527 u8 private_data_len);
528
529struct ib_cm_sidr_req_param {
530 struct ib_sa_path_rec *path;
531 u64 service_id;
532 int timeout_ms;
533 const void *private_data;
534 u8 private_data_len;
535 u8 max_cm_retries;
536 u16 pkey;
537};
538
539/**
540 * ib_send_cm_sidr_req - Sends a service ID resolution request to the
541 * remote node.
542 * @cm_id: Communication identifier that will be associated with the
543 * service ID resolution request.
544 * @param: Service ID resolution request information.
545 */
546int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
547 struct ib_cm_sidr_req_param *param);
548
549struct ib_cm_sidr_rep_param {
550 u32 qp_num;
551 u32 qkey;
552 enum ib_cm_sidr_status status;
553 const void *info;
554 u8 info_length;
555 const void *private_data;
556 u8 private_data_len;
557};
558
559/**
560 * ib_send_cm_sidr_rep - Sends a service ID resolution reply to the
561 * remote node.
562 * @cm_id: Communication identifier associated with the received service ID
563 * resolution request.
564 * @param: Service ID resolution reply information.
565 */
566int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
567 struct ib_cm_sidr_rep_param *param);
568
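A hedged sketch of both halves of a SIDR exchange; MY_SERVICE_ID and the pkey are placeholders, and the server half would run from the cm_handler on IB_CM_SIDR_REQ_RECEIVED:

/* Client: ask the remote CM which QP and Q_Key serve the service ID. */
static int my_resolve_service(struct ib_cm_id *cm_id,
			      struct ib_sa_path_rec *path)
{
	struct ib_cm_sidr_req_param req = {
		.path		= path,
		.service_id	= MY_SERVICE_ID,
		.timeout_ms	= 1000,
		.max_cm_retries	= 3,
		.pkey		= 0xffff,	/* placeholder partition */
	};

	return ib_send_cm_sidr_req(cm_id, &req);
}

/* Server: answer with the QPN and Q_Key of the serving UD QP. */
static int my_answer_sidr(struct ib_cm_id *cm_id, struct ib_qp *qp, u32 qkey)
{
	struct ib_cm_sidr_rep_param rep = {
		.qp_num	= qp->qp_num,
		.qkey	= qkey,
		.status	= IB_SIDR_SUCCESS,
	};

	return ib_send_cm_sidr_rep(cm_id, &rep);
}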
569#endif /* IB_CM_H */
diff --git a/drivers/infiniband/include/ib_fmr_pool.h b/drivers/infiniband/include/ib_fmr_pool.h
deleted file mode 100644
index 6c9e24d6e144..000000000000
--- a/drivers/infiniband/include/ib_fmr_pool.h
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_fmr_pool.h 2730 2005-06-28 16:43:03Z sean.hefty $
34 */
35
36#if !defined(IB_FMR_POOL_H)
37#define IB_FMR_POOL_H
38
39#include <ib_verbs.h>
40
41struct ib_fmr_pool;
42
43/**
44 * struct ib_fmr_pool_param - Parameters for creating FMR pool
45 * @max_pages_per_fmr:Maximum number of pages per map request.
46 * @access:Access flags for FMRs in pool.
47 * @pool_size:Number of FMRs to allocate for pool.
48 * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
49 * FMRs are present.
50 * @flush_function:Callback called when unmapped FMRs are flushed and
51 * more FMRs are possibly available for mapping
52 * @flush_arg:Context passed to user's flush function.
53 * @cache:If set, FMRs may be reused after unmapping for identical map
54 * requests.
55 */
56struct ib_fmr_pool_param {
57 int max_pages_per_fmr;
58 enum ib_access_flags access;
59 int pool_size;
60 int dirty_watermark;
61 void (*flush_function)(struct ib_fmr_pool *pool,
62 void * arg);
63 void *flush_arg;
64 unsigned cache:1;
65};
66
67struct ib_pool_fmr {
68 struct ib_fmr *fmr;
69 struct ib_fmr_pool *pool;
70 struct list_head list;
71 struct hlist_node cache_node;
72 int ref_count;
73 int remap_count;
74 u64 io_virtual_address;
75 int page_list_len;
76 u64 page_list[0];
77};
78
79struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
80 struct ib_fmr_pool_param *params);
81
82void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
83
84int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
85
86struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
87 u64 *page_list,
88 int list_len,
89 u64 *io_virtual_address);
90
91int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
92
93#endif /* IB_FMR_POOL_H */
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
deleted file mode 100644
index 491b6f25b3b8..000000000000
--- a/drivers/infiniband/include/ib_mad.h
+++ /dev/null
@@ -1,577 +0,0 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $
37 */
38
39#if !defined( IB_MAD_H )
40#define IB_MAD_H
41
42#include <linux/pci.h>
43
44#include <ib_verbs.h>
45
46/* Management base version */
47#define IB_MGMT_BASE_VERSION 1
48
49/* Management classes */
50#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
51#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81
52#define IB_MGMT_CLASS_SUBN_ADM 0x03
53#define IB_MGMT_CLASS_PERF_MGMT 0x04
54#define IB_MGMT_CLASS_BM 0x05
55#define IB_MGMT_CLASS_DEVICE_MGMT 0x06
56#define IB_MGMT_CLASS_CM 0x07
57#define IB_MGMT_CLASS_SNMP 0x08
58#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30
59#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F
60
61#define IB_OPENIB_OUI (0x001405)
62
63/* Management methods */
64#define IB_MGMT_METHOD_GET 0x01
65#define IB_MGMT_METHOD_SET 0x02
66#define IB_MGMT_METHOD_GET_RESP 0x81
67#define IB_MGMT_METHOD_SEND 0x03
68#define IB_MGMT_METHOD_TRAP 0x05
69#define IB_MGMT_METHOD_REPORT 0x06
70#define IB_MGMT_METHOD_REPORT_RESP 0x86
71#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
72
73#define IB_MGMT_METHOD_RESP 0x80
74
75#define IB_MGMT_MAX_METHODS 128
76
77/* RMPP information */
78#define IB_MGMT_RMPP_VERSION 1
79
80#define IB_MGMT_RMPP_TYPE_DATA 1
81#define IB_MGMT_RMPP_TYPE_ACK 2
82#define IB_MGMT_RMPP_TYPE_STOP 3
83#define IB_MGMT_RMPP_TYPE_ABORT 4
84
85#define IB_MGMT_RMPP_FLAG_ACTIVE 1
86#define IB_MGMT_RMPP_FLAG_FIRST (1<<1)
87#define IB_MGMT_RMPP_FLAG_LAST (1<<2)
88
89#define IB_MGMT_RMPP_NO_RESPTIME 0x1F
90
91#define IB_MGMT_RMPP_STATUS_SUCCESS 0
92#define IB_MGMT_RMPP_STATUS_RESX 1
93#define IB_MGMT_RMPP_STATUS_T2L 118
94#define IB_MGMT_RMPP_STATUS_BAD_LEN 119
95#define IB_MGMT_RMPP_STATUS_BAD_SEG 120
96#define IB_MGMT_RMPP_STATUS_BADT 121
97#define IB_MGMT_RMPP_STATUS_W2S 122
98#define IB_MGMT_RMPP_STATUS_S2B 123
99#define IB_MGMT_RMPP_STATUS_BAD_STATUS 124
100#define IB_MGMT_RMPP_STATUS_UNV 125
101#define IB_MGMT_RMPP_STATUS_TMR 126
102#define IB_MGMT_RMPP_STATUS_UNSPEC 127
103
104#define IB_QP0 0
105#define IB_QP1 __constant_htonl(1)
106#define IB_QP1_QKEY 0x80010000
107#define IB_QP_SET_QKEY 0x80000000
108
109struct ib_mad_hdr {
110 u8 base_version;
111 u8 mgmt_class;
112 u8 class_version;
113 u8 method;
114 u16 status;
115 u16 class_specific;
116 u64 tid;
117 u16 attr_id;
118 u16 resv;
119 u32 attr_mod;
120};
121
122struct ib_rmpp_hdr {
123 u8 rmpp_version;
124 u8 rmpp_type;
125 u8 rmpp_rtime_flags;
126 u8 rmpp_status;
127 u32 seg_num;
128 u32 paylen_newwin;
129};
130
131typedef u64 __bitwise ib_sa_comp_mask;
132
133#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
134
135/*
136 * ib_sa_hdr and ib_sa_mad structures must be packed because they have
137 * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
138 * lay them out wrong otherwise. (And unfortunately they are sent on
139 * the wire so we can't change the layout)
140 */
141struct ib_sa_hdr {
142 u64 sm_key;
143 u16 attr_offset;
144 u16 reserved;
145 ib_sa_comp_mask comp_mask;
146} __attribute__ ((packed));
147
148struct ib_mad {
149 struct ib_mad_hdr mad_hdr;
150 u8 data[232];
151};
152
153struct ib_rmpp_mad {
154 struct ib_mad_hdr mad_hdr;
155 struct ib_rmpp_hdr rmpp_hdr;
156 u8 data[220];
157};
158
159struct ib_sa_mad {
160 struct ib_mad_hdr mad_hdr;
161 struct ib_rmpp_hdr rmpp_hdr;
162 struct ib_sa_hdr sa_hdr;
163 u8 data[200];
164} __attribute__ ((packed));
165
166struct ib_vendor_mad {
167 struct ib_mad_hdr mad_hdr;
168 struct ib_rmpp_hdr rmpp_hdr;
169 u8 reserved;
170 u8 oui[3];
171 u8 data[216];
172};
173
174/**
175 * ib_mad_send_buf - MAD data buffer and work request for sends.
176 * @mad: References an allocated MAD data buffer. The size of the data
177 * buffer is specified in the @send_wr.length field.
178 * @mapping: DMA mapping information.
179 * @mad_agent: MAD agent that allocated the buffer.
180 * @context: User-controlled context fields.
181 * @send_wr: An initialized work request structure used when sending the MAD.
182 * The wr_id field of the work request is initialized to reference this
183 * data structure.
184 * @sge: A scatter-gather list referenced by the work request.
185 *
186 * Users are responsible for initializing the MAD buffer itself, with the
187 * exception of specifying the payload length field in any RMPP MAD.
188 */
189struct ib_mad_send_buf {
190 struct ib_mad *mad;
191 DECLARE_PCI_UNMAP_ADDR(mapping)
192 struct ib_mad_agent *mad_agent;
193 void *context[2];
194 struct ib_send_wr send_wr;
195 struct ib_sge sge;
196};
197
198/**
199 * ib_get_rmpp_resptime - Returns the RMPP response time.
200 * @rmpp_hdr: An RMPP header.
201 */
202static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
203{
204 return rmpp_hdr->rmpp_rtime_flags >> 3;
205}
206
207/**
208 * ib_get_rmpp_flags - Returns the RMPP flags.
209 * @rmpp_hdr: An RMPP header.
210 */
211static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
212{
213 return rmpp_hdr->rmpp_rtime_flags & 0x7;
214}
215
216/**
217 * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
218 * @rmpp_hdr: An RMPP header.
219 * @rtime: The response time to set.
220 */
221static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
222{
223 rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
224}
225
226/**
227 * ib_set_rmpp_flags - Sets the flags in an RMPP header.
228 * @rmpp_hdr: An RMPP header.
229 * @flags: The flags to set.
230 */
231static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
232{
233 rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
234 (flags & 0x7);
235}
236
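A short illustration of how these helpers share the rmpp_rtime_flags byte; initializing a header for an active first segment leaves ib_get_rmpp_flags() returning 0x3:

static void my_init_first_segment(struct ib_rmpp_hdr *hdr)
{
	hdr->rmpp_version	= IB_MGMT_RMPP_VERSION;
	hdr->rmpp_type		= IB_MGMT_RMPP_TYPE_DATA;
	hdr->rmpp_rtime_flags	= 0;

	/* Response time lives in bits 7:3, flags in bits 2:0. */
	ib_set_rmpp_resptime(hdr, IB_MGMT_RMPP_NO_RESPTIME);
	ib_set_rmpp_flags(hdr, IB_MGMT_RMPP_FLAG_ACTIVE |
			       IB_MGMT_RMPP_FLAG_FIRST);
}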
237struct ib_mad_agent;
238struct ib_mad_send_wc;
239struct ib_mad_recv_wc;
240
241/**
242 * ib_mad_send_handler - callback handler for a sent MAD.
243 * @mad_agent: MAD agent that sent the MAD.
244 * @mad_send_wc: Send work completion information on the sent MAD.
245 */
246typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
247 struct ib_mad_send_wc *mad_send_wc);
248
249/**
250 * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
251 * @mad_agent: MAD agent that snooped the MAD.
252 * @send_wr: Work request information on the sent MAD.
253 * @mad_send_wc: Work completion information on the sent MAD. Valid
254 * only for snooping that occurs on a send completion.
255 *
256 * Clients snooping MADs should not modify data referenced by the @send_wr
257 * or @mad_send_wc.
258 */
259typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
260 struct ib_send_wr *send_wr,
261 struct ib_mad_send_wc *mad_send_wc);
262
263/**
264 * ib_mad_recv_handler - callback handler for a received MAD.
265 * @mad_agent: MAD agent requesting the received MAD.
266 * @mad_recv_wc: Received work completion information on the received MAD.
267 *
268 * MADs received in response to a send request operation will be handed to
269 * the user after the send operation completes. All data buffers given
270 * to registered agents through this routine are owned by the receiving
271 * client, except for snooping agents. Clients snooping MADs should not
272 * modify the data referenced by @mad_recv_wc.
273 */
274typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
275 struct ib_mad_recv_wc *mad_recv_wc);
276
277/**
278 * ib_mad_agent - Used to track MAD registration with the access layer.
279 * @device: Reference to device registration is on.
280 * @qp: Reference to QP used for sending and receiving MADs.
281 * @mr: Memory region for system memory usable for DMA.
282 * @recv_handler: Callback handler for a received MAD.
283 * @send_handler: Callback handler for a sent MAD.
284 * @snoop_handler: Callback handler for snooped sent MADs.
285 * @context: User-specified context associated with this registration.
286 * @hi_tid: Access layer assigned transaction ID for this client.
287 * Unsolicited MADs sent by this client will have the upper 32-bits
288 * of their TID set to this value.
289 * @port_num: Port number on which QP is registered
290 * @rmpp_version: If set, indicates the RMPP version used by this agent.
291 */
292struct ib_mad_agent {
293 struct ib_device *device;
294 struct ib_qp *qp;
295 struct ib_mr *mr;
296 ib_mad_recv_handler recv_handler;
297 ib_mad_send_handler send_handler;
298 ib_mad_snoop_handler snoop_handler;
299 void *context;
300 u32 hi_tid;
301 u8 port_num;
302 u8 rmpp_version;
303};
304
305/**
306 * ib_mad_send_wc - MAD send completion information.
307 * @wr_id: Work request identifier associated with the send MAD request.
308 * @status: Completion status.
309 * @vendor_err: Optional vendor error information returned with a failed
310 * request.
311 */
312struct ib_mad_send_wc {
313 u64 wr_id;
314 enum ib_wc_status status;
315 u32 vendor_err;
316};
317
318/**
319 * ib_mad_recv_buf - received MAD buffer information.
320 * @list: Reference to next data buffer for a received RMPP MAD.
321 * @grh: References a data buffer containing the global route header.
322 * The data referenced by this buffer is only valid if the GRH is
323 * valid.
324 * @mad: References the start of the received MAD.
325 */
326struct ib_mad_recv_buf {
327 struct list_head list;
328 struct ib_grh *grh;
329 struct ib_mad *mad;
330};
331
332/**
333 * ib_mad_recv_wc - received MAD information.
334 * @wc: Completion information for the received data.
335 * @recv_buf: Specifies the location of the received data buffer(s).
336 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
337 * @mad_len: The length of the received MAD, without duplicated headers.
338 *
339 * For a received response, the wr_id field of the wc is set to the wr_id
340 * for the corresponding send request.
341 */
342struct ib_mad_recv_wc {
343 struct ib_wc *wc;
344 struct ib_mad_recv_buf recv_buf;
345 struct list_head rmpp_list;
346 int mad_len;
347};
348
349/**
350 * ib_mad_reg_req - MAD registration request
351 * @mgmt_class: Indicates which management class of MADs should be received
352 * by the caller. This field is only required if the user wishes to
353 * receive unsolicited MADs, otherwise it should be 0.
354 * @mgmt_class_version: Indicates which version of MADs for the given
355 * management class to receive.
356 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
357 * in the range from 0x30 to 0x4f. Otherwise not used.
358 * @method_mask: The caller will receive unsolicited MADs for any method
359 * whose corresponding bit is set in @method_mask.
360 */
361struct ib_mad_reg_req {
362 u8 mgmt_class;
363 u8 mgmt_class_version;
364 u8 oui[3];
365 DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);
366};
367
368/**
369 * ib_register_mad_agent - Register to send/receive MADs.
370 * @device: The device to register with.
371 * @port_num: The port on the specified device to use.
372 * @qp_type: Specifies which QP to access. Must be either
373 * IB_QPT_SMI or IB_QPT_GSI.
374 * @mad_reg_req: Specifies which unsolicited MADs should be received
375 * by the caller. This parameter may be NULL if the caller only
376 * wishes to receive solicited responses.
377 * @rmpp_version: If set, indicates that the client will send
378 * and receive MADs that contain the RMPP header for the given version.
379 * If set to 0, indicates that RMPP is not used by this client.
380 * @send_handler: The completion callback routine invoked after a send
381 * request has completed.
382 * @recv_handler: The completion callback routine invoked for a received
383 * MAD.
384 * @context: User specified context associated with the registration.
385 */
386struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
387 u8 port_num,
388 enum ib_qp_type qp_type,
389 struct ib_mad_reg_req *mad_reg_req,
390 u8 rmpp_version,
391 ib_mad_send_handler send_handler,
392 ib_mad_recv_handler recv_handler,
393 void *context);
394
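For instance, an agent that wants unsolicited Performance Management GETs on the GSI QP might register as sketched below; my_send_handler and my_recv_handler are user-supplied callbacks (one possible shape of the receive handler is sketched further down):

static struct ib_mad_agent *my_register_perf_agent(struct ib_device *device,
						   u8 port_num)
{
	struct ib_mad_reg_req reg_req;

	memset(&reg_req, 0, sizeof reg_req);
	reg_req.mgmt_class	   = IB_MGMT_CLASS_PERF_MGMT;
	reg_req.mgmt_class_version = 1;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

	return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				     &reg_req, 0 /* no RMPP */,
				     my_send_handler, my_recv_handler,
				     NULL);
}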
395enum ib_mad_snoop_flags {
396 /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
397 /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/
398 IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2),
399 /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
400 IB_MAD_SNOOP_RECVS = (1<<4)
401 /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/
402 /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/
403};
404
405/**
406 * ib_register_mad_snoop - Register to snoop sent and received MADs.
407 * @device: The device to register with.
408 * @port_num: The port on the specified device to use.
409 * @qp_type: Specifies which QP traffic to snoop. Must be either
410 * IB_QPT_SMI or IB_QPT_GSI.
411 * @mad_snoop_flags: Specifies the points at which snooping occurs.
412 * @send_handler: The callback routine invoked for a snooped send.
413 * @recv_handler: The callback routine invoked for a snooped receive.
414 * @context: User specified context associated with the registration.
415 */
416struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
417 u8 port_num,
418 enum ib_qp_type qp_type,
419 int mad_snoop_flags,
420 ib_mad_snoop_handler snoop_handler,
421 ib_mad_recv_handler recv_handler,
422 void *context);
423
424/**
425 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
426 * @mad_agent: Corresponding MAD registration request to deregister.
427 *
428 * After invoking this routine, MAD services are no longer usable by the
429 * client on the associated QP.
430 */
431int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
432
433/**
434 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
435 * with the registered client.
436 * @mad_agent: Specifies the associated registration to post the send to.
437 * @send_wr: Specifies the information needed to send the MAD(s).
438 * @bad_send_wr: Specifies the MAD on which an error was encountered.
439 *
440 * Sent MADs are not guaranteed to complete in the order that they were posted.
441 *
442 * If the MAD requires RMPP, the data buffer should contain a single copy
443 * of the common MAD, RMPP, and class specific headers, followed by the class
444 * defined data. If the class defined data would not divide evenly into
445 * RMPP segments, then space must be allocated at the end of the referenced
446 * buffer for any required padding. To indicate the amount of class defined
447 * data being transferred, the paylen_newwin field in the RMPP header should
448 * be set to the size of the class specific header plus the amount of class
449 * defined data being transferred. The paylen_newwin field should be
450 * specified in network-byte order.
451 */
452int ib_post_send_mad(struct ib_mad_agent *mad_agent,
453 struct ib_send_wr *send_wr,
454 struct ib_send_wr **bad_send_wr);
455
456/**
457 * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
458 * @mad_recv_wc: Work completion information for a received MAD.
459 * @buf: User-provided data buffer to receive the coalesced buffers. The
460 * referenced buffer should be at least the size of the mad_len specified
461 * by @mad_recv_wc.
462 *
463 * This call copies a chain of received MAD segments into a single data buffer,
464 * removing duplicated headers.
465 */
466void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
467
468/**
469 * ib_free_recv_mad - Returns data buffers used to receive a MAD.
470 * @mad_recv_wc: Work completion information for a received MAD.
471 *
472 * Clients receiving MADs through their ib_mad_recv_handler must call this
473 * routine to return the work completion buffers to the access layer.
474 */
475void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
476
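A receive handler typically copies what it needs and hands the buffers back; for an RMPP reassembly the segment chain can first be flattened. A sketch, with my_process standing in for real processing:

static void my_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	void *buf = kmalloc(mad_recv_wc->mad_len, GFP_KERNEL);

	if (buf) {
		/* Flatten the segment chain, dropping duplicated headers. */
		ib_coalesce_recv_mad(mad_recv_wc, buf);
		my_process(buf, mad_recv_wc->mad_len);
		kfree(buf);
	}

	/* The receive buffers always belong to us and must be returned. */
	ib_free_recv_mad(mad_recv_wc);
}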
477/**
478 * ib_cancel_mad - Cancels an outstanding send MAD operation.
479 * @mad_agent: Specifies the registration associated with sent MAD.
480 * @wr_id: Indicates the work request identifier of the MAD to cancel.
481 *
482 * MADs will be returned to the user through the corresponding
483 * ib_mad_send_handler.
484 */
485void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id);
486
487/**
488 * ib_modify_mad - Modifies an outstanding send MAD operation.
489 * @mad_agent: Specifies the registration associated with sent MAD.
490 * @wr_id: Indicates the work request identifier of the MAD to modify.
491 * @timeout_ms: New timeout value for sent MAD.
492 *
493 * This call will reset the timeout value for a sent MAD to the specified
494 * value.
495 */
496int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms);
497
498/**
499 * ib_redirect_mad_qp - Registers a QP for MAD services.
500 * @qp: Reference to a QP that requires MAD services.
501 * @rmpp_version: If set, indicates that the client will send
502 * and receive MADs that contain the RMPP header for the given version.
503 * If set to 0, indicates that RMPP is not used by this client.
504 * @send_handler: The completion callback routine invoked after a send
505 * request has completed.
506 * @recv_handler: The completion callback routine invoked for a received
507 * MAD.
508 * @context: User specified context associated with the registration.
509 *
510 * Use of this call allows clients to use MAD services, such as RMPP,
511 * on user-owned QPs. After calling this routine, users may send
512 * MADs on the specified QP by calling ib_mad_post_send.
513 */
514struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
515 u8 rmpp_version,
516 ib_mad_send_handler send_handler,
517 ib_mad_recv_handler recv_handler,
518 void *context);
519
520/**
521 * ib_process_mad_wc - Processes a work completion associated with a
522 * MAD sent or received on a redirected QP.
523 * @mad_agent: Specifies the registered MAD service using the redirected QP.
524 * @wc: References a work completion associated with a sent or received
525 * MAD segment.
526 *
527 * This routine is used to complete or continue processing on a MAD request.
528 * If the work completion is associated with a send operation, calling
529 * this routine is required to continue an RMPP transfer or to wait for a
530 * corresponding response, if it is a request. If the work completion is
531 * associated with a receive operation, calling this routine is required to
532 * process an inbound or outbound RMPP transfer, or to match a response MAD
533 * with its corresponding request.
534 */
535int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
536 struct ib_wc *wc);
537
538/**
539 * ib_create_send_mad - Allocate and initialize a data buffer and work request
540 * for sending a MAD.
541 * @mad_agent: Specifies the registered MAD service to associate with the MAD.
542 * @remote_qpn: Specifies the QPN of the receiving node.
543 * @pkey_index: Specifies which PKey the MAD will be sent using. This field
544 * is valid only if the remote_qpn is QP 1.
545 * @ah: References the address handle used to transfer to the remote node.
546 * @rmpp_active: Indicates if the send will enable RMPP.
547 * @hdr_len: Indicates the size of the data header of the MAD. This length
548 * should include the common MAD header, RMPP header, plus any class
549 * specific header.
550 * @data_len: Indicates the size of any user-transferred data. The call will
551 * automatically adjust the allocated buffer size to account for any
552 * additional padding that may be necessary.
553 * @gfp_mask: GFP mask used for the memory allocation.
554 *
555 * This is a helper routine that may be used to allocate a MAD. Users are
556 * not required to allocate outbound MADs using this call. The returned
557 * MAD send buffer will reference a data buffer usable for sending a MAD, along
558 * with an initialized work request structure. Users may modify the returned
559 * MAD data buffer or work request before posting the send.
560 *
561 * The returned data buffer will be cleared. Users are responsible for
562 * initializing the common MAD and any class specific headers. If @rmpp_active
563 * is set, the RMPP header will be initialized for sending.
564 */
565struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
566 u32 remote_qpn, u16 pkey_index,
567 struct ib_ah *ah, int rmpp_active,
568 int hdr_len, int data_len,
569 unsigned int __nocast gfp_mask);
570
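A hedged sketch of the allocate-then-post flow for a plain (non-RMPP) MAD; the agent and address handle are assumed, and only the base version is filled in here for brevity:

static int my_send_one_mad(struct ib_mad_agent *agent, u32 remote_qpn,
			   u16 pkey_index, struct ib_ah *ah)
{
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_wr;
	int ret;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah,
				 0 /* no RMPP */,
				 sizeof (struct ib_mad_hdr),
				 sizeof (struct ib_mad) -
				 sizeof (struct ib_mad_hdr),
				 GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/* The buffer is zeroed; fill in class, method, TID, etc. here. */
	msg->mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;

	ret = ib_post_send_mad(agent, &msg->send_wr, &bad_wr);
	if (ret)
		ib_free_send_mad(msg);
	return ret;
}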
571/**
572 * ib_free_send_mad - Returns data buffers used to send a MAD.
573 * @send_buf: Previously allocated send data buffer.
574 */
575void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
576
577#endif /* IB_MAD_H */
diff --git a/drivers/infiniband/include/ib_pack.h b/drivers/infiniband/include/ib_pack.h
deleted file mode 100644
index fe480f3e8654..000000000000
--- a/drivers/infiniband/include/ib_pack.h
+++ /dev/null
@@ -1,245 +0,0 @@
1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#ifndef IB_PACK_H
36#define IB_PACK_H
37
38#include <ib_verbs.h>
39
40enum {
41 IB_LRH_BYTES = 8,
42 IB_GRH_BYTES = 40,
43 IB_BTH_BYTES = 12,
44 IB_DETH_BYTES = 8
45};
46
47struct ib_field {
48 size_t struct_offset_bytes;
49 size_t struct_size_bytes;
50 int offset_words;
51 int offset_bits;
52 int size_bits;
53 char *field_name;
54};
55
56#define RESERVED \
57 .field_name = "reserved"
58
59/*
60 * This macro cleans up the definitions of constants for BTH opcodes.
61 * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY,
62 * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives
63 * the correct value.
64 *
65 * In short, user code should use the constants defined using the
66 * macro rather than worrying about adding together other constants.
67*/
68#define IB_OPCODE(transport, op) \
69 IB_OPCODE_ ## transport ## _ ## op = \
70 IB_OPCODE_ ## transport + IB_OPCODE_ ## op
71
72enum {
73 /* transport types -- just used to define real constants */
74 IB_OPCODE_RC = 0x00,
75 IB_OPCODE_UC = 0x20,
76 IB_OPCODE_RD = 0x40,
77 IB_OPCODE_UD = 0x60,
78
79 /* operations -- just used to define real constants */
80 IB_OPCODE_SEND_FIRST = 0x00,
81 IB_OPCODE_SEND_MIDDLE = 0x01,
82 IB_OPCODE_SEND_LAST = 0x02,
83 IB_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03,
84 IB_OPCODE_SEND_ONLY = 0x04,
85 IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05,
86 IB_OPCODE_RDMA_WRITE_FIRST = 0x06,
87 IB_OPCODE_RDMA_WRITE_MIDDLE = 0x07,
88 IB_OPCODE_RDMA_WRITE_LAST = 0x08,
89 IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09,
90 IB_OPCODE_RDMA_WRITE_ONLY = 0x0a,
91 IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b,
92 IB_OPCODE_RDMA_READ_REQUEST = 0x0c,
93 IB_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d,
94 IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e,
95 IB_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f,
96 IB_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10,
97 IB_OPCODE_ACKNOWLEDGE = 0x11,
98 IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
99 IB_OPCODE_COMPARE_SWAP = 0x13,
100 IB_OPCODE_FETCH_ADD = 0x14,
101
102 /* real constants follow -- see the comment above about the IB_OPCODE()
103 macro for more details */
104
105 /* RC */
106 IB_OPCODE(RC, SEND_FIRST),
107 IB_OPCODE(RC, SEND_MIDDLE),
108 IB_OPCODE(RC, SEND_LAST),
109 IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE),
110 IB_OPCODE(RC, SEND_ONLY),
111 IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE),
112 IB_OPCODE(RC, RDMA_WRITE_FIRST),
113 IB_OPCODE(RC, RDMA_WRITE_MIDDLE),
114 IB_OPCODE(RC, RDMA_WRITE_LAST),
115 IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
116 IB_OPCODE(RC, RDMA_WRITE_ONLY),
117 IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
118 IB_OPCODE(RC, RDMA_READ_REQUEST),
119 IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST),
120 IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE),
121 IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST),
122 IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY),
123 IB_OPCODE(RC, ACKNOWLEDGE),
124 IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
125 IB_OPCODE(RC, COMPARE_SWAP),
126 IB_OPCODE(RC, FETCH_ADD),
127
128 /* UC */
129 IB_OPCODE(UC, SEND_FIRST),
130 IB_OPCODE(UC, SEND_MIDDLE),
131 IB_OPCODE(UC, SEND_LAST),
132 IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE),
133 IB_OPCODE(UC, SEND_ONLY),
134 IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE),
135 IB_OPCODE(UC, RDMA_WRITE_FIRST),
136 IB_OPCODE(UC, RDMA_WRITE_MIDDLE),
137 IB_OPCODE(UC, RDMA_WRITE_LAST),
138 IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
139 IB_OPCODE(UC, RDMA_WRITE_ONLY),
140 IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
141
142 /* RD */
143 IB_OPCODE(RD, SEND_FIRST),
144 IB_OPCODE(RD, SEND_MIDDLE),
145 IB_OPCODE(RD, SEND_LAST),
146 IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE),
147 IB_OPCODE(RD, SEND_ONLY),
148 IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE),
149 IB_OPCODE(RD, RDMA_WRITE_FIRST),
150 IB_OPCODE(RD, RDMA_WRITE_MIDDLE),
151 IB_OPCODE(RD, RDMA_WRITE_LAST),
152 IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE),
153 IB_OPCODE(RD, RDMA_WRITE_ONLY),
154 IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
155 IB_OPCODE(RD, RDMA_READ_REQUEST),
156 IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST),
157 IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE),
158 IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST),
159 IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY),
160 IB_OPCODE(RD, ACKNOWLEDGE),
161 IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
162 IB_OPCODE(RD, COMPARE_SWAP),
163 IB_OPCODE(RD, FETCH_ADD),
164
165 /* UD */
166 IB_OPCODE(UD, SEND_ONLY),
167 IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
168};
169
170enum {
171 IB_LNH_RAW = 0,
172 IB_LNH_IP = 1,
173 IB_LNH_IBA_LOCAL = 2,
174 IB_LNH_IBA_GLOBAL = 3
175};
176
177struct ib_unpacked_lrh {
178 u8 virtual_lane;
179 u8 link_version;
180 u8 service_level;
181 u8 link_next_header;
182 __be16 destination_lid;
183 __be16 packet_length;
184 __be16 source_lid;
185};
186
187struct ib_unpacked_grh {
188 u8 ip_version;
189 u8 traffic_class;
190 __be32 flow_label;
191 __be16 payload_length;
192 u8 next_header;
193 u8 hop_limit;
194 union ib_gid source_gid;
195 union ib_gid destination_gid;
196};
197
198struct ib_unpacked_bth {
199 u8 opcode;
200 u8 solicited_event;
201 u8 mig_req;
202 u8 pad_count;
203 u8 transport_header_version;
204 __be16 pkey;
205 __be32 destination_qpn;
206 u8 ack_req;
207 __be32 psn;
208};
209
210struct ib_unpacked_deth {
211 __be32 qkey;
212 __be32 source_qpn;
213};
214
215struct ib_ud_header {
216 struct ib_unpacked_lrh lrh;
217 int grh_present;
218 struct ib_unpacked_grh grh;
219 struct ib_unpacked_bth bth;
220 struct ib_unpacked_deth deth;
221 int immediate_present;
222 __be32 immediate_data;
223};
224
225void ib_pack(const struct ib_field *desc,
226 int desc_len,
227 void *structure,
228 void *buf);
229
230void ib_unpack(const struct ib_field *desc,
231 int desc_len,
232 void *buf,
233 void *structure);
234
235void ib_ud_header_init(int payload_bytes,
236 int grh_present,
237 struct ib_ud_header *header);
238
239int ib_ud_header_pack(struct ib_ud_header *header,
240 void *buf);
241
242int ib_ud_header_unpack(void *buf,
243 struct ib_ud_header *header);
244
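As an illustration, building and packing a UD header without a GRH might look like the sketch below; the LID, QPN, and Q_Key values are caller-supplied, and the PSN and P_Key are left at illustrative defaults:

static int my_build_ud_header(void *buf, int payload_bytes,
			      u16 dlid, u16 slid, u32 dqpn, u32 qkey)
{
	struct ib_ud_header header;

	ib_ud_header_init(payload_bytes, 0 /* no GRH */, &header);

	header.lrh.destination_lid = cpu_to_be16(dlid);
	header.lrh.source_lid	   = cpu_to_be16(slid);
	header.bth.pkey		   = cpu_to_be16(0xffff);  /* default pkey */
	header.bth.destination_qpn = cpu_to_be32(dqpn);
	header.bth.psn		   = cpu_to_be32(0);
	header.deth.qkey	   = cpu_to_be32(qkey);
	header.deth.source_qpn	   = cpu_to_be32(1);	   /* e.g. QP1 */
	header.immediate_present   = 0;

	/* Returns the packed length in bytes (LRH + BTH + DETH here). */
	return ib_ud_header_pack(&header, buf);
}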
245#endif /* IB_PACK_H */
diff --git a/drivers/infiniband/include/ib_sa.h b/drivers/infiniband/include/ib_sa.h
deleted file mode 100644
index 6d999f7b5d93..000000000000
--- a/drivers/infiniband/include/ib_sa.h
+++ /dev/null
@@ -1,373 +0,0 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_sa.h 2811 2005-07-06 18:11:43Z halr $
34 */
35
36#ifndef IB_SA_H
37#define IB_SA_H
38
39#include <linux/compiler.h>
40
41#include <ib_verbs.h>
42#include <ib_mad.h>
43
44enum {
45 IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */
46
47 IB_SA_METHOD_GET_TABLE = 0x12,
48 IB_SA_METHOD_GET_TABLE_RESP = 0x92,
49 IB_SA_METHOD_DELETE = 0x15
50};
51
52enum ib_sa_selector {
53 IB_SA_GTE = 0,
54 IB_SA_LTE = 1,
55 IB_SA_EQ = 2,
56 /*
57 * The meaning of "best" depends on the attribute: for
58 * example, for MTU best will return the largest available
59 * MTU, while for packet life time, best will return the
60 * smallest available life time.
61 */
62 IB_SA_BEST = 3
63};
64
65enum ib_sa_rate {
66 IB_SA_RATE_2_5_GBPS = 2,
67 IB_SA_RATE_5_GBPS = 5,
68 IB_SA_RATE_10_GBPS = 3,
69 IB_SA_RATE_20_GBPS = 6,
70 IB_SA_RATE_30_GBPS = 4,
71 IB_SA_RATE_40_GBPS = 7,
72 IB_SA_RATE_60_GBPS = 8,
73 IB_SA_RATE_80_GBPS = 9,
74 IB_SA_RATE_120_GBPS = 10
75};
76
77static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate)
78{
79 switch (rate) {
80 case IB_SA_RATE_2_5_GBPS: return 1;
81 case IB_SA_RATE_5_GBPS: return 2;
82 case IB_SA_RATE_10_GBPS: return 4;
83 case IB_SA_RATE_20_GBPS: return 8;
84 case IB_SA_RATE_30_GBPS: return 12;
85 case IB_SA_RATE_40_GBPS: return 16;
86 case IB_SA_RATE_60_GBPS: return 24;
87 case IB_SA_RATE_80_GBPS: return 32;
88 case IB_SA_RATE_120_GBPS: return 48;
89 default: return -1;
90 }
91}
92
93/*
94 * Structures for SA records are named "struct ib_sa_xxx_rec." No
95 * attempt is made to pack structures to match the physical layout of
96 * SA records in SA MADs; all packing and unpacking is handled by the
97 * SA query code.
98 *
99 * For a record with structure ib_sa_xxx_rec, the naming convention
100 * for the component mask value for field yyy is IB_SA_XXX_REC_YYY (we
101 * never use different abbreviations or otherwise change the spelling
102 * of xxx/yyy between ib_sa_xxx_rec.yyy and IB_SA_XXX_REC_YYY).
103 *
104 * Reserved rows are indicated with comments to help maintainability.
105 */
106
107/* reserved: 0 */
108/* reserved: 1 */
109#define IB_SA_PATH_REC_DGID IB_SA_COMP_MASK( 2)
110#define IB_SA_PATH_REC_SGID IB_SA_COMP_MASK( 3)
111#define IB_SA_PATH_REC_DLID IB_SA_COMP_MASK( 4)
112#define IB_SA_PATH_REC_SLID IB_SA_COMP_MASK( 5)
113#define IB_SA_PATH_REC_RAW_TRAFFIC IB_SA_COMP_MASK( 6)
114/* reserved: 7 */
115#define IB_SA_PATH_REC_FLOW_LABEL IB_SA_COMP_MASK( 8)
116#define IB_SA_PATH_REC_HOP_LIMIT IB_SA_COMP_MASK( 9)
117#define IB_SA_PATH_REC_TRAFFIC_CLASS IB_SA_COMP_MASK(10)
118#define IB_SA_PATH_REC_REVERSIBLE IB_SA_COMP_MASK(11)
119#define IB_SA_PATH_REC_NUMB_PATH IB_SA_COMP_MASK(12)
120#define IB_SA_PATH_REC_PKEY IB_SA_COMP_MASK(13)
121/* reserved: 14 */
122#define IB_SA_PATH_REC_SL IB_SA_COMP_MASK(15)
123#define IB_SA_PATH_REC_MTU_SELECTOR IB_SA_COMP_MASK(16)
124#define IB_SA_PATH_REC_MTU IB_SA_COMP_MASK(17)
125#define IB_SA_PATH_REC_RATE_SELECTOR IB_SA_COMP_MASK(18)
126#define IB_SA_PATH_REC_RATE IB_SA_COMP_MASK(19)
127#define IB_SA_PATH_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(20)
128#define IB_SA_PATH_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(21)
129#define IB_SA_PATH_REC_PREFERENCE IB_SA_COMP_MASK(22)
130
131struct ib_sa_path_rec {
132 /* reserved */
133 /* reserved */
134 union ib_gid dgid;
135 union ib_gid sgid;
136 u16 dlid;
137 u16 slid;
138 int raw_traffic;
139 /* reserved */
140 u32 flow_label;
141 u8 hop_limit;
142 u8 traffic_class;
143 int reversible;
144 u8 numb_path;
145 u16 pkey;
146 /* reserved */
147 u8 sl;
148 u8 mtu_selector;
149 u8 mtu;
150 u8 rate_selector;
151 u8 rate;
152 u8 packet_life_time_selector;
153 u8 packet_life_time;
154 u8 preference;
155};
156
157#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0)
158#define IB_SA_MCMEMBER_REC_PORT_GID IB_SA_COMP_MASK( 1)
159#define IB_SA_MCMEMBER_REC_QKEY IB_SA_COMP_MASK( 2)
160#define IB_SA_MCMEMBER_REC_MLID IB_SA_COMP_MASK( 3)
161#define IB_SA_MCMEMBER_REC_MTU_SELECTOR IB_SA_COMP_MASK( 4)
162#define IB_SA_MCMEMBER_REC_MTU IB_SA_COMP_MASK( 5)
163#define IB_SA_MCMEMBER_REC_TRAFFIC_CLASS IB_SA_COMP_MASK( 6)
164#define IB_SA_MCMEMBER_REC_PKEY IB_SA_COMP_MASK( 7)
165#define IB_SA_MCMEMBER_REC_RATE_SELECTOR IB_SA_COMP_MASK( 8)
166#define IB_SA_MCMEMBER_REC_RATE IB_SA_COMP_MASK( 9)
167#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(10)
168#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(11)
169#define IB_SA_MCMEMBER_REC_SL IB_SA_COMP_MASK(12)
170#define IB_SA_MCMEMBER_REC_FLOW_LABEL IB_SA_COMP_MASK(13)
171#define IB_SA_MCMEMBER_REC_HOP_LIMIT IB_SA_COMP_MASK(14)
172#define IB_SA_MCMEMBER_REC_SCOPE IB_SA_COMP_MASK(15)
173#define IB_SA_MCMEMBER_REC_JOIN_STATE IB_SA_COMP_MASK(16)
174#define IB_SA_MCMEMBER_REC_PROXY_JOIN IB_SA_COMP_MASK(17)
175
176struct ib_sa_mcmember_rec {
177 union ib_gid mgid;
178 union ib_gid port_gid;
179 u32 qkey;
180 u16 mlid;
181 u8 mtu_selector;
182 u8 mtu;
183 u8 traffic_class;
184 u16 pkey;
185 u8 rate_selector;
186 u8 rate;
187 u8 packet_life_time_selector;
188 u8 packet_life_time;
189 u8 sl;
190 u32 flow_label;
191 u8 hop_limit;
192 u8 scope;
193 u8 join_state;
194 int proxy_join;
195};
196
197/* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */
198#define IB_SA_SERVICE_REC_SERVICE_ID IB_SA_COMP_MASK( 0)
199#define IB_SA_SERVICE_REC_SERVICE_GID IB_SA_COMP_MASK( 1)
200#define IB_SA_SERVICE_REC_SERVICE_PKEY IB_SA_COMP_MASK( 2)
201/* reserved: 3 */
202#define IB_SA_SERVICE_REC_SERVICE_LEASE IB_SA_COMP_MASK( 4)
203#define IB_SA_SERVICE_REC_SERVICE_KEY IB_SA_COMP_MASK( 5)
204#define IB_SA_SERVICE_REC_SERVICE_NAME IB_SA_COMP_MASK( 6)
205#define IB_SA_SERVICE_REC_SERVICE_DATA8_0 IB_SA_COMP_MASK( 7)
206#define IB_SA_SERVICE_REC_SERVICE_DATA8_1 IB_SA_COMP_MASK( 8)
207#define IB_SA_SERVICE_REC_SERVICE_DATA8_2 IB_SA_COMP_MASK( 9)
208#define IB_SA_SERVICE_REC_SERVICE_DATA8_3 IB_SA_COMP_MASK(10)
209#define IB_SA_SERVICE_REC_SERVICE_DATA8_4 IB_SA_COMP_MASK(11)
210#define IB_SA_SERVICE_REC_SERVICE_DATA8_5 IB_SA_COMP_MASK(12)
211#define IB_SA_SERVICE_REC_SERVICE_DATA8_6 IB_SA_COMP_MASK(13)
212#define IB_SA_SERVICE_REC_SERVICE_DATA8_7 IB_SA_COMP_MASK(14)
213#define IB_SA_SERVICE_REC_SERVICE_DATA8_8 IB_SA_COMP_MASK(15)
214#define IB_SA_SERVICE_REC_SERVICE_DATA8_9 IB_SA_COMP_MASK(16)
215#define IB_SA_SERVICE_REC_SERVICE_DATA8_10 IB_SA_COMP_MASK(17)
216#define IB_SA_SERVICE_REC_SERVICE_DATA8_11 IB_SA_COMP_MASK(18)
217#define IB_SA_SERVICE_REC_SERVICE_DATA8_12 IB_SA_COMP_MASK(19)
218#define IB_SA_SERVICE_REC_SERVICE_DATA8_13 IB_SA_COMP_MASK(20)
219#define IB_SA_SERVICE_REC_SERVICE_DATA8_14 IB_SA_COMP_MASK(21)
220#define IB_SA_SERVICE_REC_SERVICE_DATA8_15 IB_SA_COMP_MASK(22)
221#define IB_SA_SERVICE_REC_SERVICE_DATA16_0 IB_SA_COMP_MASK(23)
222#define IB_SA_SERVICE_REC_SERVICE_DATA16_1 IB_SA_COMP_MASK(24)
223#define IB_SA_SERVICE_REC_SERVICE_DATA16_2 IB_SA_COMP_MASK(25)
224#define IB_SA_SERVICE_REC_SERVICE_DATA16_3 IB_SA_COMP_MASK(26)
225#define IB_SA_SERVICE_REC_SERVICE_DATA16_4 IB_SA_COMP_MASK(27)
226#define IB_SA_SERVICE_REC_SERVICE_DATA16_5 IB_SA_COMP_MASK(28)
227#define IB_SA_SERVICE_REC_SERVICE_DATA16_6 IB_SA_COMP_MASK(29)
228#define IB_SA_SERVICE_REC_SERVICE_DATA16_7 IB_SA_COMP_MASK(30)
229#define IB_SA_SERVICE_REC_SERVICE_DATA32_0 IB_SA_COMP_MASK(31)
230#define IB_SA_SERVICE_REC_SERVICE_DATA32_1 IB_SA_COMP_MASK(32)
231#define IB_SA_SERVICE_REC_SERVICE_DATA32_2 IB_SA_COMP_MASK(33)
232#define IB_SA_SERVICE_REC_SERVICE_DATA32_3 IB_SA_COMP_MASK(34)
233#define IB_SA_SERVICE_REC_SERVICE_DATA64_0 IB_SA_COMP_MASK(35)
234#define IB_SA_SERVICE_REC_SERVICE_DATA64_1 IB_SA_COMP_MASK(36)
235
236#define IB_DEFAULT_SERVICE_LEASE 0xFFFFFFFF
237
238struct ib_sa_service_rec {
239 u64 id;
240 union ib_gid gid;
241 u16 pkey;
242 /* reserved */
243 u32 lease;
244 u8 key[16];
245 u8 name[64];
246 u8 data8[16];
247 u16 data16[8];
248 u32 data32[4];
249 u64 data64[2];
250};
251
252struct ib_sa_query;
253
254void ib_sa_cancel_query(int id, struct ib_sa_query *query);
255
256int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
257 struct ib_sa_path_rec *rec,
258 ib_sa_comp_mask comp_mask,
259 int timeout_ms, unsigned int __nocast gfp_mask,
260 void (*callback)(int status,
261 struct ib_sa_path_rec *resp,
262 void *context),
263 void *context,
264 struct ib_sa_query **query);
265
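A hedged sketch of an asynchronous path lookup: fill in the known fields, set the matching component mask bits, and read the result inside the callback. Real code must also wait for the callback (e.g. with a struct completion) before using the result:

static void my_path_callback(int status, struct ib_sa_path_rec *resp,
			     void *context)
{
	/* resp is only valid here, and only when status is 0. */
	if (!status)
		memcpy(context, resp, sizeof (struct ib_sa_path_rec));
}

static int my_query_path(struct ib_device *device, u8 port_num,
			 union ib_gid *sgid, union ib_gid *dgid,
			 struct ib_sa_path_rec *result,
			 struct ib_sa_query **query)
{
	struct ib_sa_path_rec rec;

	memset(&rec, 0, sizeof rec);
	rec.sgid      = *sgid;
	rec.dgid      = *dgid;
	rec.numb_path = 1;

	/* Returns a query ID on success, or a negative error code. */
	return ib_sa_path_rec_get(device, port_num, &rec,
				  IB_SA_PATH_REC_SGID |
				  IB_SA_PATH_REC_DGID |
				  IB_SA_PATH_REC_NUMB_PATH,
				  1000 /* ms */, GFP_KERNEL,
				  my_path_callback, result, query);
}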
266int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
267 u8 method,
268 struct ib_sa_mcmember_rec *rec,
269 ib_sa_comp_mask comp_mask,
270 int timeout_ms, unsigned int __nocast gfp_mask,
271 void (*callback)(int status,
272 struct ib_sa_mcmember_rec *resp,
273 void *context),
274 void *context,
275 struct ib_sa_query **query);
276
277int ib_sa_service_rec_query(struct ib_device *device, u8 port_num,
278 u8 method,
279 struct ib_sa_service_rec *rec,
280 ib_sa_comp_mask comp_mask,
281 int timeout_ms, unsigned int __nocast gfp_mask,
282 void (*callback)(int status,
283 struct ib_sa_service_rec *resp,
284 void *context),
285 void *context,
286 struct ib_sa_query **sa_query);
287
288/**
289 * ib_sa_mcmember_rec_set - Start an MCMember set query
290 * @device:device to send query on
291 * @port_num: port number to send query on
292 * @rec:MCMember Record to send in query
293 * @comp_mask:component mask to send in query
294 * @timeout_ms:time to wait for response
295 * @gfp_mask:GFP mask to use for internal allocations
296 * @callback:function called when query completes, times out or is
297 * canceled
298 * @context:opaque user context passed to callback
299 * @sa_query:query context, used to cancel query
300 *
301 * Send an MCMember Set query to the SA (eg to join a multicast
302 * group). The callback function will be called when the query
303 * completes (or fails); status is 0 for a successful response, -EINTR
304 * if the query is canceled, -ETIMEDOUT if the query timed out, or
305 * -EIO if an error occurred sending the query. The resp parameter of
306 * the callback is only valid if status is 0.
307 *
308 * If the return value of ib_sa_mcmember_rec_set() is negative, it is
309 * an error code. Otherwise it is a query ID that can be used to
310 * cancel the query.
311 */
312static inline int
313ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num,
314 struct ib_sa_mcmember_rec *rec,
315 ib_sa_comp_mask comp_mask,
316 int timeout_ms, unsigned int __nocast gfp_mask,
317 void (*callback)(int status,
318 struct ib_sa_mcmember_rec *resp,
319 void *context),
320 void *context,
321 struct ib_sa_query **query)
322{
323 return ib_sa_mcmember_rec_query(device, port_num,
324 IB_MGMT_METHOD_SET,
325 rec, comp_mask,
326 timeout_ms, gfp_mask, callback,
327 context, query);
328}
329
330/**
331 * ib_sa_mcmember_rec_delete - Start an MCMember delete query
332 * @device:device to send query on
333 * @port_num: port number to send query on
334 * @rec:MCMember Record to send in query
335 * @comp_mask:component mask to send in query
336 * @timeout_ms:time to wait for response
337 * @gfp_mask:GFP mask to use for internal allocations
338 * @callback:function called when query completes, times out or is
339 * canceled
340 * @context:opaque user context passed to callback
341 * @sa_query:query context, used to cancel query
342 *
343 * Send an MCMember Delete query to the SA (eg to leave a multicast
344 * group). The callback function will be called when the query
345 * completes (or fails); status is 0 for a successful response, -EINTR
346 * if the query is canceled, -ETIMEDOUT if the query timed out, or
347 * -EIO if an error occurred sending the query. The resp parameter of
348 * the callback is only valid if status is 0.
349 *
350 * If the return value of ib_sa_mcmember_rec_delete() is negative, it
351 * is an error code. Otherwise it is a query ID that can be used to
352 * cancel the query.
353 */
354static inline int
355ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num,
356 struct ib_sa_mcmember_rec *rec,
357 ib_sa_comp_mask comp_mask,
358 int timeout_ms, unsigned int __nocast gfp_mask,
359 void (*callback)(int status,
360 struct ib_sa_mcmember_rec *resp,
361 void *context),
362 void *context,
363 struct ib_sa_query **query)
364{
365 return ib_sa_mcmember_rec_query(device, port_num,
366 IB_SA_METHOD_DELETE,
367 rec, comp_mask,
368 timeout_ms, gfp_mask, callback,
369 context, query);
370}
371
372
373#endif /* IB_SA_H */
diff --git a/drivers/infiniband/include/ib_smi.h b/drivers/infiniband/include/ib_smi.h
deleted file mode 100644
index ca8216514963..000000000000
--- a/drivers/infiniband/include/ib_smi.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_smi.h 1389 2004-12-27 22:56:47Z roland $
37 */
38
39#if !defined( IB_SMI_H )
40#define IB_SMI_H
41
42#include <ib_mad.h>
43
44#define IB_LID_PERMISSIVE 0xFFFF
45
46#define IB_SMP_DATA_SIZE 64
47#define IB_SMP_MAX_PATH_HOPS 64
48
49struct ib_smp {
50 u8 base_version;
51 u8 mgmt_class;
52 u8 class_version;
53 u8 method;
54 u16 status;
55 u8 hop_ptr;
56 u8 hop_cnt;
57 u64 tid;
58 u16 attr_id;
59 u16 resv;
60 u32 attr_mod;
61 u64 mkey;
62 u16 dr_slid;
63 u16 dr_dlid;
64 u8 reserved[28];
65 u8 data[IB_SMP_DATA_SIZE];
66 u8 initial_path[IB_SMP_MAX_PATH_HOPS];
67 u8 return_path[IB_SMP_MAX_PATH_HOPS];
68} __attribute__ ((packed));
69
70#define IB_SMP_DIRECTION __constant_htons(0x8000)
71
72/* Subnet management attributes */
73#define IB_SMP_ATTR_NOTICE __constant_htons(0x0002)
74#define IB_SMP_ATTR_NODE_DESC __constant_htons(0x0010)
75#define IB_SMP_ATTR_NODE_INFO __constant_htons(0x0011)
76#define IB_SMP_ATTR_SWITCH_INFO __constant_htons(0x0012)
77#define IB_SMP_ATTR_GUID_INFO __constant_htons(0x0014)
78#define IB_SMP_ATTR_PORT_INFO __constant_htons(0x0015)
79#define IB_SMP_ATTR_PKEY_TABLE __constant_htons(0x0016)
80#define IB_SMP_ATTR_SL_TO_VL_TABLE __constant_htons(0x0017)
81#define IB_SMP_ATTR_VL_ARB_TABLE __constant_htons(0x0018)
82#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE __constant_htons(0x0019)
83#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE __constant_htons(0x001A)
84#define IB_SMP_ATTR_MCAST_FORWARD_TABLE __constant_htons(0x001B)
85#define IB_SMP_ATTR_SM_INFO __constant_htons(0x0020)
86#define IB_SMP_ATTR_VENDOR_DIAG __constant_htons(0x0030)
87#define IB_SMP_ATTR_LED_INFO __constant_htons(0x0031)
88#define IB_SMP_ATTR_VENDOR_MASK __constant_htons(0xFF00)
89
90static inline u8
91ib_get_smp_direction(struct ib_smp *smp)
92{
93 return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION);
94}
95
96#endif /* IB_SMI_H */
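
For what the helper above encodes: the direction (D) bit tested by ib_get_smp_direction() distinguishes an outbound directed-route SMP, which follows initial_path[], from one on its return path, which follows return_path[]. A minimal sketch:

/* Sketch: branch on the directed-route direction (D) bit. */
static void handle_dr_smp(struct ib_smp *smp)
{
        if (ib_get_smp_direction(smp)) {
                /* D bit set: SMP is returning; route via return_path[] */
        } else {
                /* D bit clear: SMP is outbound; route via initial_path[] */
        }
}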
diff --git a/drivers/infiniband/include/ib_user_cm.h b/drivers/infiniband/include/ib_user_cm.h
deleted file mode 100644
index 500b1af6ff77..000000000000
--- a/drivers/infiniband/include/ib_user_cm.h
+++ /dev/null
@@ -1,328 +0,0 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_user_cm.h 2576 2005-06-09 17:00:30Z libor $
33 */
34
35#ifndef IB_USER_CM_H
36#define IB_USER_CM_H
37
38#include <linux/types.h>
39
40#define IB_USER_CM_ABI_VERSION 1
41
42enum {
43 IB_USER_CM_CMD_CREATE_ID,
44 IB_USER_CM_CMD_DESTROY_ID,
45 IB_USER_CM_CMD_ATTR_ID,
46
47 IB_USER_CM_CMD_LISTEN,
48 IB_USER_CM_CMD_ESTABLISH,
49
50 IB_USER_CM_CMD_SEND_REQ,
51 IB_USER_CM_CMD_SEND_REP,
52 IB_USER_CM_CMD_SEND_RTU,
53 IB_USER_CM_CMD_SEND_DREQ,
54 IB_USER_CM_CMD_SEND_DREP,
55 IB_USER_CM_CMD_SEND_REJ,
56 IB_USER_CM_CMD_SEND_MRA,
57 IB_USER_CM_CMD_SEND_LAP,
58 IB_USER_CM_CMD_SEND_APR,
59 IB_USER_CM_CMD_SEND_SIDR_REQ,
60 IB_USER_CM_CMD_SEND_SIDR_REP,
61
62 IB_USER_CM_CMD_EVENT,
63};
64/*
65 * command ABI structures.
66 */
67struct ib_ucm_cmd_hdr {
68 __u32 cmd;
69 __u16 in;
70 __u16 out;
71};
72
73struct ib_ucm_create_id {
74 __u64 response;
75};
76
77struct ib_ucm_create_id_resp {
78 __u32 id;
79};
80
81struct ib_ucm_destroy_id {
82 __u32 id;
83};
84
85struct ib_ucm_attr_id {
86 __u64 response;
87 __u32 id;
88};
89
90struct ib_ucm_attr_id_resp {
91 __u64 service_id;
92 __u64 service_mask;
93 __u32 local_id;
94 __u32 remote_id;
95};
96
97struct ib_ucm_listen {
98 __u64 service_id;
99 __u64 service_mask;
100 __u32 id;
101};
102
103struct ib_ucm_establish {
104 __u32 id;
105};
106
107struct ib_ucm_private_data {
108 __u64 data;
109 __u32 id;
110 __u8 len;
111 __u8 reserved[3];
112};
113
114struct ib_ucm_path_rec {
115 __u8 dgid[16];
116 __u8 sgid[16];
117 __u16 dlid;
118 __u16 slid;
119 __u32 raw_traffic;
120 __u32 flow_label;
121 __u32 reversible;
122 __u32 mtu;
123 __u16 pkey;
124 __u8 hop_limit;
125 __u8 traffic_class;
126 __u8 numb_path;
127 __u8 sl;
128 __u8 mtu_selector;
129 __u8 rate_selector;
130 __u8 rate;
131 __u8 packet_life_time_selector;
132 __u8 packet_life_time;
133 __u8 preference;
134};
135
136struct ib_ucm_req {
137 __u32 id;
138 __u32 qpn;
139 __u32 qp_type;
140 __u32 psn;
141 __u64 sid;
142 __u64 data;
143 __u64 primary_path;
144 __u64 alternate_path;
145 __u8 len;
146 __u8 peer_to_peer;
147 __u8 responder_resources;
148 __u8 initiator_depth;
149 __u8 remote_cm_response_timeout;
150 __u8 flow_control;
151 __u8 local_cm_response_timeout;
152 __u8 retry_count;
153 __u8 rnr_retry_count;
154 __u8 max_cm_retries;
155 __u8 srq;
156 __u8 reserved[1];
157};
158
159struct ib_ucm_rep {
160 __u64 data;
161 __u32 id;
162 __u32 qpn;
163 __u32 psn;
164 __u8 len;
165 __u8 responder_resources;
166 __u8 initiator_depth;
167 __u8 target_ack_delay;
168 __u8 failover_accepted;
169 __u8 flow_control;
170 __u8 rnr_retry_count;
171 __u8 srq;
172};
173
174struct ib_ucm_info {
175 __u32 id;
176 __u32 status;
177 __u64 info;
178 __u64 data;
179 __u8 info_len;
180 __u8 data_len;
181 __u8 reserved[2];
182};
183
184struct ib_ucm_mra {
185 __u64 data;
186 __u32 id;
187 __u8 len;
188 __u8 timeout;
189 __u8 reserved[2];
190};
191
192struct ib_ucm_lap {
193 __u64 path;
194 __u64 data;
195 __u32 id;
196 __u8 len;
197 __u8 reserved[3];
198};
199
200struct ib_ucm_sidr_req {
201 __u32 id;
202 __u32 timeout;
203 __u64 sid;
204 __u64 data;
205 __u64 path;
206 __u16 pkey;
207 __u8 len;
208 __u8 max_cm_retries;
209};
210
211struct ib_ucm_sidr_rep {
212 __u32 id;
213 __u32 qpn;
214 __u32 qkey;
215 __u32 status;
216 __u64 info;
217 __u64 data;
218 __u8 info_len;
219 __u8 data_len;
220 __u8 reserved[2];
221};
222/*
223 * event notification ABI structures.
224 */
225struct ib_ucm_event_get {
226 __u64 response;
227 __u64 data;
228 __u64 info;
229 __u8 data_len;
230 __u8 info_len;
231 __u8 reserved[2];
232};
233
234struct ib_ucm_req_event_resp {
235 __u32 listen_id;
236 /* device */
237 /* port */
238 struct ib_ucm_path_rec primary_path;
239 struct ib_ucm_path_rec alternate_path;
240 __u64 remote_ca_guid;
241 __u32 remote_qkey;
242 __u32 remote_qpn;
243 __u32 qp_type;
244 __u32 starting_psn;
245 __u8 responder_resources;
246 __u8 initiator_depth;
247 __u8 local_cm_response_timeout;
248 __u8 flow_control;
249 __u8 remote_cm_response_timeout;
250 __u8 retry_count;
251 __u8 rnr_retry_count;
252 __u8 srq;
253};
254
255struct ib_ucm_rep_event_resp {
256 __u64 remote_ca_guid;
257 __u32 remote_qkey;
258 __u32 remote_qpn;
259 __u32 starting_psn;
260 __u8 responder_resources;
261 __u8 initiator_depth;
262 __u8 target_ack_delay;
263 __u8 failover_accepted;
264 __u8 flow_control;
265 __u8 rnr_retry_count;
266 __u8 srq;
267 __u8 reserved[1];
268};
269
270struct ib_ucm_rej_event_resp {
271 __u32 reason;
272 /* ari in ib_ucm_event_get info field. */
273};
274
275struct ib_ucm_mra_event_resp {
276 __u8 timeout;
277 __u8 reserved[3];
278};
279
280struct ib_ucm_lap_event_resp {
281 struct ib_ucm_path_rec path;
282};
283
284struct ib_ucm_apr_event_resp {
285 __u32 status;
286 /* apr info in ib_ucm_event_get info field. */
287};
288
289struct ib_ucm_sidr_req_event_resp {
290 __u32 listen_id;
291 /* device */
292 /* port */
293 __u16 pkey;
294 __u8 reserved[2];
295};
296
297struct ib_ucm_sidr_rep_event_resp {
298 __u32 status;
299 __u32 qkey;
300 __u32 qpn;
301 /* info in ib_ucm_event_get info field. */
302};
303
304#define IB_UCM_PRES_DATA 0x01
305#define IB_UCM_PRES_INFO 0x02
306#define IB_UCM_PRES_PRIMARY 0x04
307#define IB_UCM_PRES_ALTERNATE 0x08
308
309struct ib_ucm_event_resp {
310 __u32 id;
311 __u32 event;
312 __u32 present;
313 union {
314 struct ib_ucm_req_event_resp req_resp;
315 struct ib_ucm_rep_event_resp rep_resp;
316 struct ib_ucm_rej_event_resp rej_resp;
317 struct ib_ucm_mra_event_resp mra_resp;
318 struct ib_ucm_lap_event_resp lap_resp;
319 struct ib_ucm_apr_event_resp apr_resp;
320
321 struct ib_ucm_sidr_req_event_resp sidr_req_resp;
322 struct ib_ucm_sidr_rep_event_resp sidr_rep_resp;
323
324 __u32 send_status;
325 } u;
326};
327
328#endif /* IB_USER_CM_H */
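
To make the command ABI above concrete: userspace drives the CM by writing a struct ib_ucm_cmd_hdr followed immediately by the command payload to the ucm character device. A hedged sketch of issuing a LISTEN command; the device path implied by ucm_fd and the byte-length semantics of hdr.in/hdr.out are assumptions for illustration.

/* Sketch: issue a LISTEN through the userspace CM ABI. Assumes
 * ucm_fd is an open fd on the ucm device node, and that hdr.in and
 * hdr.out carry payload/response lengths in bytes. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int ucm_listen(int ucm_fd, uint32_t cm_id,
                      uint64_t service_id, uint64_t service_mask)
{
        struct {
                struct ib_ucm_cmd_hdr hdr;
                struct ib_ucm_listen  cmd;
        } msg;

        memset(&msg, 0, sizeof msg);
        msg.hdr.cmd = IB_USER_CM_CMD_LISTEN;
        msg.hdr.in  = sizeof msg.cmd;   /* payload length */
        msg.hdr.out = 0;                /* no response buffer */
        msg.cmd.id           = cm_id;
        msg.cmd.service_id   = service_id;
        msg.cmd.service_mask = service_mask;

        if (write(ucm_fd, &msg, sizeof msg) != sizeof msg)
                return -1;
        return 0;
}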
diff --git a/drivers/infiniband/include/ib_user_mad.h b/drivers/infiniband/include/ib_user_mad.h
deleted file mode 100644
index a9a56b50aacc..000000000000
--- a/drivers/infiniband/include/ib_user_mad.h
+++ /dev/null
@@ -1,139 +0,0 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_user_mad.h 2814 2005-07-06 19:14:09Z halr $
34 */
35
36#ifndef IB_USER_MAD_H
37#define IB_USER_MAD_H
38
39#include <linux/types.h>
40#include <linux/ioctl.h>
41
42/*
43 * Increment this value if any changes that break userspace ABI
44 * compatibility are made.
45 */
46#define IB_USER_MAD_ABI_VERSION 5
47
48/*
49 * Make sure that all structs defined in this file remain laid out so
50 * that they pack the same way on 32-bit and 64-bit architectures (to
51 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
52 */
53
54/**
55 * ib_user_mad_hdr - MAD packet header
56 * @id - ID of agent MAD received with/to be sent with
57 * @status - 0 on successful receive, ETIMEDOUT if no response
58 * received (transaction ID in data[] will be set to TID of original
59 * request) (ignored on send)
60 * @timeout_ms - Milliseconds to wait for response (unset on receive)
61 * @retries - Number of automatic retries to attempt
62 * @qpn - Remote QP number received from/to be sent to
63 * @qkey - Remote Q_Key to be sent with (unset on receive)
64 * @lid - Remote lid received from/to be sent to
65 * @sl - Service level received with/to be sent with
66 * @path_bits - Local path bits received with/to be sent with
67 * @grh_present - If set, GRH was received/should be sent
68 * @gid_index - Local GID index to send with (unset on receive)
69 * @hop_limit - Hop limit in GRH
70 * @traffic_class - Traffic class in GRH
71 * @gid - Remote GID in GRH
72 * @flow_label - Flow label in GRH
73 *
74 * All multi-byte quantities are stored in network (big endian) byte order.
75 */
76struct ib_user_mad_hdr {
77 __u32 id;
78 __u32 status;
79 __u32 timeout_ms;
80 __u32 retries;
81 __u32 length;
82 __u32 qpn;
83 __u32 qkey;
84 __u16 lid;
85 __u8 sl;
86 __u8 path_bits;
87 __u8 grh_present;
88 __u8 gid_index;
89 __u8 hop_limit;
90 __u8 traffic_class;
91 __u8 gid[16];
92 __u32 flow_label;
93};
94
95/**
96 * ib_user_mad - MAD packet
97 * @hdr - MAD packet header
98 * @data - Contents of MAD
99 *
100 */
101struct ib_user_mad {
102 struct ib_user_mad_hdr hdr;
103 __u8 data[0];
104};
105
106/**
107 * ib_user_mad_reg_req - MAD registration request
108 * @id - Set by the kernel; used to identify agent in future requests.
109 * @qpn - Queue pair number; must be 0 or 1.
110 * @method_mask - The caller will receive unsolicited MADs for any method
111 * where @method_mask = 1.
112 * @mgmt_class - Indicates which management class of MADs should be received
113 * by the caller. This field is only required if the user wishes to
114 * receive unsolicited MADs, otherwise it should be 0.
115 * @mgmt_class_version - Indicates which version of MADs for the given
116 * management class to receive.
117 * @oui - Indicates IEEE OUI when mgmt_class is a vendor class
118 * in the range from 0x30 to 0x4f. Otherwise not used.
119 * @rmpp_version - If set, indicates the RMPP version used.
120 *
121 */
122struct ib_user_mad_reg_req {
123 __u32 id;
124 __u32 method_mask[4];
125 __u8 qpn;
126 __u8 mgmt_class;
127 __u8 mgmt_class_version;
128 __u8 oui[3];
129 __u8 rmpp_version;
130};
131
132#define IB_IOCTL_MAGIC 0x1b
133
134#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \
135 struct ib_user_mad_reg_req)
136
137#define IB_USER_MAD_UNREGISTER_AGENT _IOW(IB_IOCTL_MAGIC, 2, __u32)
138
139#endif /* IB_USER_MAD_H */
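
A short sketch of agent registration against the ioctl ABI above. The device path is an assumption, and the management class constants live in ib_mad.h, so they are taken as caller-supplied here.

/* Sketch: register a MAD agent on QP1 via the umad char device.
 * dev is assumed to be something like "/dev/infiniband/umad0". */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int umad_register(const char *dev,
                         uint8_t mgmt_class, uint8_t class_version)
{
        struct ib_user_mad_reg_req req;
        int fd = open(dev, O_RDWR);

        if (fd < 0)
                return -1;

        memset(&req, 0, sizeof req);
        req.qpn                = 1;            /* GSI */
        req.mgmt_class         = mgmt_class;
        req.mgmt_class_version = class_version;
        /* method_mask selects which unsolicited methods to receive;
         * left zero here, so only responses to our sends arrive. */

        if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req) < 0) {
                close(fd);
                return -1;
        }
        /* req.id now holds the agent ID to place in ib_user_mad_hdr.id */
        return fd;
}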
diff --git a/drivers/infiniband/include/ib_user_verbs.h b/drivers/infiniband/include/ib_user_verbs.h
deleted file mode 100644
index 7c613706af72..000000000000
--- a/drivers/infiniband/include/ib_user_verbs.h
+++ /dev/null
@@ -1,389 +0,0 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_user_verbs.h 2708 2005-06-24 17:27:21Z roland $
34 */
35
36#ifndef IB_USER_VERBS_H
37#define IB_USER_VERBS_H
38
39#include <linux/types.h>
40
41/*
42 * Increment this value if any changes that break userspace ABI
43 * compatibility are made.
44 */
45#define IB_USER_VERBS_ABI_VERSION 1
46
47enum {
48 IB_USER_VERBS_CMD_QUERY_PARAMS,
49 IB_USER_VERBS_CMD_GET_CONTEXT,
50 IB_USER_VERBS_CMD_QUERY_DEVICE,
51 IB_USER_VERBS_CMD_QUERY_PORT,
52 IB_USER_VERBS_CMD_QUERY_GID,
53 IB_USER_VERBS_CMD_QUERY_PKEY,
54 IB_USER_VERBS_CMD_ALLOC_PD,
55 IB_USER_VERBS_CMD_DEALLOC_PD,
56 IB_USER_VERBS_CMD_CREATE_AH,
57 IB_USER_VERBS_CMD_MODIFY_AH,
58 IB_USER_VERBS_CMD_QUERY_AH,
59 IB_USER_VERBS_CMD_DESTROY_AH,
60 IB_USER_VERBS_CMD_REG_MR,
61 IB_USER_VERBS_CMD_REG_SMR,
62 IB_USER_VERBS_CMD_REREG_MR,
63 IB_USER_VERBS_CMD_QUERY_MR,
64 IB_USER_VERBS_CMD_DEREG_MR,
65 IB_USER_VERBS_CMD_ALLOC_MW,
66 IB_USER_VERBS_CMD_BIND_MW,
67 IB_USER_VERBS_CMD_DEALLOC_MW,
68 IB_USER_VERBS_CMD_CREATE_CQ,
69 IB_USER_VERBS_CMD_RESIZE_CQ,
70 IB_USER_VERBS_CMD_DESTROY_CQ,
71 IB_USER_VERBS_CMD_POLL_CQ,
72 IB_USER_VERBS_CMD_PEEK_CQ,
73 IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
74 IB_USER_VERBS_CMD_CREATE_QP,
75 IB_USER_VERBS_CMD_QUERY_QP,
76 IB_USER_VERBS_CMD_MODIFY_QP,
77 IB_USER_VERBS_CMD_DESTROY_QP,
78 IB_USER_VERBS_CMD_POST_SEND,
79 IB_USER_VERBS_CMD_POST_RECV,
80 IB_USER_VERBS_CMD_ATTACH_MCAST,
81 IB_USER_VERBS_CMD_DETACH_MCAST
82};
83
84/*
85 * Make sure that all structs defined in this file remain laid out so
86 * that they pack the same way on 32-bit and 64-bit architectures (to
87 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
88 * In particular do not use pointer types -- pass pointers in __u64
89 * instead.
90 */
91
92struct ib_uverbs_async_event_desc {
93 __u64 element;
94 __u32 event_type; /* enum ib_event_type */
95 __u32 reserved;
96};
97
98struct ib_uverbs_comp_event_desc {
99 __u64 cq_handle;
100};
101
102/*
103 * All commands from userspace should start with a __u32 command field
104 * followed by __u16 in_words and out_words fields (which give the
105 * length of the command block and response buffer if any in 32-bit
106 * words). The kernel driver will read these fields first and read
107 * the rest of the command struct based on these values.
108 */
109
110struct ib_uverbs_cmd_hdr {
111 __u32 command;
112 __u16 in_words;
113 __u16 out_words;
114};
115
116/*
117 * No driver_data for "query params" command, since this is intended
118 * to be a core function with no possible device dependence.
119 */
120struct ib_uverbs_query_params {
121 __u64 response;
122};
123
124struct ib_uverbs_query_params_resp {
125 __u32 num_cq_events;
126};
127
128struct ib_uverbs_get_context {
129 __u64 response;
130 __u64 cq_fd_tab;
131 __u64 driver_data[0];
132};
133
134struct ib_uverbs_get_context_resp {
135 __u32 async_fd;
136 __u32 reserved;
137};
138
139struct ib_uverbs_query_device {
140 __u64 response;
141 __u64 driver_data[0];
142};
143
144struct ib_uverbs_query_device_resp {
145 __u64 fw_ver;
146 __u64 node_guid;
147 __u64 sys_image_guid;
148 __u64 max_mr_size;
149 __u64 page_size_cap;
150 __u32 vendor_id;
151 __u32 vendor_part_id;
152 __u32 hw_ver;
153 __u32 max_qp;
154 __u32 max_qp_wr;
155 __u32 device_cap_flags;
156 __u32 max_sge;
157 __u32 max_sge_rd;
158 __u32 max_cq;
159 __u32 max_cqe;
160 __u32 max_mr;
161 __u32 max_pd;
162 __u32 max_qp_rd_atom;
163 __u32 max_ee_rd_atom;
164 __u32 max_res_rd_atom;
165 __u32 max_qp_init_rd_atom;
166 __u32 max_ee_init_rd_atom;
167 __u32 atomic_cap;
168 __u32 max_ee;
169 __u32 max_rdd;
170 __u32 max_mw;
171 __u32 max_raw_ipv6_qp;
172 __u32 max_raw_ethy_qp;
173 __u32 max_mcast_grp;
174 __u32 max_mcast_qp_attach;
175 __u32 max_total_mcast_qp_attach;
176 __u32 max_ah;
177 __u32 max_fmr;
178 __u32 max_map_per_fmr;
179 __u32 max_srq;
180 __u32 max_srq_wr;
181 __u32 max_srq_sge;
182 __u16 max_pkeys;
183 __u8 local_ca_ack_delay;
184 __u8 phys_port_cnt;
185 __u8 reserved[4];
186};
187
188struct ib_uverbs_query_port {
189 __u64 response;
190 __u8 port_num;
191 __u8 reserved[7];
192 __u64 driver_data[0];
193};
194
195struct ib_uverbs_query_port_resp {
196 __u32 port_cap_flags;
197 __u32 max_msg_sz;
198 __u32 bad_pkey_cntr;
199 __u32 qkey_viol_cntr;
200 __u32 gid_tbl_len;
201 __u16 pkey_tbl_len;
202 __u16 lid;
203 __u16 sm_lid;
204 __u8 state;
205 __u8 max_mtu;
206 __u8 active_mtu;
207 __u8 lmc;
208 __u8 max_vl_num;
209 __u8 sm_sl;
210 __u8 subnet_timeout;
211 __u8 init_type_reply;
212 __u8 active_width;
213 __u8 active_speed;
214 __u8 phys_state;
215 __u8 reserved[3];
216};
217
218struct ib_uverbs_query_gid {
219 __u64 response;
220 __u8 port_num;
221 __u8 index;
222 __u8 reserved[6];
223 __u64 driver_data[0];
224};
225
226struct ib_uverbs_query_gid_resp {
227 __u8 gid[16];
228};
229
230struct ib_uverbs_query_pkey {
231 __u64 response;
232 __u8 port_num;
233 __u8 index;
234 __u8 reserved[6];
235 __u64 driver_data[0];
236};
237
238struct ib_uverbs_query_pkey_resp {
239 __u16 pkey;
240 __u16 reserved;
241};
242
243struct ib_uverbs_alloc_pd {
244 __u64 response;
245 __u64 driver_data[0];
246};
247
248struct ib_uverbs_alloc_pd_resp {
249 __u32 pd_handle;
250};
251
252struct ib_uverbs_dealloc_pd {
253 __u32 pd_handle;
254};
255
256struct ib_uverbs_reg_mr {
257 __u64 response;
258 __u64 start;
259 __u64 length;
260 __u64 hca_va;
261 __u32 pd_handle;
262 __u32 access_flags;
263 __u64 driver_data[0];
264};
265
266struct ib_uverbs_reg_mr_resp {
267 __u32 mr_handle;
268 __u32 lkey;
269 __u32 rkey;
270};
271
272struct ib_uverbs_dereg_mr {
273 __u32 mr_handle;
274};
275
276struct ib_uverbs_create_cq {
277 __u64 response;
278 __u64 user_handle;
279 __u32 cqe;
280 __u32 event_handler;
281 __u64 driver_data[0];
282};
283
284struct ib_uverbs_create_cq_resp {
285 __u32 cq_handle;
286 __u32 cqe;
287};
288
289struct ib_uverbs_destroy_cq {
290 __u32 cq_handle;
291};
292
293struct ib_uverbs_create_qp {
294 __u64 response;
295 __u64 user_handle;
296 __u32 pd_handle;
297 __u32 send_cq_handle;
298 __u32 recv_cq_handle;
299 __u32 srq_handle;
300 __u32 max_send_wr;
301 __u32 max_recv_wr;
302 __u32 max_send_sge;
303 __u32 max_recv_sge;
304 __u32 max_inline_data;
305 __u8 sq_sig_all;
306 __u8 qp_type;
307 __u8 is_srq;
308 __u8 reserved;
309 __u64 driver_data[0];
310};
311
312struct ib_uverbs_create_qp_resp {
313 __u32 qp_handle;
314 __u32 qpn;
315};
316
317/*
318 * This struct needs to remain a multiple of 8 bytes to keep the
319 * alignment of the modify QP parameters.
320 */
321struct ib_uverbs_qp_dest {
322 __u8 dgid[16];
323 __u32 flow_label;
324 __u16 dlid;
325 __u16 reserved;
326 __u8 sgid_index;
327 __u8 hop_limit;
328 __u8 traffic_class;
329 __u8 sl;
330 __u8 src_path_bits;
331 __u8 static_rate;
332 __u8 is_global;
333 __u8 port_num;
334};
335
336struct ib_uverbs_modify_qp {
337 struct ib_uverbs_qp_dest dest;
338 struct ib_uverbs_qp_dest alt_dest;
339 __u32 qp_handle;
340 __u32 attr_mask;
341 __u32 qkey;
342 __u32 rq_psn;
343 __u32 sq_psn;
344 __u32 dest_qp_num;
345 __u32 qp_access_flags;
346 __u16 pkey_index;
347 __u16 alt_pkey_index;
348 __u8 qp_state;
349 __u8 cur_qp_state;
350 __u8 path_mtu;
351 __u8 path_mig_state;
352 __u8 en_sqd_async_notify;
353 __u8 max_rd_atomic;
354 __u8 max_dest_rd_atomic;
355 __u8 min_rnr_timer;
356 __u8 port_num;
357 __u8 timeout;
358 __u8 retry_cnt;
359 __u8 rnr_retry;
360 __u8 alt_port_num;
361 __u8 alt_timeout;
362 __u8 reserved[2];
363 __u64 driver_data[0];
364};
365
366struct ib_uverbs_modify_qp_resp {
367};
368
369struct ib_uverbs_destroy_qp {
370 __u32 qp_handle;
371};
372
373struct ib_uverbs_attach_mcast {
374 __u8 gid[16];
375 __u32 qp_handle;
376 __u16 mlid;
377 __u16 reserved;
378 __u64 driver_data[0];
379};
380
381struct ib_uverbs_detach_mcast {
382 __u8 gid[16];
383 __u32 qp_handle;
384 __u16 mlid;
385 __u16 reserved;
386 __u64 driver_data[0];
387};
388
389#endif /* IB_USER_VERBS_H */
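
To make the write-based convention above concrete, a hedged sketch of an ALLOC_PD call: the header carries the command-block and response-buffer lengths in 32-bit words (in_words is taken here to cover the whole write, header included), and the kernel writes the reply to the user pointer passed in the response field.

/* Sketch: allocate a PD through the uverbs command stream.
 * uverbs_fd is assumed to be an open fd on the uverbs device node. */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static int uverbs_alloc_pd(int uverbs_fd, uint32_t *pd_handle)
{
        struct ib_uverbs_alloc_pd_resp resp;
        struct {
                struct ib_uverbs_cmd_hdr  hdr;
                struct ib_uverbs_alloc_pd cmd;
        } msg;

        memset(&msg, 0, sizeof msg);
        memset(&resp, 0, sizeof resp);
        msg.hdr.command   = IB_USER_VERBS_CMD_ALLOC_PD;
        msg.hdr.in_words  = sizeof msg / 4;   /* whole write, in 32-bit words */
        msg.hdr.out_words = sizeof resp / 4;
        msg.cmd.response  = (uintptr_t) &resp;

        if (write(uverbs_fd, &msg, sizeof msg) != sizeof msg)
                return -1;

        *pd_handle = resp.pd_handle;
        return 0;
}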
diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h
deleted file mode 100644
index 5d24edaa66e6..000000000000
--- a/drivers/infiniband/include/ib_verbs.h
+++ /dev/null
@@ -1,1365 +0,0 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Cisco Systems. All rights reserved.
8 *
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
14 *
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
18 *
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
22 *
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 *
37 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
38 */
39
40#if !defined(IB_VERBS_H)
41#define IB_VERBS_H
42
43#include <linux/types.h>
44#include <linux/device.h>
45
46#include <asm/atomic.h>
47#include <asm/scatterlist.h>
48#include <asm/uaccess.h>
49
50union ib_gid {
51 u8 raw[16];
52 struct {
53 u64 subnet_prefix;
54 u64 interface_id;
55 } global;
56};
57
58enum ib_node_type {
59 IB_NODE_CA = 1,
60 IB_NODE_SWITCH,
61 IB_NODE_ROUTER
62};
63
64enum ib_device_cap_flags {
65 IB_DEVICE_RESIZE_MAX_WR = 1,
66 IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
67 IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
68 IB_DEVICE_RAW_MULTI = (1<<3),
69 IB_DEVICE_AUTO_PATH_MIG = (1<<4),
70 IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
71 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
72 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
73 IB_DEVICE_SHUTDOWN_PORT = (1<<8),
74 IB_DEVICE_INIT_TYPE = (1<<9),
75 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
76 IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
77 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
78 IB_DEVICE_SRQ_RESIZE = (1<<13),
79 IB_DEVICE_N_NOTIFY_CQ = (1<<14),
80};
81
82enum ib_atomic_cap {
83 IB_ATOMIC_NONE,
84 IB_ATOMIC_HCA,
85 IB_ATOMIC_GLOB
86};
87
88struct ib_device_attr {
89 u64 fw_ver;
90 u64 node_guid;
91 u64 sys_image_guid;
92 u64 max_mr_size;
93 u64 page_size_cap;
94 u32 vendor_id;
95 u32 vendor_part_id;
96 u32 hw_ver;
97 int max_qp;
98 int max_qp_wr;
99 int device_cap_flags;
100 int max_sge;
101 int max_sge_rd;
102 int max_cq;
103 int max_cqe;
104 int max_mr;
105 int max_pd;
106 int max_qp_rd_atom;
107 int max_ee_rd_atom;
108 int max_res_rd_atom;
109 int max_qp_init_rd_atom;
110 int max_ee_init_rd_atom;
111 enum ib_atomic_cap atomic_cap;
112 int max_ee;
113 int max_rdd;
114 int max_mw;
115 int max_raw_ipv6_qp;
116 int max_raw_ethy_qp;
117 int max_mcast_grp;
118 int max_mcast_qp_attach;
119 int max_total_mcast_qp_attach;
120 int max_ah;
121 int max_fmr;
122 int max_map_per_fmr;
123 int max_srq;
124 int max_srq_wr;
125 int max_srq_sge;
126 u16 max_pkeys;
127 u8 local_ca_ack_delay;
128};
129
130enum ib_mtu {
131 IB_MTU_256 = 1,
132 IB_MTU_512 = 2,
133 IB_MTU_1024 = 3,
134 IB_MTU_2048 = 4,
135 IB_MTU_4096 = 5
136};
137
138static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
139{
140 switch (mtu) {
141 case IB_MTU_256: return 256;
142 case IB_MTU_512: return 512;
143 case IB_MTU_1024: return 1024;
144 case IB_MTU_2048: return 2048;
145 case IB_MTU_4096: return 4096;
146 default: return -1;
147 }
148}
149
150enum ib_port_state {
151 IB_PORT_NOP = 0,
152 IB_PORT_DOWN = 1,
153 IB_PORT_INIT = 2,
154 IB_PORT_ARMED = 3,
155 IB_PORT_ACTIVE = 4,
156 IB_PORT_ACTIVE_DEFER = 5
157};
158
159enum ib_port_cap_flags {
160 IB_PORT_SM = 1 << 1,
161 IB_PORT_NOTICE_SUP = 1 << 2,
162 IB_PORT_TRAP_SUP = 1 << 3,
163 IB_PORT_OPT_IPD_SUP = 1 << 4,
164 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
165 IB_PORT_SL_MAP_SUP = 1 << 6,
166 IB_PORT_MKEY_NVRAM = 1 << 7,
167 IB_PORT_PKEY_NVRAM = 1 << 8,
168 IB_PORT_LED_INFO_SUP = 1 << 9,
169 IB_PORT_SM_DISABLED = 1 << 10,
170 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
171 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
172 IB_PORT_CM_SUP = 1 << 16,
173 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
174 IB_PORT_REINIT_SUP = 1 << 18,
175 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
176 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
177 IB_PORT_DR_NOTICE_SUP = 1 << 21,
178 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
179 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
180 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
181 IB_PORT_CLIENT_REG_SUP = 1 << 25
182};
183
184enum ib_port_width {
185 IB_WIDTH_1X = 1,
186 IB_WIDTH_4X = 2,
187 IB_WIDTH_8X = 4,
188 IB_WIDTH_12X = 8
189};
190
191static inline int ib_width_enum_to_int(enum ib_port_width width)
192{
193 switch (width) {
194 case IB_WIDTH_1X: return 1;
195 case IB_WIDTH_4X: return 4;
196 case IB_WIDTH_8X: return 8;
197 case IB_WIDTH_12X: return 12;
198 default: return -1;
199 }
200}
201
202struct ib_port_attr {
203 enum ib_port_state state;
204 enum ib_mtu max_mtu;
205 enum ib_mtu active_mtu;
206 int gid_tbl_len;
207 u32 port_cap_flags;
208 u32 max_msg_sz;
209 u32 bad_pkey_cntr;
210 u32 qkey_viol_cntr;
211 u16 pkey_tbl_len;
212 u16 lid;
213 u16 sm_lid;
214 u8 lmc;
215 u8 max_vl_num;
216 u8 sm_sl;
217 u8 subnet_timeout;
218 u8 init_type_reply;
219 u8 active_width;
220 u8 active_speed;
221 u8 phys_state;
222};
223
224enum ib_device_modify_flags {
225 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1
226};
227
228struct ib_device_modify {
229 u64 sys_image_guid;
230};
231
232enum ib_port_modify_flags {
233 IB_PORT_SHUTDOWN = 1,
234 IB_PORT_INIT_TYPE = (1<<2),
235 IB_PORT_RESET_QKEY_CNTR = (1<<3)
236};
237
238struct ib_port_modify {
239 u32 set_port_cap_mask;
240 u32 clr_port_cap_mask;
241 u8 init_type;
242};
243
244enum ib_event_type {
245 IB_EVENT_CQ_ERR,
246 IB_EVENT_QP_FATAL,
247 IB_EVENT_QP_REQ_ERR,
248 IB_EVENT_QP_ACCESS_ERR,
249 IB_EVENT_COMM_EST,
250 IB_EVENT_SQ_DRAINED,
251 IB_EVENT_PATH_MIG,
252 IB_EVENT_PATH_MIG_ERR,
253 IB_EVENT_DEVICE_FATAL,
254 IB_EVENT_PORT_ACTIVE,
255 IB_EVENT_PORT_ERR,
256 IB_EVENT_LID_CHANGE,
257 IB_EVENT_PKEY_CHANGE,
258 IB_EVENT_SM_CHANGE
259};
260
261struct ib_event {
262 struct ib_device *device;
263 union {
264 struct ib_cq *cq;
265 struct ib_qp *qp;
266 u8 port_num;
267 } element;
268 enum ib_event_type event;
269};
270
271struct ib_event_handler {
272 struct ib_device *device;
273 void (*handler)(struct ib_event_handler *, struct ib_event *);
274 struct list_head list;
275};
276
277#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
278 do { \
279 (_ptr)->device = _device; \
280 (_ptr)->handler = _handler; \
281 INIT_LIST_HEAD(&(_ptr)->list); \
282 } while (0)
283
284struct ib_global_route {
285 union ib_gid dgid;
286 u32 flow_label;
287 u8 sgid_index;
288 u8 hop_limit;
289 u8 traffic_class;
290};
291
292struct ib_grh {
293 u32 version_tclass_flow;
294 u16 paylen;
295 u8 next_hdr;
296 u8 hop_limit;
297 union ib_gid sgid;
298 union ib_gid dgid;
299};
300
301enum {
302 IB_MULTICAST_QPN = 0xffffff
303};
304
305enum ib_ah_flags {
306 IB_AH_GRH = 1
307};
308
309struct ib_ah_attr {
310 struct ib_global_route grh;
311 u16 dlid;
312 u8 sl;
313 u8 src_path_bits;
314 u8 static_rate;
315 u8 ah_flags;
316 u8 port_num;
317};
318
319enum ib_wc_status {
320 IB_WC_SUCCESS,
321 IB_WC_LOC_LEN_ERR,
322 IB_WC_LOC_QP_OP_ERR,
323 IB_WC_LOC_EEC_OP_ERR,
324 IB_WC_LOC_PROT_ERR,
325 IB_WC_WR_FLUSH_ERR,
326 IB_WC_MW_BIND_ERR,
327 IB_WC_BAD_RESP_ERR,
328 IB_WC_LOC_ACCESS_ERR,
329 IB_WC_REM_INV_REQ_ERR,
330 IB_WC_REM_ACCESS_ERR,
331 IB_WC_REM_OP_ERR,
332 IB_WC_RETRY_EXC_ERR,
333 IB_WC_RNR_RETRY_EXC_ERR,
334 IB_WC_LOC_RDD_VIOL_ERR,
335 IB_WC_REM_INV_RD_REQ_ERR,
336 IB_WC_REM_ABORT_ERR,
337 IB_WC_INV_EECN_ERR,
338 IB_WC_INV_EEC_STATE_ERR,
339 IB_WC_FATAL_ERR,
340 IB_WC_RESP_TIMEOUT_ERR,
341 IB_WC_GENERAL_ERR
342};
343
344enum ib_wc_opcode {
345 IB_WC_SEND,
346 IB_WC_RDMA_WRITE,
347 IB_WC_RDMA_READ,
348 IB_WC_COMP_SWAP,
349 IB_WC_FETCH_ADD,
350 IB_WC_BIND_MW,
351/*
352 * Set value of IB_WC_RECV so consumers can test if a completion is a
353 * receive by testing (opcode & IB_WC_RECV).
354 */
355 IB_WC_RECV = 1 << 7,
356 IB_WC_RECV_RDMA_WITH_IMM
357};
358
359enum ib_wc_flags {
360 IB_WC_GRH = 1,
361 IB_WC_WITH_IMM = (1<<1)
362};
363
364struct ib_wc {
365 u64 wr_id;
366 enum ib_wc_status status;
367 enum ib_wc_opcode opcode;
368 u32 vendor_err;
369 u32 byte_len;
370 __be32 imm_data;
371 u32 qp_num;
372 u32 src_qp;
373 int wc_flags;
374 u16 pkey_index;
375 u16 slid;
376 u8 sl;
377 u8 dlid_path_bits;
378 u8 port_num; /* valid only for DR SMPs on switches */
379};
380
381enum ib_cq_notify {
382 IB_CQ_SOLICITED,
383 IB_CQ_NEXT_COMP
384};
385
386struct ib_qp_cap {
387 u32 max_send_wr;
388 u32 max_recv_wr;
389 u32 max_send_sge;
390 u32 max_recv_sge;
391 u32 max_inline_data;
392};
393
394enum ib_sig_type {
395 IB_SIGNAL_ALL_WR,
396 IB_SIGNAL_REQ_WR
397};
398
399enum ib_qp_type {
400 /*
401 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
402 * here (and in that order) since the MAD layer uses them as
403 * indices into a 2-entry table.
404 */
405 IB_QPT_SMI,
406 IB_QPT_GSI,
407
408 IB_QPT_RC,
409 IB_QPT_UC,
410 IB_QPT_UD,
411 IB_QPT_RAW_IPV6,
412 IB_QPT_RAW_ETY
413};
414
415struct ib_qp_init_attr {
416 void (*event_handler)(struct ib_event *, void *);
417 void *qp_context;
418 struct ib_cq *send_cq;
419 struct ib_cq *recv_cq;
420 struct ib_srq *srq;
421 struct ib_qp_cap cap;
422 enum ib_sig_type sq_sig_type;
423 enum ib_qp_type qp_type;
424 u8 port_num; /* special QP types only */
425};
426
427enum ib_rnr_timeout {
428 IB_RNR_TIMER_655_36 = 0,
429 IB_RNR_TIMER_000_01 = 1,
430 IB_RNR_TIMER_000_02 = 2,
431 IB_RNR_TIMER_000_03 = 3,
432 IB_RNR_TIMER_000_04 = 4,
433 IB_RNR_TIMER_000_06 = 5,
434 IB_RNR_TIMER_000_08 = 6,
435 IB_RNR_TIMER_000_12 = 7,
436 IB_RNR_TIMER_000_16 = 8,
437 IB_RNR_TIMER_000_24 = 9,
438 IB_RNR_TIMER_000_32 = 10,
439 IB_RNR_TIMER_000_48 = 11,
440 IB_RNR_TIMER_000_64 = 12,
441 IB_RNR_TIMER_000_96 = 13,
442 IB_RNR_TIMER_001_28 = 14,
443 IB_RNR_TIMER_001_92 = 15,
444 IB_RNR_TIMER_002_56 = 16,
445 IB_RNR_TIMER_003_84 = 17,
446 IB_RNR_TIMER_005_12 = 18,
447 IB_RNR_TIMER_007_68 = 19,
448 IB_RNR_TIMER_010_24 = 20,
449 IB_RNR_TIMER_015_36 = 21,
450 IB_RNR_TIMER_020_48 = 22,
451 IB_RNR_TIMER_030_72 = 23,
452 IB_RNR_TIMER_040_96 = 24,
453 IB_RNR_TIMER_061_44 = 25,
454 IB_RNR_TIMER_081_92 = 26,
455 IB_RNR_TIMER_122_88 = 27,
456 IB_RNR_TIMER_163_84 = 28,
457 IB_RNR_TIMER_245_76 = 29,
458 IB_RNR_TIMER_327_68 = 30,
459 IB_RNR_TIMER_491_52 = 31
460};
461
462enum ib_qp_attr_mask {
463 IB_QP_STATE = 1,
464 IB_QP_CUR_STATE = (1<<1),
465 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
466 IB_QP_ACCESS_FLAGS = (1<<3),
467 IB_QP_PKEY_INDEX = (1<<4),
468 IB_QP_PORT = (1<<5),
469 IB_QP_QKEY = (1<<6),
470 IB_QP_AV = (1<<7),
471 IB_QP_PATH_MTU = (1<<8),
472 IB_QP_TIMEOUT = (1<<9),
473 IB_QP_RETRY_CNT = (1<<10),
474 IB_QP_RNR_RETRY = (1<<11),
475 IB_QP_RQ_PSN = (1<<12),
476 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
477 IB_QP_ALT_PATH = (1<<14),
478 IB_QP_MIN_RNR_TIMER = (1<<15),
479 IB_QP_SQ_PSN = (1<<16),
480 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
481 IB_QP_PATH_MIG_STATE = (1<<18),
482 IB_QP_CAP = (1<<19),
483 IB_QP_DEST_QPN = (1<<20)
484};
485
486enum ib_qp_state {
487 IB_QPS_RESET,
488 IB_QPS_INIT,
489 IB_QPS_RTR,
490 IB_QPS_RTS,
491 IB_QPS_SQD,
492 IB_QPS_SQE,
493 IB_QPS_ERR
494};
495
496enum ib_mig_state {
497 IB_MIG_MIGRATED,
498 IB_MIG_REARM,
499 IB_MIG_ARMED
500};
501
502struct ib_qp_attr {
503 enum ib_qp_state qp_state;
504 enum ib_qp_state cur_qp_state;
505 enum ib_mtu path_mtu;
506 enum ib_mig_state path_mig_state;
507 u32 qkey;
508 u32 rq_psn;
509 u32 sq_psn;
510 u32 dest_qp_num;
511 int qp_access_flags;
512 struct ib_qp_cap cap;
513 struct ib_ah_attr ah_attr;
514 struct ib_ah_attr alt_ah_attr;
515 u16 pkey_index;
516 u16 alt_pkey_index;
517 u8 en_sqd_async_notify;
518 u8 sq_draining;
519 u8 max_rd_atomic;
520 u8 max_dest_rd_atomic;
521 u8 min_rnr_timer;
522 u8 port_num;
523 u8 timeout;
524 u8 retry_cnt;
525 u8 rnr_retry;
526 u8 alt_port_num;
527 u8 alt_timeout;
528};
529
530enum ib_wr_opcode {
531 IB_WR_RDMA_WRITE,
532 IB_WR_RDMA_WRITE_WITH_IMM,
533 IB_WR_SEND,
534 IB_WR_SEND_WITH_IMM,
535 IB_WR_RDMA_READ,
536 IB_WR_ATOMIC_CMP_AND_SWP,
537 IB_WR_ATOMIC_FETCH_AND_ADD
538};
539
540enum ib_send_flags {
541 IB_SEND_FENCE = 1,
542 IB_SEND_SIGNALED = (1<<1),
543 IB_SEND_SOLICITED = (1<<2),
544 IB_SEND_INLINE = (1<<3)
545};
546
547struct ib_sge {
548 u64 addr;
549 u32 length;
550 u32 lkey;
551};
552
553struct ib_send_wr {
554 struct ib_send_wr *next;
555 u64 wr_id;
556 struct ib_sge *sg_list;
557 int num_sge;
558 enum ib_wr_opcode opcode;
559 int send_flags;
560 __be32 imm_data;
561 union {
562 struct {
563 u64 remote_addr;
564 u32 rkey;
565 } rdma;
566 struct {
567 u64 remote_addr;
568 u64 compare_add;
569 u64 swap;
570 u32 rkey;
571 } atomic;
572 struct {
573 struct ib_ah *ah;
574 struct ib_mad_hdr *mad_hdr;
575 u32 remote_qpn;
576 u32 remote_qkey;
577 int timeout_ms; /* valid for MADs only */
578 int retries; /* valid for MADs only */
579 u16 pkey_index; /* valid for GSI only */
580 u8 port_num; /* valid for DR SMPs on switch only */
581 } ud;
582 } wr;
583};
584
585struct ib_recv_wr {
586 struct ib_recv_wr *next;
587 u64 wr_id;
588 struct ib_sge *sg_list;
589 int num_sge;
590};
591
592enum ib_access_flags {
593 IB_ACCESS_LOCAL_WRITE = 1,
594 IB_ACCESS_REMOTE_WRITE = (1<<1),
595 IB_ACCESS_REMOTE_READ = (1<<2),
596 IB_ACCESS_REMOTE_ATOMIC = (1<<3),
597 IB_ACCESS_MW_BIND = (1<<4)
598};
599
600struct ib_phys_buf {
601 u64 addr;
602 u64 size;
603};
604
605struct ib_mr_attr {
606 struct ib_pd *pd;
607 u64 device_virt_addr;
608 u64 size;
609 int mr_access_flags;
610 u32 lkey;
611 u32 rkey;
612};
613
614enum ib_mr_rereg_flags {
615 IB_MR_REREG_TRANS = 1,
616 IB_MR_REREG_PD = (1<<1),
617 IB_MR_REREG_ACCESS = (1<<2)
618};
619
620struct ib_mw_bind {
621 struct ib_mr *mr;
622 u64 wr_id;
623 u64 addr;
624 u32 length;
625 int send_flags;
626 int mw_access_flags;
627};
628
629struct ib_fmr_attr {
630 int max_pages;
631 int max_maps;
632 u8 page_size;
633};
634
635struct ib_ucontext {
636 struct ib_device *device;
637 struct list_head pd_list;
638 struct list_head mr_list;
639 struct list_head mw_list;
640 struct list_head cq_list;
641 struct list_head qp_list;
642 struct list_head srq_list;
643 struct list_head ah_list;
644 spinlock_t lock;
645};
646
647struct ib_uobject {
648 u64 user_handle; /* handle given to us by userspace */
649 struct ib_ucontext *context; /* associated user context */
650 struct list_head list; /* link to context's list */
651 u32 id; /* index into kernel idr */
652};
653
654struct ib_umem {
655 unsigned long user_base;
656 unsigned long virt_base;
657 size_t length;
658 int offset;
659 int page_size;
660 int writable;
661 struct list_head chunk_list;
662};
663
664struct ib_umem_chunk {
665 struct list_head list;
666 int nents;
667 int nmap;
668 struct scatterlist page_list[0];
669};
670
671struct ib_udata {
672 void __user *inbuf;
673 void __user *outbuf;
674 size_t inlen;
675 size_t outlen;
676};
677
678#define IB_UMEM_MAX_PAGE_CHUNK \
679 ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
680 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
681 (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
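
The pointer arithmetic in IB_UMEM_MAX_PAGE_CHUNK is an element-size computation: it yields the number of scatterlist entries that fit in the remainder of a page after the chunk header. The following form computes the same value and may read more clearly:

/* Equivalent, for clarity: the array-element stride taken as a
 * void-pointer difference above is just sizeof(struct scatterlist). */
#define IB_UMEM_MAX_PAGE_CHUNK_EQUIV                                \
        ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /  \
         sizeof(struct scatterlist))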
682
683struct ib_umem_object {
684 struct ib_uobject uobject;
685 struct ib_umem umem;
686};
687
688struct ib_pd {
689 struct ib_device *device;
690 struct ib_uobject *uobject;
691 atomic_t usecnt; /* count all resources */
692};
693
694struct ib_ah {
695 struct ib_device *device;
696 struct ib_pd *pd;
697 struct ib_uobject *uobject;
698};
699
700typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
701
702struct ib_cq {
703 struct ib_device *device;
704 struct ib_uobject *uobject;
705 ib_comp_handler comp_handler;
706 void (*event_handler)(struct ib_event *, void *);
707 void * cq_context;
708 int cqe;
709 atomic_t usecnt; /* count number of work queues */
710};
711
712struct ib_srq {
713 struct ib_device *device;
714 struct ib_uobject *uobject;
715 struct ib_pd *pd;
716 void *srq_context;
717 atomic_t usecnt;
718};
719
720struct ib_qp {
721 struct ib_device *device;
722 struct ib_pd *pd;
723 struct ib_cq *send_cq;
724 struct ib_cq *recv_cq;
725 struct ib_srq *srq;
726 struct ib_uobject *uobject;
727 void (*event_handler)(struct ib_event *, void *);
728 void *qp_context;
729 u32 qp_num;
730 enum ib_qp_type qp_type;
731};
732
733struct ib_mr {
734 struct ib_device *device;
735 struct ib_pd *pd;
736 struct ib_uobject *uobject;
737 u32 lkey;
738 u32 rkey;
739 atomic_t usecnt; /* count number of MWs */
740};
741
742struct ib_mw {
743 struct ib_device *device;
744 struct ib_pd *pd;
745 struct ib_uobject *uobject;
746 u32 rkey;
747};
748
749struct ib_fmr {
750 struct ib_device *device;
751 struct ib_pd *pd;
752 struct list_head list;
753 u32 lkey;
754 u32 rkey;
755};
756
757struct ib_mad;
758struct ib_grh;
759
760enum ib_process_mad_flags {
761 IB_MAD_IGNORE_MKEY = 1,
762 IB_MAD_IGNORE_BKEY = 2,
763 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
764};
765
766enum ib_mad_result {
767 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
768 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
769 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
770 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
771};
772
773#define IB_DEVICE_NAME_MAX 64
774
775struct ib_cache {
776 rwlock_t lock;
777 struct ib_event_handler event_handler;
778 struct ib_pkey_cache **pkey_cache;
779 struct ib_gid_cache **gid_cache;
780};
781
782struct ib_device {
783 struct device *dma_device;
784
785 char name[IB_DEVICE_NAME_MAX];
786
787 struct list_head event_handler_list;
788 spinlock_t event_handler_lock;
789
790 struct list_head core_list;
791 struct list_head client_data_list;
792 spinlock_t client_data_lock;
793
794 struct ib_cache cache;
795
796 u32 flags;
797
798 int (*query_device)(struct ib_device *device,
799 struct ib_device_attr *device_attr);
800 int (*query_port)(struct ib_device *device,
801 u8 port_num,
802 struct ib_port_attr *port_attr);
803 int (*query_gid)(struct ib_device *device,
804 u8 port_num, int index,
805 union ib_gid *gid);
806 int (*query_pkey)(struct ib_device *device,
807 u8 port_num, u16 index, u16 *pkey);
808 int (*modify_device)(struct ib_device *device,
809 int device_modify_mask,
810 struct ib_device_modify *device_modify);
811 int (*modify_port)(struct ib_device *device,
812 u8 port_num, int port_modify_mask,
813 struct ib_port_modify *port_modify);
814 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
815 struct ib_udata *udata);
816 int (*dealloc_ucontext)(struct ib_ucontext *context);
817 int (*mmap)(struct ib_ucontext *context,
818 struct vm_area_struct *vma);
819 struct ib_pd * (*alloc_pd)(struct ib_device *device,
820 struct ib_ucontext *context,
821 struct ib_udata *udata);
822 int (*dealloc_pd)(struct ib_pd *pd);
823 struct ib_ah * (*create_ah)(struct ib_pd *pd,
824 struct ib_ah_attr *ah_attr);
825 int (*modify_ah)(struct ib_ah *ah,
826 struct ib_ah_attr *ah_attr);
827 int (*query_ah)(struct ib_ah *ah,
828 struct ib_ah_attr *ah_attr);
829 int (*destroy_ah)(struct ib_ah *ah);
830 struct ib_qp * (*create_qp)(struct ib_pd *pd,
831 struct ib_qp_init_attr *qp_init_attr,
832 struct ib_udata *udata);
833 int (*modify_qp)(struct ib_qp *qp,
834 struct ib_qp_attr *qp_attr,
835 int qp_attr_mask);
836 int (*query_qp)(struct ib_qp *qp,
837 struct ib_qp_attr *qp_attr,
838 int qp_attr_mask,
839 struct ib_qp_init_attr *qp_init_attr);
840 int (*destroy_qp)(struct ib_qp *qp);
841 int (*post_send)(struct ib_qp *qp,
842 struct ib_send_wr *send_wr,
843 struct ib_send_wr **bad_send_wr);
844 int (*post_recv)(struct ib_qp *qp,
845 struct ib_recv_wr *recv_wr,
846 struct ib_recv_wr **bad_recv_wr);
847 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
848 struct ib_ucontext *context,
849 struct ib_udata *udata);
850 int (*destroy_cq)(struct ib_cq *cq);
851 int (*resize_cq)(struct ib_cq *cq, int *cqe);
852 int (*poll_cq)(struct ib_cq *cq, int num_entries,
853 struct ib_wc *wc);
854 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
855 int (*req_notify_cq)(struct ib_cq *cq,
856 enum ib_cq_notify cq_notify);
857 int (*req_ncomp_notif)(struct ib_cq *cq,
858 int wc_cnt);
859 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
860 int mr_access_flags);
861 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
862 struct ib_phys_buf *phys_buf_array,
863 int num_phys_buf,
864 int mr_access_flags,
865 u64 *iova_start);
866 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
867 struct ib_umem *region,
868 int mr_access_flags,
869 struct ib_udata *udata);
870 int (*query_mr)(struct ib_mr *mr,
871 struct ib_mr_attr *mr_attr);
872 int (*dereg_mr)(struct ib_mr *mr);
873 int (*rereg_phys_mr)(struct ib_mr *mr,
874 int mr_rereg_mask,
875 struct ib_pd *pd,
876 struct ib_phys_buf *phys_buf_array,
877 int num_phys_buf,
878 int mr_access_flags,
879 u64 *iova_start);
880 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
881 int (*bind_mw)(struct ib_qp *qp,
882 struct ib_mw *mw,
883 struct ib_mw_bind *mw_bind);
884 int (*dealloc_mw)(struct ib_mw *mw);
885 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
886 int mr_access_flags,
887 struct ib_fmr_attr *fmr_attr);
888 int (*map_phys_fmr)(struct ib_fmr *fmr,
889 u64 *page_list, int list_len,
890 u64 iova);
891 int (*unmap_fmr)(struct list_head *fmr_list);
892 int (*dealloc_fmr)(struct ib_fmr *fmr);
893 int (*attach_mcast)(struct ib_qp *qp,
894 union ib_gid *gid,
895 u16 lid);
896 int (*detach_mcast)(struct ib_qp *qp,
897 union ib_gid *gid,
898 u16 lid);
899 int (*process_mad)(struct ib_device *device,
900 int process_mad_flags,
901 u8 port_num,
902 struct ib_wc *in_wc,
903 struct ib_grh *in_grh,
904 struct ib_mad *in_mad,
905 struct ib_mad *out_mad);
906
907 struct module *owner;
908 struct class_device class_dev;
909 struct kobject ports_parent;
910 struct list_head port_list;
911
912 enum {
913 IB_DEV_UNINITIALIZED,
914 IB_DEV_REGISTERED,
915 IB_DEV_UNREGISTERED
916 } reg_state;
917
918 u8 node_type;
919 u8 phys_port_cnt;
920};
921
922struct ib_client {
923 char *name;
924 void (*add) (struct ib_device *);
925 void (*remove)(struct ib_device *);
926
927 struct list_head list;
928};
929
930struct ib_device *ib_alloc_device(size_t size);
931void ib_dealloc_device(struct ib_device *device);
932
933int ib_register_device (struct ib_device *device);
934void ib_unregister_device(struct ib_device *device);
935
936int ib_register_client (struct ib_client *client);
937void ib_unregister_client(struct ib_client *client);
938
939void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
940void ib_set_client_data(struct ib_device *device, struct ib_client *client,
941 void *data);
942
943static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
944{
945 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
946}
947
948static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
949{
950 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
951}
952
953int ib_register_event_handler (struct ib_event_handler *event_handler);
954int ib_unregister_event_handler(struct ib_event_handler *event_handler);
955void ib_dispatch_event(struct ib_event *event);
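
Tying the event API together, a minimal sketch of a client that registers an asynchronous event handler when a device is added, using the INIT_IB_EVENT_HANDLER macro and registration call declared above:

/* Sketch: register for asynchronous device events. */
static void my_event_handler(struct ib_event_handler *handler,
                             struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ACTIVE)
                printk(KERN_INFO "%s: port %d active\n",
                       event->device->name, event->element.port_num);
}

static struct ib_event_handler my_handler;

static void my_client_add(struct ib_device *device)
{
        INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
        ib_register_event_handler(&my_handler);
}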
956
957int ib_query_device(struct ib_device *device,
958 struct ib_device_attr *device_attr);
959
960int ib_query_port(struct ib_device *device,
961 u8 port_num, struct ib_port_attr *port_attr);
962
963int ib_query_gid(struct ib_device *device,
964 u8 port_num, int index, union ib_gid *gid);
965
966int ib_query_pkey(struct ib_device *device,
967 u8 port_num, u16 index, u16 *pkey);
968
969int ib_modify_device(struct ib_device *device,
970 int device_modify_mask,
971 struct ib_device_modify *device_modify);
972
973int ib_modify_port(struct ib_device *device,
974 u8 port_num, int port_modify_mask,
975 struct ib_port_modify *port_modify);
976
977/**
978 * ib_alloc_pd - Allocates an unused protection domain.
979 * @device: The device on which to allocate the protection domain.
980 *
981 * A protection domain object provides an association between QPs, shared
982 * receive queues, address handles, memory regions, and memory windows.
983 */
984struct ib_pd *ib_alloc_pd(struct ib_device *device);
985
986/**
987 * ib_dealloc_pd - Deallocates a protection domain.
988 * @pd: The protection domain to deallocate.
989 */
990int ib_dealloc_pd(struct ib_pd *pd);
991
992/**
993 * ib_create_ah - Creates an address handle for the given address vector.
994 * @pd: The protection domain associated with the address handle.
995 * @ah_attr: The attributes of the address vector.
996 *
997 * The address handle is used to reference a local or global destination
998 * in all UD QP post sends.
999 */
1000struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1001
1002/**
1003 * ib_create_ah_from_wc - Creates an address handle associated with the
1004 * sender of the specified work completion.
1005 * @pd: The protection domain associated with the address handle.
1006 * @wc: Work completion information associated with a received message.
1007 * @grh: References the received global route header. This parameter is
1008 * ignored unless the work completion indicates that the GRH is valid.
1009 * @port_num: The outbound port number to associate with the address.
1010 *
1011 * The address handle is used to reference a local or global destination
1012 * in all UD QP post sends.
1013 */
1014struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1015 struct ib_grh *grh, u8 port_num);
1016
1017/**
1018 * ib_modify_ah - Modifies the address vector associated with an address
1019 * handle.
1020 * @ah: The address handle to modify.
1021 * @ah_attr: The new address vector attributes to associate with the
1022 * address handle.
1023 */
1024int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1025
1026/**
1027 * ib_query_ah - Queries the address vector associated with an address
1028 * handle.
1029 * @ah: The address handle to query.
1030 * @ah_attr: The address vector attributes associated with the address
1031 * handle.
1032 */
1033int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1034
1035/**
1036 * ib_destroy_ah - Destroys an address handle.
1037 * @ah: The address handle to destroy.
1038 */
1039int ib_destroy_ah(struct ib_ah *ah);
1040
1041/**
1042 * ib_create_qp - Creates a QP associated with the specified protection
1043 * domain.
1044 * @pd: The protection domain associated with the QP.
1045 * @qp_init_attr: A list of initial attributes required to create the QP.
1046 */
1047struct ib_qp *ib_create_qp(struct ib_pd *pd,
1048 struct ib_qp_init_attr *qp_init_attr);
1049
1050/**
1051 * ib_modify_qp - Modifies the attributes for the specified QP and then
1052 * transitions the QP to the given state.
1053 * @qp: The QP to modify.
1054 * @qp_attr: On input, specifies the QP attributes to modify. On output,
1055 * the current values of selected QP attributes are returned.
1056 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1057 * are being modified.
1058 */
1059int ib_modify_qp(struct ib_qp *qp,
1060 struct ib_qp_attr *qp_attr,
1061 int qp_attr_mask);
1062
1063/**
1064 * ib_query_qp - Returns the attribute list and current values for the
1065 * specified QP.
1066 * @qp: The QP to query.
1067 * @qp_attr: The attributes of the specified QP.
1068 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1069 * @qp_init_attr: Additional attributes of the selected QP.
1070 *
1071 * The qp_attr_mask may be used to limit the query to gathering only the
1072 * selected attributes.
1073 */
1074int ib_query_qp(struct ib_qp *qp,
1075 struct ib_qp_attr *qp_attr,
1076 int qp_attr_mask,
1077 struct ib_qp_init_attr *qp_init_attr);
1078
1079/**
1080 * ib_destroy_qp - Destroys the specified QP.
1081 * @qp: The QP to destroy.
1082 */
1083int ib_destroy_qp(struct ib_qp *qp);
1084
1085/**
1086 * ib_post_send - Posts a list of work requests to the send queue of
1087 * the specified QP.
1088 * @qp: The QP to post the work request on.
1089 * @send_wr: A list of work requests to post on the send queue.
1090 * @bad_send_wr: On an immediate failure, this parameter will reference
1091 * the work request that failed to be posted on the QP.
1092 */
1093static inline int ib_post_send(struct ib_qp *qp,
1094 struct ib_send_wr *send_wr,
1095 struct ib_send_wr **bad_send_wr)
1096{
1097 return qp->device->post_send(qp, send_wr, bad_send_wr);
1098}
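
A hedged sketch of posting a single signaled send using the structures above; the DMA address and lkey are assumed to come from an earlier memory-registration step not shown here.

/* Sketch: post one signaled SEND with a single scatter/gather entry. */
static int post_one_send(struct ib_qp *qp, u64 wr_id,
                         u64 buf_dma, u32 length, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = buf_dma,
                .length = length,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id      = wr_id,
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED,
        };
        struct ib_send_wr *bad_wr;

        return ib_post_send(qp, &wr, &bad_wr);
}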
1099
1100/**
1101 * ib_post_recv - Posts a list of work requests to the receive queue of
1102 * the specified QP.
1103 * @qp: The QP to post the work request on.
1104 * @recv_wr: A list of work requests to post on the receive queue.
1105 * @bad_recv_wr: On an immediate failure, this parameter will reference
1106 * the work request that failed to be posted on the QP.
1107 */
1108static inline int ib_post_recv(struct ib_qp *qp,
1109 struct ib_recv_wr *recv_wr,
1110 struct ib_recv_wr **bad_recv_wr)
1111{
1112 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1113}
1114
1115/**
1116 * ib_create_cq - Creates a CQ on the specified device.
1117 * @device: The device on which to create the CQ.
1118 * @comp_handler: A user-specified callback that is invoked when a
1119 * completion event occurs on the CQ.
1120 * @event_handler: A user-specified callback that is invoked when an
1121 * asynchronous event not associated with a completion occurs on the CQ.
1122 * @cq_context: Context associated with the CQ returned to the user via
1123 * the associated completion and event handlers.
1124 * @cqe: The minimum size of the CQ.
1125 *
1126 * Users can examine the cq structure to determine the actual CQ size.
1127 */
1128struct ib_cq *ib_create_cq(struct ib_device *device,
1129 ib_comp_handler comp_handler,
1130 void (*event_handler)(struct ib_event *, void *),
1131 void *cq_context, int cqe);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}
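
/*
 * Illustrative sketch, not part of this header: draining a CQ.  A
 * return value smaller than the array size means the CQ is now empty.
 */
static void example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc[4];
        int n, i;

        while ((n = ib_poll_cq(cq, 4, wc)) > 0)
                for (i = 0; i < n; ++i)
                        if (wc[i].status != IB_WC_SUCCESS)
                                printk(KERN_WARNING "wr_id %llu failed (%d)\n",
                                       (unsigned long long) wc[i].wr_id,
                                       wc[i].status);
}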

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
 *   occur on the next solicited event.  If set to %IB_CQ_NEXT_COMP,
 *   notification will occur on the next completion.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify cq_notify)
{
        return cq->device->req_notify_cq(cq, cq_notify);
}
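
/*
 * Illustrative sketch, not part of this header: the usual completion
 * handler idiom.  Re-arming with IB_CQ_NEXT_COMP and then polling once
 * more closes the race where a completion arrives between the final
 * empty poll and the re-arm, and would otherwise never raise another
 * event.  Reuses the example_drain_cq() sketch above.
 */
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
        example_drain_cq(cq);
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        example_drain_cq(cq);
}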

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
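
/*
 * Illustrative sketch, not part of this header: the DMA MR returned by
 * ib_get_dma_mr() covers all of system memory, so its lkey can be used
 * in any ib_sge describing a DMA-mapped buffer.  Helper name is
 * hypothetical.
 */
static inline int example_setup_dma_mr(struct ib_pd *pd, struct ib_mr **mr)
{
        *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
        return IS_ERR(*mr) ? PTR_ERR(*mr) : 0;
}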

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);
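
/*
 * Illustrative sketch, not part of this header: registering a single
 * physically contiguous buffer, using its bus address as the starting
 * I/O virtual address.  Helper name and access flags are illustrative.
 */
static inline struct ib_mr *example_reg_one_buf(struct ib_pd *pd,
                                                u64 bus_addr, u64 size)
{
        struct ib_phys_buf buf = {
                .addr = bus_addr,
                .size = size,
        };
        u64 iova = bus_addr;

        return ib_reg_phys_mr(pd, &buf, 1,
                              IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
                              &iova);
}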

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister of the memory region
 *   followed by a register of the new physical memory region.  Where
 *   possible, resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        /* XXX reference counting in corresponding MR? */
        return mw->device->bind_mw ?
                mw->device->bind_mw(qp, mw, mw_bind) :
                -ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
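
/*
 * Illustrative sketch, not part of this header: one map/unmap cycle of
 * an FMR.  ib_unmap_fmr() consumes a list of FMRs linked through the
 * fmr's list member, which allows unmaps to be batched.  Helper name
 * is hypothetical.
 */
static inline int example_remap_fmr(struct ib_fmr *fmr, u64 *page_list,
                                    int npages, u64 iova)
{
        LIST_HEAD(fmr_list);
        int ret;

        ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
        if (ret)
                return ret;

        /* ... post work requests that use fmr->lkey / fmr->rkey ... */

        list_add_tail(&fmr->list, &fmr_list);
        return ib_unmap_fmr(&fmr_list);
}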

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index 394bc08abc6f..8935e74ae3f8 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -1,5 +1,3 @@
-EXTRA_CFLAGS += -Idrivers/infiniband/include
-
 obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o
 
 ib_ipoib-y := ipoib_main.o \
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 04c98f54e9c4..bea960b8191f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -49,9 +51,9 @@
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
 
-#include <ib_verbs.h>
-#include <ib_pack.h>
-#include <ib_sa.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_sa.h>
 
 /* constants */
 
@@ -88,8 +90,8 @@ enum {
 /* structs */
 
 struct ipoib_header {
-        u16     proto;
+        __be16  proto;
         u16     reserved;
 };
 
 struct ipoib_pseudoheader {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index a84e5fe0f193..38b150f775e7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -97,7 +97,7 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
 
         for (n = 0, i = 0; i < sizeof mgid / 2; ++i) {
                 n += sprintf(gid_buf + n, "%x",
-                             be16_to_cpu(((u16 *)mgid.raw)[i]));
+                             be16_to_cpu(((__be16 *) mgid.raw)[i]));
                 if (i < sizeof mgid / 2 - 1)
                         gid_buf[n++] = ':';
         }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index eee82363167d..ef0e3894863c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1,5 +1,8 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +38,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 
-#include <ib_cache.h>
+#include <rdma/ib_cache.h>
 
 #include "ipoib.h"
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index fa00816a3cf7..0e8ac138e355 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,6 @@
 
 #include "ipoib.h"
 
-#include <linux/version.h>
 #include <linux/module.h>
 
 #include <linux/init.h>
@@ -607,8 +608,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                         ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
                                    IPOIB_GID_FMT "\n",
                                    skb->dst ? "neigh" : "dst",
-                                   be16_to_cpup((u16 *) skb->data),
-                                   be32_to_cpup((u32 *) phdr->hwaddr),
+                                   be16_to_cpup((__be16 *) skb->data),
+                                   be32_to_cpup((__be32 *) phdr->hwaddr),
                                    IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
                         dev_kfree_skb_any(skb);
                         ++priv->stats.tx_dropped;
@@ -671,7 +672,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
 
-        schedule_work(&priv->restart_task);
+        queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
 static void ipoib_neigh_destructor(struct neighbour *n)
@@ -780,15 +781,11 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
         ipoib_ib_dev_cleanup(dev);
 
-        if (priv->rx_ring) {
-                kfree(priv->rx_ring);
-                priv->rx_ring = NULL;
-        }
+        kfree(priv->rx_ring);
+        kfree(priv->tx_ring);
 
-        if (priv->tx_ring) {
-                kfree(priv->tx_ring);
-                priv->tx_ring = NULL;
-        }
+        priv->rx_ring = NULL;
+        priv->tx_ring = NULL;
 }
 
 static void ipoib_setup(struct net_device *dev)
@@ -886,6 +883,12 @@ static ssize_t create_child(struct class_device *cdev,
         if (pkey < 0 || pkey > 0xffff)
                 return -EINVAL;
 
+        /*
+         * Set the full membership bit, so that we join the right
+         * broadcast group, etc.
+         */
+        pkey |= 0x8000;
+
         ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
                              pkey);
 
@@ -938,6 +941,12 @@ static struct net_device *ipoib_add_port(const char *format,
                 goto alloc_mem_failed;
         }
 
+        /*
+         * Set the full membership bit, so that we join the right
+         * broadcast group, etc.
+         */
+        priv->pkey |= 0x8000;
+
         priv->dev->broadcast[8] = priv->pkey >> 8;
         priv->dev->broadcast[9] = priv->pkey & 0xff;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 70208c3d21e2..aca7aea18a69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -357,7 +359,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 
         rec.mgid     = mcast->mcmember.mgid;
         rec.port_gid = priv->local_gid;
-        rec.pkey     = be16_to_cpu(priv->pkey);
+        rec.pkey     = cpu_to_be16(priv->pkey);
 
         ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
                                      IB_SA_MCMEMBER_REC_MGID |
@@ -457,7 +459,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 
         rec.mgid     = mcast->mcmember.mgid;
         rec.port_gid = priv->local_gid;
-        rec.pkey     = be16_to_cpu(priv->pkey);
+        rec.pkey     = cpu_to_be16(priv->pkey);
 
         comp_mask =
                 IB_SA_MCMEMBER_REC_MGID |
@@ -646,7 +648,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 
         rec.mgid     = mcast->mcmember.mgid;
         rec.port_gid = priv->local_gid;
-        rec.pkey     = be16_to_cpu(priv->pkey);
+        rec.pkey     = cpu_to_be16(priv->pkey);
 
         /* Remove ourselves from the multicast group */
         ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 4933edf062c2..79f59d0563ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -32,7 +33,7 @@
  * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $
  */
 
-#include <ib_cache.h>
+#include <rdma/ib_cache.h>
 
 #include "ipoib.h"
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 94b8ea812fef..332d730e60c2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -32,7 +32,6 @@
  * $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 
 #include <linux/init.h>