path: root/drivers/infiniband
author     Jack Morgenstein <jackm@mellanox.co.il>   2006-04-10 12:43:47 -0400
committer  Roland Dreier <rolandd@cisco.com>         2006-04-10 12:43:47 -0400
commit     bf6a9e31cfa768ce0a8e18474b3ca808641d9243 (patch)
tree       a7ebdb5a10e528959ca1497e222975d3087a5eef /drivers/infiniband
parent     d2e0655ede1d91c3a586455d03a4a2d57e659830 (diff)
IB: simplify static rate encoding
Push translation of static rate to HCA format into low-level drivers,
where it belongs. For static rate encoding, use encoding of rate field
from IB standard PathRecord, with addition of value 0, for backwards
compatibility with current usage. The changes are:

- Add enum ib_rate to midlayer includes.
- Get rid of static rate translation in IPoIB; just use static rate
  directly from Path and MulticastGroup records.
- Update mthca driver to translate absolute static rate into the format
  used by hardware. This also fixes mthca's static rate handling for
  HCAs that are capable of 4X DDR.

Signed-off-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
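Note that the <rdma/ib_verbs.h> hunk adding enum ib_rate is not shown below,
since the diffstat is limited to drivers/infiniband. As a rough sketch only
(values recalled from the IBA PathRecord static-rate encoding that the commit
message references, not copied from this page), the new enum presumably looks
something like:

	enum ib_rate {
		IB_RATE_PORT_CURRENT = 0,	/* value 0 kept for backwards compatibility */
		IB_RATE_2_5_GBPS     = 2,
		IB_RATE_5_GBPS       = 5,
		IB_RATE_10_GBPS      = 3,
		IB_RATE_20_GBPS      = 6,
		IB_RATE_30_GBPS      = 4,
		IB_RATE_40_GBPS      = 7,
		IB_RATE_60_GBPS      = 8,
		IB_RATE_80_GBPS      = 9,
		IB_RATE_120_GBPS     = 10
	};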
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/verbs.c                |  34
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c         | 100
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c        |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h        |   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h        |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c        |  42
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c       |  12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h   |   3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c         |  46
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c        |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c      |  11
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |  10
12 files changed, 233 insertions, 36 deletions
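For orientation before the per-file hunks: the mthca_av.c code below derives an
inter-packet delay (IPD) of (X - 1) / Y when a port running at X times the base
rate must be throttled to a path limited to Y times the base rate. A small
illustrative sketch using the new midlayer helpers (hypothetical local variable
names, not part of the patch):

	/* a 4X SDR (10 Gb/s) port carrying a path limited to 1X SDR (2.5 Gb/s) */
	int port_mult = ib_rate_to_mult(IB_RATE_10_GBPS);	/* 4 */
	int path_mult = ib_rate_to_mult(IB_RATE_2_5_GBPS);	/* 1 */
	int ipd       = (port_mult - 1) / path_mult;		/* 3 idle slots per packet, i.e. quarter rate */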
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index cae0845f472a..b78e7dc69330 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -45,6 +45,40 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
 
+int ib_rate_to_mult(enum ib_rate rate)
+{
+	switch (rate) {
+	case IB_RATE_2_5_GBPS: return 1;
+	case IB_RATE_5_GBPS:   return 2;
+	case IB_RATE_10_GBPS:  return 4;
+	case IB_RATE_20_GBPS:  return 8;
+	case IB_RATE_30_GBPS:  return 12;
+	case IB_RATE_40_GBPS:  return 16;
+	case IB_RATE_60_GBPS:  return 24;
+	case IB_RATE_80_GBPS:  return 32;
+	case IB_RATE_120_GBPS: return 48;
+	default:               return -1;
+	}
+}
+EXPORT_SYMBOL(ib_rate_to_mult);
+
+enum ib_rate mult_to_ib_rate(int mult)
+{
+	switch (mult) {
+	case 1:  return IB_RATE_2_5_GBPS;
+	case 2:  return IB_RATE_5_GBPS;
+	case 4:  return IB_RATE_10_GBPS;
+	case 8:  return IB_RATE_20_GBPS;
+	case 12: return IB_RATE_30_GBPS;
+	case 16: return IB_RATE_40_GBPS;
+	case 24: return IB_RATE_60_GBPS;
+	case 32: return IB_RATE_80_GBPS;
+	case 48: return IB_RATE_120_GBPS;
+	default: return IB_RATE_PORT_CURRENT;
+	}
+}
+EXPORT_SYMBOL(mult_to_ib_rate);
+
 /* Protection domains */
 
 struct ib_pd *ib_alloc_pd(struct ib_device *device)
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index bc5bdcbe51b5..b12aa03be251 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -42,6 +42,20 @@
 
 #include "mthca_dev.h"
 
+enum {
+	MTHCA_RATE_TAVOR_FULL   = 0,
+	MTHCA_RATE_TAVOR_1X     = 1,
+	MTHCA_RATE_TAVOR_4X     = 2,
+	MTHCA_RATE_TAVOR_1X_DDR = 3
+};
+
+enum {
+	MTHCA_RATE_MEMFREE_FULL    = 0,
+	MTHCA_RATE_MEMFREE_QUARTER = 1,
+	MTHCA_RATE_MEMFREE_EIGHTH  = 2,
+	MTHCA_RATE_MEMFREE_HALF    = 3
+};
+
 struct mthca_av {
 	__be32 port_pd;
 	u8 reserved1;
@@ -55,6 +69,90 @@ struct mthca_av {
 	__be32 dgid[4];
 };
 
+static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
+{
+	switch (mthca_rate) {
+	case MTHCA_RATE_MEMFREE_EIGHTH:
+		return mult_to_ib_rate(port_rate >> 3);
+	case MTHCA_RATE_MEMFREE_QUARTER:
+		return mult_to_ib_rate(port_rate >> 2);
+	case MTHCA_RATE_MEMFREE_HALF:
+		return mult_to_ib_rate(port_rate >> 1);
+	case MTHCA_RATE_MEMFREE_FULL:
+	default:
+		return mult_to_ib_rate(port_rate);
+	}
+}
+
+static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
+{
+	switch (mthca_rate) {
+	case MTHCA_RATE_TAVOR_1X:     return IB_RATE_2_5_GBPS;
+	case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
+	case MTHCA_RATE_TAVOR_4X:     return IB_RATE_10_GBPS;
+	default:                      return port_rate;
+	}
+}
+
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
+{
+	if (mthca_is_memfree(dev)) {
+		/* Handle old Arbel FW */
+		if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
+			return IB_RATE_2_5_GBPS;
+
+		return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]);
+	} else
+		return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]);
+}
+
+static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
+{
+	if (cur_rate <= req_rate)
+		return 0;
+
+	/*
+	 * Inter-packet delay (IPD) to get from rate X down to a rate
+	 * no more than Y is (X - 1) / Y.
+	 */
+	switch ((cur_rate - 1) / req_rate) {
+	case 0:  return MTHCA_RATE_MEMFREE_FULL;
+	case 1:  return MTHCA_RATE_MEMFREE_HALF;
+	case 2:  /* fall through */
+	case 3:  return MTHCA_RATE_MEMFREE_QUARTER;
+	default: return MTHCA_RATE_MEMFREE_EIGHTH;
+	}
+}
+
+static u8 ib_rate_to_tavor(u8 static_rate)
+{
+	switch (static_rate) {
+	case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
+	case IB_RATE_5_GBPS:   return MTHCA_RATE_TAVOR_1X_DDR;
+	case IB_RATE_10_GBPS:  return MTHCA_RATE_TAVOR_4X;
+	default:               return MTHCA_RATE_TAVOR_FULL;
+	}
+}
+
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
+{
+	u8 rate;
+
+	if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
+		return 0;
+
+	if (mthca_is_memfree(dev))
+		rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate),
+					  dev->rate[port - 1]);
+	else
+		rate = ib_rate_to_tavor(static_rate);
+
+	if (!(dev->limits.stat_rate_support & (1 << rate)))
+		rate = 1;
+
+	return rate;
+}
+
 int mthca_create_ah(struct mthca_dev *dev,
 		    struct mthca_pd *pd,
 		    struct ib_ah_attr *ah_attr,
@@ -107,7 +205,7 @@ on_hca_fail:
 	av->g_slid = ah_attr->src_path_bits;
 	av->dlid   = cpu_to_be16(ah_attr->dlid);
 	av->msg_sr = (3 << 4) | /* 2K message */
-		ah_attr->static_rate;
+		mthca_get_rate(dev, ah_attr->static_rate, ah_attr->port_num);
 	av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		av->g_slid |= 0x80;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 343eca507870..1985b5dfa481 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -965,6 +965,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	u32 *outbox;
 	u8 field;
 	u16 size;
+	u16 stat_rate;
 	int err;
 
 #define QUERY_DEV_LIM_OUT_SIZE             0x100
@@ -995,6 +996,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
 #define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
 #define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
+#define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET   0x3c
 #define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
 #define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
@@ -1086,6 +1088,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 	dev_lim->num_ports = field & 0xf;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
 	dev_lim->max_gids = 1 << (field & 0xf);
+	MTHCA_GET(stat_rate, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET);
+	dev_lim->stat_rate_support = stat_rate;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
 	dev_lim->max_pkeys = 1 << (field & 0xf);
 	MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index e4ec35c40dd3..2f976f2051d6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -146,6 +146,7 @@ struct mthca_dev_lim {
 	int max_vl;
 	int num_ports;
 	int max_gids;
+	u16 stat_rate_support;
 	int max_pkeys;
 	u32 flags;
 	int reserved_uars;
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index bb2a9d628d7d..49d0eae5518a 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -172,6 +172,7 @@ struct mthca_limits {
 	int reserved_pds;
 	u32 page_size_cap;
 	u32 flags;
+	u16 stat_rate_support;
 	u8  port_width_cap;
 };
 
@@ -353,6 +354,7 @@ struct mthca_dev {
 	struct ib_mad_agent *send_agent[MTHCA_MAX_PORTS][2];
 	struct ib_ah        *sm_ah[MTHCA_MAX_PORTS];
 	spinlock_t           sm_lock;
+	u8                   rate[MTHCA_MAX_PORTS];
 };
 
 #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
@@ -555,6 +557,8 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
 		  struct ib_ud_header *header);
 int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr);
 int mthca_ah_grh_present(struct mthca_ah *ah);
+u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port);
+enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
 
 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index dfb482eac9a2..f235c7ea42f0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -49,6 +49,30 @@ enum {
 	MTHCA_VENDOR_CLASS2 = 0xa
 };
 
+int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
+{
+	struct ib_port_attr *tprops = NULL;
+	int ret;
+
+	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
+	if (!tprops)
+		return -ENOMEM;
+
+	ret = ib_query_port(&dev->ib_dev, port_num, tprops);
+	if (ret) {
+		printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
+		       ret, dev->ib_dev.name, port_num);
+		goto out;
+	}
+
+	dev->rate[port_num - 1] = tprops->active_speed *
+				  ib_width_enum_to_int(tprops->active_width);
+
+out:
+	kfree(tprops);
+	return ret;
+}
+
 static void update_sm_ah(struct mthca_dev *dev,
 			 u8 port_num, u16 lid, u8 sl)
 {
@@ -90,6 +114,7 @@ static void smp_snoop(struct ib_device *ibdev,
 	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
 	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
 		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
+			mthca_update_rate(to_mdev(ibdev), port_num);
 			update_sm_ah(to_mdev(ibdev), port_num,
 				     be16_to_cpup((__be16 *) (mad->data + 58)),
 				     (*(u8 *) (mad->data + 76)) & 0xf);
@@ -246,6 +271,7 @@ int mthca_create_agents(struct mthca_dev *dev)
 {
 	struct ib_mad_agent *agent;
 	int p, q;
+	int ret;
 
 	spin_lock_init(&dev->sm_lock);
 
@@ -255,11 +281,23 @@ int mthca_create_agents(struct mthca_dev *dev)
 					      q ? IB_QPT_GSI : IB_QPT_SMI,
 					      NULL, 0, send_handler,
 					      NULL, NULL);
-		if (IS_ERR(agent))
+		if (IS_ERR(agent)) {
+			ret = PTR_ERR(agent);
 			goto err;
+		}
 		dev->send_agent[p][q] = agent;
 	}
 
+
+	for (p = 1; p <= dev->limits.num_ports; ++p) {
+		ret = mthca_update_rate(dev, p);
+		if (ret) {
+			mthca_err(dev, "Failed to obtain port %d rate."
+				  " aborting.\n", p);
+			goto err;
+		}
+	}
+
 	return 0;
 
 err:
@@ -268,7 +306,7 @@ err:
 			if (dev->send_agent[p][q])
 				ib_unregister_mad_agent(dev->send_agent[p][q]);
 
-	return PTR_ERR(agent);
+	return ret;
 }
 
 void __devexit mthca_free_agents(struct mthca_dev *dev)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 597d7dc7088e..0dc5b8da0007 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -199,6 +199,18 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
 	mdev->limits.port_width_cap = dev_lim->max_port_width;
 	mdev->limits.page_size_cap  = ~(u32) (dev_lim->min_page_sz - 1);
 	mdev->limits.flags          = dev_lim->flags;
+	/*
+	 * For old FW that doesn't return static rate support, use a
+	 * value of 0x3 (only static rate values of 0 or 1 are handled),
+	 * except on Sinai, where even old FW can handle static rate
+	 * values of 2 and 3.
+	 */
+	if (dev_lim->stat_rate_support)
+		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
+	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+		mdev->limits.stat_rate_support = 0xf;
+	else
+		mdev->limits.stat_rate_support = 0x3;
 
 	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
 	   May be doable since hardware supports it for SRQ.
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 2e7f52136965..6676a786d690 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -257,6 +257,8 @@ struct mthca_qp {
 	atomic_t refcount;
 	u32 qpn;
 	int is_direct;
+	u8  port; /* for SQP and memfree use only */
+	u8  alt_port; /* for memfree use only */
 	u8  transport;
 	u8  state;
 	u8  atomic_rd_en;
@@ -278,7 +280,6 @@ struct mthca_qp {
 
 struct mthca_sqp {
 	struct mthca_qp qp;
-	int             port;
 	int             pkey_index;
 	u32             qkey;
 	u32             send_psn;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 057c8e6af87b..f37b0e367323 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -248,6 +248,9 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 		return;
 	}
 
+	if (event_type == IB_EVENT_PATH_MIG)
+		qp->port = qp->alt_port;
+
 	event.device      = &dev->ib_dev;
 	event.event       = event_type;
 	event.element.qp  = &qp->ibqp;
@@ -392,10 +395,16 @@ static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
 {
 	memset(ib_ah_attr, 0, sizeof *path);
 	ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+
+	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
+		return;
+
 	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
 	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
 	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
-	ib_ah_attr->static_rate   = path->static_rate & 0x7;
+	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
+						     path->static_rate & 0x7,
+						     ib_ah_attr->port_num);
 	ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
 	if (ib_ah_attr->ah_flags) {
 		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
@@ -455,8 +464,10 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
 	qp_attr->cap.max_inline_data = qp->max_inline_data;
 
-	to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
-	to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+	if (qp->transport == RC || qp->transport == UC) {
+		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+	}
 
 	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
 	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
@@ -484,11 +495,11 @@ out:
 }
 
 static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
-			  struct mthca_qp_path *path)
+			  struct mthca_qp_path *path, u8 port)
 {
 	path->g_mylmc     = ah->src_path_bits & 0x7f;
 	path->rlid        = cpu_to_be16(ah->dlid);
-	path->static_rate = !!ah->static_rate;
+	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);
 
 	if (ah->ah_flags & IB_AH_GRH) {
 		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
@@ -634,7 +645,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
 	if (qp->transport == MLX)
 		qp_context->pri_path.port_pkey |=
-			cpu_to_be32(to_msqp(qp)->port << 24);
+			cpu_to_be32(qp->port << 24);
 	else {
 		if (attr_mask & IB_QP_PORT) {
 			qp_context->pri_path.port_pkey |=
@@ -657,7 +668,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_AV) {
-		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path))
+		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
+				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
 			return -EINVAL;
 
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
@@ -681,7 +693,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 			return -EINVAL;
 		}
 
-		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path))
+		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
+				   attr->alt_ah_attr.port_num))
 			return -EINVAL;
 
 		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
@@ -791,6 +804,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp->atomic_rd_en = attr->qp_access_flags;
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
 		qp->resp_depth = attr->max_dest_rd_atomic;
+	if (attr_mask & IB_QP_PORT)
+		qp->port = attr->port_num;
+	if (attr_mask & IB_QP_ALT_PATH)
+		qp->alt_port = attr->alt_port_num;
 
 	if (is_sqp(dev, qp))
 		store_attrs(to_msqp(qp), attr, attr_mask);
@@ -802,13 +819,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	if (is_qp0(dev, qp)) {
 		if (cur_state != IB_QPS_RTR &&
 		    new_state == IB_QPS_RTR)
-			init_port(dev, to_msqp(qp)->port);
+			init_port(dev, qp->port);
 
 		if (cur_state != IB_QPS_RESET &&
 		    cur_state != IB_QPS_ERR &&
 		    (new_state == IB_QPS_RESET ||
 		    new_state == IB_QPS_ERR))
-			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
+			mthca_CLOSE_IB(dev, qp->port, &status);
 	}
 
 	/*
@@ -1212,6 +1229,9 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 	if (qp->qpn == -1)
 		return -ENOMEM;
 
+	/* initialize port to zero for error-catching. */
+	qp->port = 0;
+
 	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
 				    send_policy, qp);
 	if (err) {
@@ -1261,7 +1281,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	if (err)
 		goto err_out;
 
-	sqp->port = port;
+	sqp->qp.port      = port;
 	sqp->qp.qpn       = mqpn;
 	sqp->qp.transport = MLX;
 
@@ -1404,10 +1424,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
 	if (!sqp->qp.ibqp.qp_num)
-		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
 				   sqp->pkey_index, &pkey);
 	else
-		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
 				   wr->wr.ud.pkey_index, &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 685258e34034..5dde380e8dbe 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -213,7 +213,7 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
 		   gid_buf, path.pathrec.dlid ? "yes" : "no");
 
 	if (path.pathrec.dlid) {
-		rate = ib_sa_rate_enum_to_int(path.pathrec.rate) * 25;
+		rate = ib_rate_to_mult(path.pathrec.rate) * 25;
 
 		seq_printf(file,
 			   "  DLID:     0x%04x\n"
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8f6607bf4261..9cb9e430aaaf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -373,16 +373,9 @@ static void path_rec_completion(int status,
 		struct ib_ah_attr av = {
 			.dlid          = be16_to_cpu(pathrec->dlid),
 			.sl            = pathrec->sl,
-			.port_num      = priv->port
+			.port_num      = priv->port,
+			.static_rate   = pathrec->rate
 		};
-		int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);
-
-		if (path_rate > 0 && priv->local_rate > path_rate)
-			av.static_rate = (priv->local_rate - 1) / path_rate;
-
-		ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
-			  av.static_rate, priv->local_rate,
-			  ib_sa_rate_enum_to_int(pathrec->rate));
 
 		ah = ipoib_create_ah(dev, priv->pd, &av);
 	}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index a8395ef06c17..07b9826b5193 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -250,6 +250,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 			.port_num     = priv->port,
 			.sl           = mcast->mcmember.sl,
 			.ah_flags     = IB_AH_GRH,
+			.static_rate  = mcast->mcmember.rate,
 			.grh          = {
 				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
 				.hop_limit     = mcast->mcmember.hop_limit,
@@ -257,17 +258,8 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 				.traffic_class = mcast->mcmember.traffic_class
 			}
 		};
-		int path_rate = ib_sa_rate_enum_to_int(mcast->mcmember.rate);
-
 		av.grh.dgid = mcast->mcmember.mgid;
 
-		if (path_rate > 0 && priv->local_rate > path_rate)
-			av.static_rate = (priv->local_rate - 1) / path_rate;
-
-		ipoib_dbg_mcast(priv, "static_rate %d for local port %dX, mcmember %dX\n",
-				av.static_rate, priv->local_rate,
-				ib_sa_rate_enum_to_int(mcast->mcmember.rate));
-
 		ah = ipoib_create_ah(dev, priv->pd, &av);
 		if (!ah) {
 			ipoib_warn(priv, "ib_address_create failed\n");