author		Yevgeny Petrilin <yevgenyp@mellanox.co.il>	2008-10-22 13:25:29 -0400
committer	Roland Dreier <rolandd@cisco.com>	2008-10-22 13:25:29 -0400
commit		93fc9e1bb6507dde945c2eab68c93e1066ac3691 (patch)
tree		aa495ec31b7372580f9ec50acead1d170fd70aab
parent		a3cdcbfa8fb1fccfe48d359da86e99546610c562 (diff)
mlx4_core: Support multiple pre-reserved QP regions
For Ethernet support, we need to reserve QPs for the Ethernet and fibre channel drivers. The QPs are reserved at the end of the QP table; this way we ensure that they are aligned to their size. These reserved ranges must be accounted for when the bitmaps are created, so the mlx4 bitmap utility functions are extended to allow reserved ranges at both the bottom and the top of the range.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
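To make the layout concrete, here is a minimal userspace sketch (not the driver code; the toy_bitmap names and the sizes are illustrative) of an allocator that keeps a reserved range at the bottom and another at the top, as described above:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct toy_bitmap {
		unsigned num;           /* total object count                      */
		unsigned max;           /* allocatable count = num - reserved_top  */
		unsigned last;          /* round-robin search start                */
		unsigned char *table;   /* one byte per allocatable object, 0=free */
	};

	static int toy_bitmap_init(struct toy_bitmap *b, unsigned num,
				   unsigned reserved_bot, unsigned reserved_top)
	{
		b->num  = num;
		b->max  = num - reserved_top; /* top region never enters the search */
		b->last = 0;
		b->table = calloc(b->max, 1);
		if (!b->table)
			return -1;
		memset(b->table, 1, reserved_bot); /* bottom region pre-marked used */
		return 0;
	}

	static int toy_bitmap_alloc(struct toy_bitmap *b)
	{
		for (unsigned i = 0; i < b->max; ++i) {
			unsigned obj = (b->last + i) % b->max;
			if (!b->table[obj]) {
				b->table[obj] = 1;
				b->last = (obj + 1) % b->max;
				return (int)obj;
			}
		}
		return -1; /* nothing free outside the reserved regions */
	}

	int main(void)
	{
		struct toy_bitmap b;

		/* 64 objects: 8 reserved at the bottom (think firmware QPs) and
		 * 16 reserved at the top (think an Ethernet/FC region). */
		if (toy_bitmap_init(&b, 64, 8, 16))
			return 1;
		printf("first dynamic object: %d\n", toy_bitmap_alloc(&b)); /* 8  */
		printf("dynamic range: [8, %u)\n", b.max);                  /* 48 */
		free(b.table);
		return 0;
	}

Dynamic allocations are confined to [reserved_bot, num - reserved_top); the top region is handed out by its owners at fixed, size-aligned offsets.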
-rw-r--r--	drivers/net/mlx4/alloc.c	29
-rw-r--r--	drivers/net/mlx4/cq.c	2
-rw-r--r--	drivers/net/mlx4/eq.c	2
-rw-r--r--	drivers/net/mlx4/fw.c	5
-rw-r--r--	drivers/net/mlx4/fw.h	2
-rw-r--r--	drivers/net/mlx4/main.c	62
-rw-r--r--	drivers/net/mlx4/mcg.c	4
-rw-r--r--	drivers/net/mlx4/mlx4.h	4
-rw-r--r--	drivers/net/mlx4/mr.c	2
-rw-r--r--	drivers/net/mlx4/pd.c	4
-rw-r--r--	drivers/net/mlx4/qp.c	36
-rw-r--r--	drivers/net/mlx4/srq.c	2
-rw-r--r--	include/linux/mlx4/device.h	19
13 files changed, 144 insertions, 29 deletions
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index e6c0d5bb5dcb..e2bc7ecf162d 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -47,13 +47,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 
 	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
 	if (obj >= bitmap->max) {
-		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
 		obj = find_first_zero_bit(bitmap->table, bitmap->max);
 	}
 
 	if (obj < bitmap->max) {
 		set_bit(obj, bitmap->table);
-		bitmap->last = (obj + 1) & (bitmap->max - 1);
+		bitmap->last = (obj + 1);
+		if (bitmap->last == bitmap->max)
+			bitmap->last = 0;
 		obj |= bitmap->top;
 	} else
 		obj = -1;
@@ -109,9 +112,9 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
 	obj = find_aligned_range(bitmap->table, bitmap->last,
 				 bitmap->max, cnt, align);
 	if (obj >= bitmap->max) {
-		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
-		obj = find_aligned_range(bitmap->table, 0,
-					 bitmap->max,
-					 cnt, align);
+		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+				& bitmap->mask;
+		obj = find_aligned_range(bitmap->table, 0, bitmap->max,
+					 cnt, align);
 	}
 
@@ -136,17 +139,19 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
 {
 	u32 i;
 
-	obj &= bitmap->max - 1;
+	obj &= bitmap->max + bitmap->reserved_top - 1;
 
 	spin_lock(&bitmap->lock);
 	for (i = 0; i < cnt; i++)
 		clear_bit(obj + i, bitmap->table);
 	bitmap->last = min(bitmap->last, obj);
-	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+			& bitmap->mask;
 	spin_unlock(&bitmap->lock);
 }
 
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+		     u32 reserved_bot, u32 reserved_top)
 {
 	int i;
 
@@ -156,14 +161,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved
 
 	bitmap->last = 0;
 	bitmap->top  = 0;
-	bitmap->max  = num;
+	bitmap->max  = num - reserved_top;
 	bitmap->mask = mask;
+	bitmap->reserved_top = reserved_top;
 	spin_lock_init(&bitmap->lock);
-	bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
+	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+				sizeof (long), GFP_KERNEL);
 	if (!bitmap->table)
 		return -ENOMEM;
 
-	for (i = 0; i < reserved; ++i)
+	for (i = 0; i < reserved_bot; ++i)
 		set_bit(i, bitmap->table);
 
 	return 0;
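For callers, the only visible change is the extra argument. As a usage illustration (the values are hypothetical), a table of 256 objects with wrap mask 255, 4 entries reserved at the bottom and 16 reserved at the top would now be set up as:

	err = mlx4_bitmap_init(&bitmap, 256, 255, 4, 16);

Passing 0 for reserved_top, as the unchanged callers below do, keeps the old behaviour.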
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 9bb50e3f8974..b7ad2829d67e 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
 
 	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
-			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
+			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8a8b56135a58..de169338cd90 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	int i;
 
 	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
-			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
+			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7e32955da982..40d8142c23b2 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -357,6 +357,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_PORT_MTU_OFFSET			0x01
 #define QUERY_PORT_WIDTH_OFFSET		0x06
 #define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
+#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
 #define QUERY_PORT_MAX_VL_OFFSET		0x0b
 
 	for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -374,6 +375,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 			dev_cap->max_pkeys[i] = 1 << (field & 0xf);
 			MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
 			dev_cap->max_vl[i] = field & 0xf;
+			MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+			dev_cap->log_max_macs[i] = field & 0xf;
+			dev_cap->log_max_vlans[i] = field >> 4;
+
 		}
 	}
 
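The new QUERY_PORT field packs both per-port limits into one byte: the low nibble carries log_max_macs and the high nibble log_max_vlans, which is what the masking above extracts. A tiny standalone illustration (the byte value 0x47 is made up, not read from hardware):

	#include <stdio.h>

	int main(void)
	{
		unsigned char field = 0x47; /* hypothetical byte at offset 0x0a */

		printf("log_max_macs  = %d\n", field & 0xf); /* low nibble  -> 7 (128 MACs) */
		printf("log_max_vlans = %d\n", field >> 4);  /* high nibble -> 4 (16 VLANs) */
		return 0;
	}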
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index decbb5c2ad41..c34e726d66e4 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -102,6 +102,8 @@ struct mlx4_dev_cap {
 	u32 reserved_lkey;
 	u64 max_icm_sz;
 	int max_gso_sz;
+	u8  log_max_macs[MLX4_MAX_PORTS + 1];
+	u8  log_max_vlans[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919de2e..560e1962212e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,19 @@ static struct mlx4_profile default_profile = {
 	.num_mtt	= 1 << 20,
 };
 
+static int log_num_mac = 2;
+module_param_named(log_num_mac, log_num_mac, int, 0444);
+MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
+
+static int log_num_vlan;
+module_param_named(log_num_vlan, log_num_vlan, int, 0444);
+MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
+
+static int use_prio;
+module_param_named(use_prio, use_prio, bool, 0444);
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
+		 "(0/1, default 0)");
+
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
 	int err;
@@ -134,7 +147,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
 	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
 	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
-	dev->caps.reserved_qps	     = dev_cap->reserved_qps;
 	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
 	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
 	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
@@ -163,6 +175,39 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 
+	dev->caps.log_num_macs  = log_num_mac;
+	dev->caps.log_num_vlans = log_num_vlan;
+	dev->caps.log_num_prios = use_prio ? 3 : 0;
+
+	for (i = 1; i <= dev->caps.num_ports; ++i) {
+		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
+			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+			mlx4_warn(dev, "Requested number of MACs is too much "
+				  "for port %d, reducing to %d.\n",
+				  i, 1 << dev->caps.log_num_macs);
+		}
+		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
+			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+			mlx4_warn(dev, "Requested number of VLANs is too much "
+				  "for port %d, reducing to %d.\n",
+				  i, 1 << dev->caps.log_num_vlans);
+		}
+	}
+
+	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
+	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
+		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
+		(1 << dev->caps.log_num_macs) *
+		(1 << dev->caps.log_num_vlans) *
+		(1 << dev->caps.log_num_prios) *
+		dev->caps.num_ports;
+	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
+	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
+		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
+		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
+		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+
 	return 0;
 }
 
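As a worked example, assuming the module defaults and a hypothetical 2-port HCA: log_num_mac = 2, log_num_vlan = 0 and use_prio = 0 give (1 << 2) * (1 << 0) * (1 << 0) * 2 = 8 QPs each for the ETH_ADDR and FC_ADDR regions, while FC_EXCH gets MLX4_NUM_FEXCH = 64 * 1024 = 65536. So 8 + 8 + 65536 = 65552 QPs are pre-reserved on top of the firmware region reported by QUERY_DEV_CAP.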
@@ -211,7 +256,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 				  ((u64) (MLX4_CMPT_TYPE_QP *
 				  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
 				  cmpt_entry_sz, dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err)
 		goto err;
 
@@ -336,7 +382,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  init_hca->qpc_base,
 				  dev_cap->qpc_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
 		goto err_unmap_dmpt;
@@ -346,7 +393,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  init_hca->auxc_base,
 				  dev_cap->aux_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
 		goto err_unmap_qp;
@@ -356,7 +404,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  init_hca->altc_base,
 				  dev_cap->altc_entry_sz,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
 		goto err_unmap_auxc;
@@ -366,7 +415,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 				  init_hca->rdmarc_base,
 				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
 				  dev->caps.num_qps,
-				  dev->caps.reserved_qps, 0, 0);
+				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
 		goto err_unmap_altc;
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c83f88ce0736..592c01ae2c5d 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 
-	err = mlx4_bitmap_init(&priv->mcg_table.bitmap,
-			       dev->caps.num_amgms, dev->caps.num_amgms - 1, 0);
+	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
+			       dev->caps.num_amgms - 1, 0, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index b55ddab73f66..9e2f44c31810 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -111,6 +111,7 @@ struct mlx4_bitmap {
 	u32			last;
 	u32			top;
 	u32			max;
+	u32			reserved_top;
 	u32			mask;
 	spinlock_t		lock;
 	unsigned long	       *table;
@@ -290,7 +291,8 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
 u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+		     u32 reserved_bot, u32 resetrved_top);
 void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
 
 int mlx4_reset(struct mlx4_dev *dev);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index d1dd5b48dbd1..0caf74cae8bc 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
 	int err;
 
 	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
-			       ~0, dev->caps.reserved_mrws);
+			       ~0, dev->caps.reserved_mrws, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index aa616892d09c..26d1a7a9e375 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
-				(1 << 24) - 1, dev->caps.reserved_pds);
+				(1 << 24) - 1, dev->caps.reserved_pds, 0);
 }
 
 void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
 
 	return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
 				dev->caps.num_uars, dev->caps.num_uars - 1,
-				max(128, dev->caps.reserved_uars));
+				max(128, dev->caps.reserved_uars), 0);
 }
 
 void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 98e0c40ba368..1c565ef8d179 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -272,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
 	int err;
+	int reserved_from_top = 0;
 
 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -281,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 	 * block of special QPs must be aligned to a multiple of 8, so
 	 * round up.
 	 */
-	dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
+	dev->caps.sqp_start =
+		ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+
+	{
+		int sort[MLX4_NUM_QP_REGION];
+		int i, j, tmp;
+		int last_base = dev->caps.num_qps;
+
+		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
+			sort[i] = i;
+
+		for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
+			for (j = 2; j < i; ++j) {
+				if (dev->caps.reserved_qps_cnt[sort[j]] >
+				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
+					tmp	    = sort[j];
+					sort[j]	    = sort[j - 1];
+					sort[j - 1] = tmp;
+				}
+			}
+		}
+
+		for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
+			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
+			dev->caps.reserved_qps_base[sort[i]] = last_base;
+			reserved_from_top +=
+				dev->caps.reserved_qps_cnt[sort[i]];
+		}
+
+	}
+
 	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
-			       (1 << 24) - 1, dev->caps.sqp_start + 8);
+			       (1 << 23) - 1, dev->caps.sqp_start + 8,
+			       reserved_from_top);
 	if (err)
 		return err;
 
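The block above packs the non-firmware regions at the top of the QP table in descending size order, so each power-of-two-sized region ends up naturally aligned to its size, and their total becomes the bitmap's reserved_top. A standalone sketch with made-up region sizes (not values read from firmware) shows the resulting bases:

	#include <stdio.h>

	int main(void)
	{
		unsigned num_qps = 1 << 17;             /* 131072 QPs in the table      */
		unsigned cnt[]   = { 64 * 1024, 8, 8 }; /* FC_EXCH, ETH_ADDR, FC_ADDR   */
		unsigned last_base = num_qps, reserved_from_top = 0;

		for (int i = 0; i < 3; ++i) {
			last_base -= cnt[i];            /* carve the next region off the top */
			reserved_from_top += cnt[i];
			printf("region %d: base 0x%05x, size %u\n", i, last_base, cnt[i]);
		}
		printf("reserved_from_top = %u\n", reserved_from_top);
		return 0;
	}

This prints bases 0x10000, 0x0fff8 and 0x0fff0, each a multiple of its region size, and reserved_from_top = 65552, which is what gets handed to mlx4_bitmap_init as the reserved top range.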
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 533eb6db24b3..fe9f218691f5 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
 
 	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
-			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs);
+			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
 	if (err)
 		return err;
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d21e879f3c90..693f93cd29e1 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -145,6 +145,18 @@ enum {
 	MLX4_MTT_FLAG_PRESENT		= 1
 };
 
+enum mlx4_qp_region {
+	MLX4_QP_REGION_FW = 0,
+	MLX4_QP_REGION_ETH_ADDR,
+	MLX4_QP_REGION_FC_ADDR,
+	MLX4_QP_REGION_FC_EXCH,
+	MLX4_NUM_QP_REGION
+};
+
+enum {
+	MLX4_NUM_FEXCH		= 64 * 1024,
+};
+
 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 {
 	return (major << 32) | (minor << 16) | subminor;
@@ -169,7 +181,6 @@ struct mlx4_caps {
 	int			max_rq_desc_sz;
 	int			max_qp_init_rdma;
 	int			max_qp_dest_rdma;
-	int			reserved_qps;
 	int			sqp_start;
 	int			num_srqs;
 	int			max_srq_wqes;
@@ -201,6 +212,12 @@ struct mlx4_caps {
 	u16			stat_rate_support;
 	u8			port_width_cap[MLX4_MAX_PORTS + 1];
 	int			max_gso_sz;
+	int			reserved_qps_cnt[MLX4_NUM_QP_REGION];
+	int			reserved_qps;
+	int			reserved_qps_base[MLX4_NUM_QP_REGION];
+	int			log_num_macs;
+	int			log_num_vlans;
+	int			log_num_prios;
 };
 
 struct mlx4_buf_list {