diff options
author | Marcel Apfelbaum <marcela@dev.mellanox.co.il> | 2011-12-12 23:16:56 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-12-13 13:56:07 -0500 |
commit | 2b8fb2867ca2736a715a88067fd0ec2904777cbe (patch) | |
tree | 507a7645aaeda05151045157756e1a828e60fe11 /drivers/net/ethernet | |
parent | 5b4c4d36860ef1c411d0669ffc15090417a33389 (diff) |
mlx4_core: mtts resources units changed to offset
In the previous implementation, mtts are managed by:
1. order - log(mtt segments), 'mtt segment' groups several mtts together.
2. first_seg - segment location relative to mtt table.
In the current implementation:
1. order - log(mtts) rather than segments
2. offset - mtt index in mtt table
Note: The actual mtt allocation is made in segments but it is
transparent to callers.
Rationale: The mtt resource holders are not interested in how the allocation
of mtt is done, but rather in how they will use it.
Signed-off-by: Marcel Apfelbaum <marcela@dev.mellanox.co.il>
Reviewed-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/fw.c | 2 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/main.c | 11 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/mr.c | 107 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/profile.c | 4 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 59 |
6 files changed, 90 insertions, 96 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 49bb2ead805a..99415fec9fdb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -209,7 +209,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
209 | size = dev->caps.num_mpts; | 209 | size = dev->caps.num_mpts; |
210 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); | 210 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); |
211 | 211 | ||
212 | size = dev->caps.num_mtt_segs * dev->caps.mtts_per_seg; | 212 | size = dev->caps.num_mtts; |
213 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); | 213 | MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); |
214 | 214 | ||
215 | size = dev->caps.num_mgms + dev->caps.num_amgms; | 215 | size = dev->caps.num_mgms + dev->caps.num_amgms; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 8be56326b04a..19363b618295 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -112,7 +112,7 @@ module_param_named(use_prio, use_prio, bool, 0444); | |||
112 | MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " | 112 | MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " |
113 | "(0/1, default 0)"); | 113 | "(0/1, default 0)"); |
114 | 114 | ||
115 | static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); | 115 | int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); |
116 | module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); | 116 | module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); |
117 | MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); | 117 | MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); |
118 | 118 | ||
@@ -222,9 +222,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
222 | dev->caps.max_cqes = dev_cap->max_cq_sz - 1; | 222 | dev->caps.max_cqes = dev_cap->max_cq_sz - 1; |
223 | dev->caps.reserved_cqs = dev_cap->reserved_cqs; | 223 | dev->caps.reserved_cqs = dev_cap->reserved_cqs; |
224 | dev->caps.reserved_eqs = dev_cap->reserved_eqs; | 224 | dev->caps.reserved_eqs = dev_cap->reserved_eqs; |
225 | dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; | 225 | dev->caps.reserved_mtts = dev_cap->reserved_mtts; |
226 | dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, | ||
227 | dev->caps.mtts_per_seg); | ||
228 | dev->caps.reserved_mrws = dev_cap->reserved_mrws; | 226 | dev->caps.reserved_mrws = dev_cap->reserved_mrws; |
229 | dev->caps.reserved_uars = dev_cap->reserved_uars; | 227 | dev->caps.reserved_uars = dev_cap->reserved_uars; |
230 | dev->caps.reserved_pds = dev_cap->reserved_pds; | 228 | dev->caps.reserved_pds = dev_cap->reserved_pds; |
@@ -232,7 +230,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
232 | dev_cap->reserved_xrcds : 0; | 230 | dev_cap->reserved_xrcds : 0; |
233 | dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? | 231 | dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? |
234 | dev_cap->max_xrcds : 0; | 232 | dev_cap->max_xrcds : 0; |
235 | dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; | 233 | dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; |
234 | |||
236 | dev->caps.max_msg_sz = dev_cap->max_msg_sz; | 235 | dev->caps.max_msg_sz = dev_cap->max_msg_sz; |
237 | dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); | 236 | dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); |
238 | dev->caps.flags = dev_cap->flags; | 237 | dev->caps.flags = dev_cap->flags; |
@@ -569,7 +568,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, | |||
569 | err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, | 568 | err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, |
570 | init_hca->mtt_base, | 569 | init_hca->mtt_base, |
571 | dev->caps.mtt_entry_sz, | 570 | dev->caps.mtt_entry_sz, |
572 | dev->caps.num_mtt_segs, | 571 | dev->caps.num_mtts, |
573 | dev->caps.reserved_mtts, 1, 0); | 572 | dev->caps.reserved_mtts, 1, 0); |
574 | if (err) { | 573 | if (err) { |
575 | mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); | 574 | mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index abf65d8af48d..879f825c6f6a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -191,6 +191,7 @@ do { \ | |||
191 | dev_warn(&mdev->pdev->dev, format, ##arg) | 191 | dev_warn(&mdev->pdev->dev, format, ##arg) |
192 | 192 | ||
193 | extern int mlx4_log_num_mgm_entry_size; | 193 | extern int mlx4_log_num_mgm_entry_size; |
194 | extern int log_mtts_per_seg; | ||
194 | 195 | ||
195 | #define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) | 196 | #define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) |
196 | #define ALL_SLAVES 0xff | 197 | #define ALL_SLAVES 0xff |
@@ -240,7 +241,7 @@ struct mlx4_mpt_entry { | |||
240 | __be32 win_cnt; | 241 | __be32 win_cnt; |
241 | u8 reserved1[3]; | 242 | u8 reserved1[3]; |
242 | u8 mtt_rep; | 243 | u8 mtt_rep; |
243 | __be64 mtt_seg; | 244 | __be64 mtt_addr; |
244 | __be32 mtt_sz; | 245 | __be32 mtt_sz; |
245 | __be32 entity_size; | 246 | __be32 entity_size; |
246 | __be32 first_byte_offset; | 247 | __be32 first_byte_offset; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index f8fd0a1d73af..f7243b26bdf5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
@@ -166,18 +166,24 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) | |||
166 | { | 166 | { |
167 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; | 167 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; |
168 | u32 seg; | 168 | u32 seg; |
169 | int seg_order; | ||
170 | u32 offset; | ||
169 | 171 | ||
170 | seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order); | 172 | seg_order = max_t(int, order - log_mtts_per_seg, 0); |
173 | |||
174 | seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); | ||
171 | if (seg == -1) | 175 | if (seg == -1) |
172 | return -1; | 176 | return -1; |
173 | 177 | ||
174 | if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg, | 178 | offset = seg * (1 << log_mtts_per_seg); |
175 | seg + (1 << order) - 1)) { | 179 | |
176 | mlx4_buddy_free(&mr_table->mtt_buddy, seg, order); | 180 | if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset, |
181 | offset + (1 << order) - 1)) { | ||
182 | mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); | ||
177 | return -1; | 183 | return -1; |
178 | } | 184 | } |
179 | 185 | ||
180 | return seg; | 186 | return offset; |
181 | } | 187 | } |
182 | 188 | ||
183 | static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) | 189 | static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) |
@@ -212,45 +218,49 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, | |||
212 | } else | 218 | } else |
213 | mtt->page_shift = page_shift; | 219 | mtt->page_shift = page_shift; |
214 | 220 | ||
215 | for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1) | 221 | for (mtt->order = 0, i = 1; i < npages; i <<= 1) |
216 | ++mtt->order; | 222 | ++mtt->order; |
217 | 223 | ||
218 | mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); | 224 | mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); |
219 | if (mtt->first_seg == -1) | 225 | if (mtt->offset == -1) |
220 | return -ENOMEM; | 226 | return -ENOMEM; |
221 | 227 | ||
222 | return 0; | 228 | return 0; |
223 | } | 229 | } |
224 | EXPORT_SYMBOL_GPL(mlx4_mtt_init); | 230 | EXPORT_SYMBOL_GPL(mlx4_mtt_init); |
225 | 231 | ||
226 | void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, | 232 | void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) |
227 | int order) | ||
228 | { | 233 | { |
234 | u32 first_seg; | ||
235 | int seg_order; | ||
229 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; | 236 | struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; |
230 | 237 | ||
231 | mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, order); | 238 | seg_order = max_t(int, order - log_mtts_per_seg, 0); |
239 | first_seg = offset / (1 << log_mtts_per_seg); | ||
240 | |||
241 | mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order); | ||
232 | mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg, | 242 | mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg, |
233 | first_seg + (1 << order) - 1); | 243 | first_seg + (1 << seg_order) - 1); |
234 | } | 244 | } |
235 | 245 | ||
236 | static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order) | 246 | static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) |
237 | { | 247 | { |
238 | u64 in_param; | 248 | u64 in_param; |
239 | int err; | 249 | int err; |
240 | 250 | ||
241 | if (mlx4_is_mfunc(dev)) { | 251 | if (mlx4_is_mfunc(dev)) { |
242 | set_param_l(&in_param, first_seg); | 252 | set_param_l(&in_param, offset); |
243 | set_param_h(&in_param, order); | 253 | set_param_h(&in_param, order); |
244 | err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, | 254 | err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, |
245 | MLX4_CMD_FREE_RES, | 255 | MLX4_CMD_FREE_RES, |
246 | MLX4_CMD_TIME_CLASS_A, | 256 | MLX4_CMD_TIME_CLASS_A, |
247 | MLX4_CMD_WRAPPED); | 257 | MLX4_CMD_WRAPPED); |
248 | if (err) | 258 | if (err) |
249 | mlx4_warn(dev, "Failed to free mtt range at:%d" | 259 | mlx4_warn(dev, "Failed to free mtt range at:" |
250 | " order:%d\n", first_seg, order); | 260 | "%d order:%d\n", offset, order); |
251 | return; | 261 | return; |
252 | } | 262 | } |
253 | __mlx4_free_mtt_range(dev, first_seg, order); | 263 | __mlx4_free_mtt_range(dev, offset, order); |
254 | } | 264 | } |
255 | 265 | ||
256 | void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) | 266 | void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) |
@@ -258,13 +268,13 @@ void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) | |||
258 | if (mtt->order < 0) | 268 | if (mtt->order < 0) |
259 | return; | 269 | return; |
260 | 270 | ||
261 | mlx4_free_mtt_range(dev, mtt->first_seg, mtt->order); | 271 | mlx4_free_mtt_range(dev, mtt->offset, mtt->order); |
262 | } | 272 | } |
263 | EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); | 273 | EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); |
264 | 274 | ||
265 | u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) | 275 | u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) |
266 | { | 276 | { |
267 | return (u64) mtt->first_seg * dev->caps.mtt_entry_sz; | 277 | return (u64) mtt->offset * dev->caps.mtt_entry_sz; |
268 | } | 278 | } |
269 | EXPORT_SYMBOL_GPL(mlx4_mtt_addr); | 279 | EXPORT_SYMBOL_GPL(mlx4_mtt_addr); |
270 | 280 | ||
@@ -504,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) | |||
504 | 514 | ||
505 | if (mr->mtt.order < 0) { | 515 | if (mr->mtt.order < 0) { |
506 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); | 516 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); |
507 | mpt_entry->mtt_seg = 0; | 517 | mpt_entry->mtt_addr = 0; |
508 | } else { | 518 | } else { |
509 | mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt)); | 519 | mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, |
520 | &mr->mtt)); | ||
510 | } | 521 | } |
511 | 522 | ||
512 | if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { | 523 | if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { |
@@ -514,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) | |||
514 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); | 525 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); |
515 | mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | | 526 | mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | |
516 | MLX4_MPT_PD_FLAG_RAE); | 527 | MLX4_MPT_PD_FLAG_RAE); |
517 | mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * | 528 | mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); |
518 | dev->caps.mtts_per_seg); | ||
519 | } else { | 529 | } else { |
520 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); | 530 | mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); |
521 | } | 531 | } |
@@ -548,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
548 | __be64 *mtts; | 558 | __be64 *mtts; |
549 | dma_addr_t dma_handle; | 559 | dma_addr_t dma_handle; |
550 | int i; | 560 | int i; |
551 | int s = start_index * sizeof (u64); | ||
552 | |||
553 | /* All MTTs must fit in the same page */ | ||
554 | if (start_index / (PAGE_SIZE / sizeof (u64)) != | ||
555 | (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) | ||
556 | return -EINVAL; | ||
557 | 561 | ||
558 | if (start_index & (dev->caps.mtts_per_seg - 1)) | 562 | mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset + |
559 | return -EINVAL; | 563 | start_index, &dma_handle); |
560 | 564 | ||
561 | mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + | ||
562 | s / dev->caps.mtt_entry_sz, &dma_handle); | ||
563 | if (!mtts) | 565 | if (!mtts) |
564 | return -ENOMEM; | 566 | return -ENOMEM; |
565 | 567 | ||
@@ -580,15 +582,25 @@ int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
580 | { | 582 | { |
581 | int err = 0; | 583 | int err = 0; |
582 | int chunk; | 584 | int chunk; |
585 | int mtts_per_page; | ||
586 | int max_mtts_first_page; | ||
587 | |||
588 | /* compute how may mtts fit in the first page */ | ||
589 | mtts_per_page = PAGE_SIZE / sizeof(u64); | ||
590 | max_mtts_first_page = mtts_per_page - (mtt->offset + start_index) | ||
591 | % mtts_per_page; | ||
592 | |||
593 | chunk = min_t(int, max_mtts_first_page, npages); | ||
583 | 594 | ||
584 | while (npages > 0) { | 595 | while (npages > 0) { |
585 | chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages); | ||
586 | err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); | 596 | err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); |
587 | if (err) | 597 | if (err) |
588 | return err; | 598 | return err; |
589 | npages -= chunk; | 599 | npages -= chunk; |
590 | start_index += chunk; | 600 | start_index += chunk; |
591 | page_list += chunk; | 601 | page_list += chunk; |
602 | |||
603 | chunk = min_t(int, mtts_per_page, npages); | ||
592 | } | 604 | } |
593 | return err; | 605 | return err; |
594 | } | 606 | } |
@@ -612,18 +624,9 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
612 | inbox = mailbox->buf; | 624 | inbox = mailbox->buf; |
613 | 625 | ||
614 | while (npages > 0) { | 626 | while (npages > 0) { |
615 | int s = mtt->first_seg * dev->caps.mtts_per_seg + | 627 | chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2, |
616 | start_index; | 628 | npages); |
617 | chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - | 629 | inbox[0] = cpu_to_be64(mtt->offset + start_index); |
618 | dev->caps.mtts_per_seg, npages); | ||
619 | if (s / (PAGE_SIZE / sizeof(u64)) != | ||
620 | (s + chunk - 1) / (PAGE_SIZE / sizeof(u64))) | ||
621 | chunk = PAGE_SIZE / sizeof(u64) - | ||
622 | (s % (PAGE_SIZE / sizeof(u64))); | ||
623 | |||
624 | inbox[0] = cpu_to_be64(mtt->first_seg * | ||
625 | dev->caps.mtts_per_seg + | ||
626 | start_index); | ||
627 | inbox[1] = 0; | 630 | inbox[1] = 0; |
628 | for (i = 0; i < chunk; ++i) | 631 | for (i = 0; i < chunk; ++i) |
629 | inbox[i + 2] = cpu_to_be64(page_list[i] | | 632 | inbox[i + 2] = cpu_to_be64(page_list[i] | |
@@ -690,7 +693,8 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) | |||
690 | return err; | 693 | return err; |
691 | 694 | ||
692 | err = mlx4_buddy_init(&mr_table->mtt_buddy, | 695 | err = mlx4_buddy_init(&mr_table->mtt_buddy, |
693 | ilog2(dev->caps.num_mtt_segs)); | 696 | ilog2(dev->caps.num_mtts / |
697 | (1 << log_mtts_per_seg))); | ||
694 | if (err) | 698 | if (err) |
695 | goto err_buddy; | 699 | goto err_buddy; |
696 | 700 | ||
@@ -809,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, | |||
809 | int max_maps, u8 page_shift, struct mlx4_fmr *fmr) | 813 | int max_maps, u8 page_shift, struct mlx4_fmr *fmr) |
810 | { | 814 | { |
811 | struct mlx4_priv *priv = mlx4_priv(dev); | 815 | struct mlx4_priv *priv = mlx4_priv(dev); |
812 | u64 mtt_seg; | 816 | u64 mtt_offset; |
813 | int err = -ENOMEM; | 817 | int err = -ENOMEM; |
814 | 818 | ||
815 | if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) | 819 | if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) |
@@ -829,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, | |||
829 | if (err) | 833 | if (err) |
830 | return err; | 834 | return err; |
831 | 835 | ||
832 | mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz; | 836 | mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz; |
833 | 837 | ||
834 | fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, | 838 | fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, |
835 | fmr->mr.mtt.first_seg, | 839 | fmr->mr.mtt.offset, |
836 | &fmr->dma_handle); | 840 | &fmr->dma_handle); |
841 | |||
837 | if (!fmr->mtts) { | 842 | if (!fmr->mtts) { |
838 | err = -ENOMEM; | 843 | err = -ENOMEM; |
839 | goto err_free; | 844 | goto err_free; |
@@ -872,7 +877,7 @@ static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, | |||
872 | return err; | 877 | return err; |
873 | 878 | ||
874 | fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, | 879 | fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, |
875 | fmr->mr.mtt.first_seg, | 880 | fmr->mr.mtt.offset, |
876 | &fmr->dma_handle); | 881 | &fmr->dma_handle); |
877 | if (!fmr->mtts) { | 882 | if (!fmr->mtts) { |
878 | err = -ENOMEM; | 883 | err = -ENOMEM; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 771c4605ef86..66f91ca7a7c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c | |||
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, | |||
98 | profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; | 98 | profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; |
99 | profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; | 99 | profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; |
100 | profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; | 100 | profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; |
101 | profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; | 101 | profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz; |
102 | profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev); | 102 | profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev); |
103 | 103 | ||
104 | profile[MLX4_RES_QP].num = request->num_qp; | 104 | profile[MLX4_RES_QP].num = request->num_qp; |
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, | |||
210 | init_hca->cmpt_base = profile[i].start; | 210 | init_hca->cmpt_base = profile[i].start; |
211 | break; | 211 | break; |
212 | case MLX4_RES_MTT: | 212 | case MLX4_RES_MTT: |
213 | dev->caps.num_mtt_segs = profile[i].num; | 213 | dev->caps.num_mtts = profile[i].num; |
214 | priv->mr_table.mtt_base = profile[i].start; | 214 | priv->mr_table.mtt_base = profile[i].start; |
215 | init_hca->mtt_base = profile[i].start; | 215 | init_hca->mtt_base = profile[i].start; |
216 | break; | 216 | break; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 0d99f57f9c8c..bdd61c35d044 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -1550,9 +1550,9 @@ static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) | |||
1550 | return (be32_to_cpu(mpt->flags) >> 9) & 1; | 1550 | return (be32_to_cpu(mpt->flags) >> 9) & 1; |
1551 | } | 1551 | } |
1552 | 1552 | ||
1553 | static int mr_get_mtt_seg(struct mlx4_mpt_entry *mpt) | 1553 | static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) |
1554 | { | 1554 | { |
1555 | return (int)be64_to_cpu(mpt->mtt_seg) & 0xfffffff8; | 1555 | return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; |
1556 | } | 1556 | } |
1557 | 1557 | ||
1558 | static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) | 1558 | static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) |
@@ -1565,12 +1565,12 @@ static int mr_get_pdn(struct mlx4_mpt_entry *mpt) | |||
1565 | return be32_to_cpu(mpt->pd_flags) & 0xffffff; | 1565 | return be32_to_cpu(mpt->pd_flags) & 0xffffff; |
1566 | } | 1566 | } |
1567 | 1567 | ||
1568 | static int qp_get_mtt_seg(struct mlx4_qp_context *qpc) | 1568 | static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) |
1569 | { | 1569 | { |
1570 | return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; | 1570 | return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; |
1571 | } | 1571 | } |
1572 | 1572 | ||
1573 | static int srq_get_mtt_seg(struct mlx4_srq_context *srqc) | 1573 | static int srq_get_mtt_addr(struct mlx4_srq_context *srqc) |
1574 | { | 1574 | { |
1575 | return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; | 1575 | return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; |
1576 | } | 1576 | } |
@@ -1614,8 +1614,8 @@ static int pdn2slave(int pdn) | |||
1614 | static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, | 1614 | static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, |
1615 | int size, struct res_mtt *mtt) | 1615 | int size, struct res_mtt *mtt) |
1616 | { | 1616 | { |
1617 | int res_start = mtt->com.res_id * dev->caps.mtts_per_seg; | 1617 | int res_start = mtt->com.res_id; |
1618 | int res_size = (1 << mtt->order) * dev->caps.mtts_per_seg; | 1618 | int res_size = (1 << mtt->order); |
1619 | 1619 | ||
1620 | if (start < res_start || start + size > res_start + res_size) | 1620 | if (start < res_start || start + size > res_start + res_size) |
1621 | return -EPERM; | 1621 | return -EPERM; |
@@ -1632,8 +1632,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, | |||
1632 | int index = vhcr->in_modifier; | 1632 | int index = vhcr->in_modifier; |
1633 | struct res_mtt *mtt; | 1633 | struct res_mtt *mtt; |
1634 | struct res_mpt *mpt; | 1634 | struct res_mpt *mpt; |
1635 | int mtt_base = (mr_get_mtt_seg(inbox->buf) / dev->caps.mtt_entry_sz) * | 1635 | int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; |
1636 | dev->caps.mtts_per_seg; | ||
1637 | int phys; | 1636 | int phys; |
1638 | int id; | 1637 | int id; |
1639 | 1638 | ||
@@ -1644,8 +1643,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, | |||
1644 | 1643 | ||
1645 | phys = mr_phys_mpt(inbox->buf); | 1644 | phys = mr_phys_mpt(inbox->buf); |
1646 | if (!phys) { | 1645 | if (!phys) { |
1647 | err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, | 1646 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
1648 | RES_MTT, &mtt); | ||
1649 | if (err) | 1647 | if (err) |
1650 | goto ex_abort; | 1648 | goto ex_abort; |
1651 | 1649 | ||
@@ -1769,8 +1767,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
1769 | struct res_mtt *mtt; | 1767 | struct res_mtt *mtt; |
1770 | struct res_qp *qp; | 1768 | struct res_qp *qp; |
1771 | struct mlx4_qp_context *qpc = inbox->buf + 8; | 1769 | struct mlx4_qp_context *qpc = inbox->buf + 8; |
1772 | int mtt_base = (qp_get_mtt_seg(qpc) / dev->caps.mtt_entry_sz) * | 1770 | int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; |
1773 | dev->caps.mtts_per_seg; | ||
1774 | int mtt_size = qp_get_mtt_size(qpc); | 1771 | int mtt_size = qp_get_mtt_size(qpc); |
1775 | struct res_cq *rcq; | 1772 | struct res_cq *rcq; |
1776 | struct res_cq *scq; | 1773 | struct res_cq *scq; |
@@ -1786,8 +1783,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
1786 | return err; | 1783 | return err; |
1787 | qp->local_qpn = local_qpn; | 1784 | qp->local_qpn = local_qpn; |
1788 | 1785 | ||
1789 | err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT, | 1786 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
1790 | &mtt); | ||
1791 | if (err) | 1787 | if (err) |
1792 | goto ex_abort; | 1788 | goto ex_abort; |
1793 | 1789 | ||
@@ -1836,7 +1832,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, | |||
1836 | qp->srq = srq; | 1832 | qp->srq = srq; |
1837 | } | 1833 | } |
1838 | put_res(dev, slave, rcqn, RES_CQ); | 1834 | put_res(dev, slave, rcqn, RES_CQ); |
1839 | put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT); | 1835 | put_res(dev, slave, mtt_base, RES_MTT); |
1840 | res_end_move(dev, slave, RES_QP, qpn); | 1836 | res_end_move(dev, slave, RES_QP, qpn); |
1841 | 1837 | ||
1842 | return 0; | 1838 | return 0; |
@@ -1850,14 +1846,14 @@ ex_put_scq: | |||
1850 | ex_put_rcq: | 1846 | ex_put_rcq: |
1851 | put_res(dev, slave, rcqn, RES_CQ); | 1847 | put_res(dev, slave, rcqn, RES_CQ); |
1852 | ex_put_mtt: | 1848 | ex_put_mtt: |
1853 | put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT); | 1849 | put_res(dev, slave, mtt_base, RES_MTT); |
1854 | ex_abort: | 1850 | ex_abort: |
1855 | res_abort_move(dev, slave, RES_QP, qpn); | 1851 | res_abort_move(dev, slave, RES_QP, qpn); |
1856 | 1852 | ||
1857 | return err; | 1853 | return err; |
1858 | } | 1854 | } |
1859 | 1855 | ||
1860 | static int eq_get_mtt_seg(struct mlx4_eq_context *eqc) | 1856 | static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) |
1861 | { | 1857 | { |
1862 | return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; | 1858 | return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; |
1863 | } | 1859 | } |
@@ -1873,7 +1869,7 @@ static int eq_get_mtt_size(struct mlx4_eq_context *eqc) | |||
1873 | return 1 << (log_eq_size + 5 - page_shift); | 1869 | return 1 << (log_eq_size + 5 - page_shift); |
1874 | } | 1870 | } |
1875 | 1871 | ||
1876 | static int cq_get_mtt_seg(struct mlx4_cq_context *cqc) | 1872 | static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) |
1877 | { | 1873 | { |
1878 | return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; | 1874 | return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; |
1879 | } | 1875 | } |
@@ -1899,8 +1895,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, | |||
1899 | int eqn = vhcr->in_modifier; | 1895 | int eqn = vhcr->in_modifier; |
1900 | int res_id = (slave << 8) | eqn; | 1896 | int res_id = (slave << 8) | eqn; |
1901 | struct mlx4_eq_context *eqc = inbox->buf; | 1897 | struct mlx4_eq_context *eqc = inbox->buf; |
1902 | int mtt_base = (eq_get_mtt_seg(eqc) / dev->caps.mtt_entry_sz) * | 1898 | int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; |
1903 | dev->caps.mtts_per_seg; | ||
1904 | int mtt_size = eq_get_mtt_size(eqc); | 1899 | int mtt_size = eq_get_mtt_size(eqc); |
1905 | struct res_eq *eq; | 1900 | struct res_eq *eq; |
1906 | struct res_mtt *mtt; | 1901 | struct res_mtt *mtt; |
@@ -1912,8 +1907,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, | |||
1912 | if (err) | 1907 | if (err) |
1913 | goto out_add; | 1908 | goto out_add; |
1914 | 1909 | ||
1915 | err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT, | 1910 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
1916 | &mtt); | ||
1917 | if (err) | 1911 | if (err) |
1918 | goto out_move; | 1912 | goto out_move; |
1919 | 1913 | ||
@@ -1986,7 +1980,8 @@ int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, | |||
1986 | /* Call the SW implementation of write_mtt: | 1980 | /* Call the SW implementation of write_mtt: |
1987 | * - Prepare a dummy mtt struct | 1981 | * - Prepare a dummy mtt struct |
1988 | * - Translate inbox contents to simple addresses in host endianess */ | 1982 | * - Translate inbox contents to simple addresses in host endianess */ |
1989 | mtt.first_seg = 0; | 1983 | mtt.offset = 0; /* TBD this is broken but I don't handle it since |
1984 | we don't really use it */ | ||
1990 | mtt.order = 0; | 1985 | mtt.order = 0; |
1991 | mtt.page_shift = 0; | 1986 | mtt.page_shift = 0; |
1992 | for (i = 0; i < npages; ++i) | 1987 | for (i = 0; i < npages; ++i) |
@@ -2137,16 +2132,14 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, | |||
2137 | int err; | 2132 | int err; |
2138 | int cqn = vhcr->in_modifier; | 2133 | int cqn = vhcr->in_modifier; |
2139 | struct mlx4_cq_context *cqc = inbox->buf; | 2134 | struct mlx4_cq_context *cqc = inbox->buf; |
2140 | int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) * | 2135 | int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; |
2141 | dev->caps.mtts_per_seg; | ||
2142 | struct res_cq *cq; | 2136 | struct res_cq *cq; |
2143 | struct res_mtt *mtt; | 2137 | struct res_mtt *mtt; |
2144 | 2138 | ||
2145 | err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); | 2139 | err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); |
2146 | if (err) | 2140 | if (err) |
2147 | return err; | 2141 | return err; |
2148 | err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT, | 2142 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
2149 | &mtt); | ||
2150 | if (err) | 2143 | if (err) |
2151 | goto out_move; | 2144 | goto out_move; |
2152 | err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); | 2145 | err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); |
@@ -2228,8 +2221,7 @@ static int handle_resize(struct mlx4_dev *dev, int slave, | |||
2228 | struct res_mtt *orig_mtt; | 2221 | struct res_mtt *orig_mtt; |
2229 | struct res_mtt *mtt; | 2222 | struct res_mtt *mtt; |
2230 | struct mlx4_cq_context *cqc = inbox->buf; | 2223 | struct mlx4_cq_context *cqc = inbox->buf; |
2231 | int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) * | 2224 | int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; |
2232 | dev->caps.mtts_per_seg; | ||
2233 | 2225 | ||
2234 | err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); | 2226 | err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); |
2235 | if (err) | 2227 | if (err) |
@@ -2240,8 +2232,7 @@ static int handle_resize(struct mlx4_dev *dev, int slave, | |||
2240 | goto ex_put; | 2232 | goto ex_put; |
2241 | } | 2233 | } |
2242 | 2234 | ||
2243 | err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT, | 2235 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
2244 | &mtt); | ||
2245 | if (err) | 2236 | if (err) |
2246 | goto ex_put; | 2237 | goto ex_put; |
2247 | 2238 | ||
@@ -2325,8 +2316,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, | |||
2325 | struct res_mtt *mtt; | 2316 | struct res_mtt *mtt; |
2326 | struct res_srq *srq; | 2317 | struct res_srq *srq; |
2327 | struct mlx4_srq_context *srqc = inbox->buf; | 2318 | struct mlx4_srq_context *srqc = inbox->buf; |
2328 | int mtt_base = (srq_get_mtt_seg(srqc) / dev->caps.mtt_entry_sz) * | 2319 | int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; |
2329 | dev->caps.mtts_per_seg; | ||
2330 | 2320 | ||
2331 | if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) | 2321 | if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) |
2332 | return -EINVAL; | 2322 | return -EINVAL; |
@@ -2334,8 +2324,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, | |||
2334 | err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); | 2324 | err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); |
2335 | if (err) | 2325 | if (err) |
2336 | return err; | 2326 | return err; |
2337 | err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, | 2327 | err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); |
2338 | RES_MTT, &mtt); | ||
2339 | if (err) | 2328 | if (err) |
2340 | goto ex_abort; | 2329 | goto ex_abort; |
2341 | err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), | 2330 | err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), |