diff options
author | David S. Miller <davem@davemloft.net> | 2019-08-24 19:27:09 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2019-08-24 19:27:09 -0400 |
commit | d37fb9758f76316a9dd9aaafffad726eb2bc020a (patch) | |
tree | 8e27ec22f83e72421a97618ad2a44dfae6915276 | |
parent | 0c69b19f92dfcc0962bbc09741677f658bc55452 (diff) | |
parent | a195784c105b2907b45fd62307d9ce821da9dc20 (diff) |
Merge tag 'mlx5-fixes-2019-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:
====================
Mellanox, mlx5 fixes 2019-08-22
This series introduces some fixes to mlx5 driver.
1) From Moshe, two fixes for firmware health reporter
2) From Eran, two ktls fixes.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 38 | ||||
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/health.c | 22 |
2 files changed, 29 insertions, 31 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 8b93101e1a09..7833ddef0427 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | |||
@@ -109,13 +109,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn, | |||
109 | 109 | ||
110 | static void tx_fill_wi(struct mlx5e_txqsq *sq, | 110 | static void tx_fill_wi(struct mlx5e_txqsq *sq, |
111 | u16 pi, u8 num_wqebbs, | 111 | u16 pi, u8 num_wqebbs, |
112 | skb_frag_t *resync_dump_frag) | 112 | skb_frag_t *resync_dump_frag, |
113 | u32 num_bytes) | ||
113 | { | 114 | { |
114 | struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; | 115 | struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; |
115 | 116 | ||
116 | wi->skb = NULL; | 117 | wi->skb = NULL; |
117 | wi->num_wqebbs = num_wqebbs; | 118 | wi->num_wqebbs = num_wqebbs; |
118 | wi->resync_dump_frag = resync_dump_frag; | 119 | wi->resync_dump_frag = resync_dump_frag; |
120 | wi->num_bytes = num_bytes; | ||
119 | } | 121 | } |
120 | 122 | ||
121 | void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) | 123 | void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) |
@@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq, | |||
143 | 145 | ||
144 | umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); | 146 | umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); |
145 | build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); | 147 | build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); |
146 | tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL); | 148 | tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0); |
147 | sq->pc += MLX5E_KTLS_STATIC_WQEBBS; | 149 | sq->pc += MLX5E_KTLS_STATIC_WQEBBS; |
148 | } | 150 | } |
149 | 151 | ||
@@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq, | |||
157 | 159 | ||
158 | wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); | 160 | wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); |
159 | build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); | 161 | build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); |
160 | tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL); | 162 | tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0); |
161 | sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; | 163 | sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; |
162 | } | 164 | } |
163 | 165 | ||
@@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, | |||
248 | mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); | 250 | mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); |
249 | } | 251 | } |
250 | 252 | ||
253 | struct mlx5e_dump_wqe { | ||
254 | struct mlx5_wqe_ctrl_seg ctrl; | ||
255 | struct mlx5_wqe_data_seg data; | ||
256 | }; | ||
257 | |||
251 | static int | 258 | static int |
252 | tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, | 259 | tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, |
253 | skb_frag_t *frag, u32 tisn, bool first) | 260 | skb_frag_t *frag, u32 tisn, bool first) |
254 | { | 261 | { |
255 | struct mlx5_wqe_ctrl_seg *cseg; | 262 | struct mlx5_wqe_ctrl_seg *cseg; |
256 | struct mlx5_wqe_eth_seg *eseg; | ||
257 | struct mlx5_wqe_data_seg *dseg; | 263 | struct mlx5_wqe_data_seg *dseg; |
258 | struct mlx5e_tx_wqe *wqe; | 264 | struct mlx5e_dump_wqe *wqe; |
259 | dma_addr_t dma_addr = 0; | 265 | dma_addr_t dma_addr = 0; |
260 | u16 ds_cnt, ds_cnt_inl; | ||
261 | u8 num_wqebbs; | 266 | u8 num_wqebbs; |
262 | u16 pi, ihs; | 267 | u16 ds_cnt; |
263 | int fsz; | 268 | int fsz; |
264 | 269 | u16 pi; | |
265 | ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; | ||
266 | ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb)); | ||
267 | ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); | ||
268 | ds_cnt += ds_cnt_inl; | ||
269 | ds_cnt += 1; /* one frag */ | ||
270 | 270 | ||
271 | wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); | 271 | wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); |
272 | 272 | ||
273 | ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; | ||
273 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); | 274 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); |
274 | 275 | ||
275 | cseg = &wqe->ctrl; | 276 | cseg = &wqe->ctrl; |
276 | eseg = &wqe->eth; | 277 | dseg = &wqe->data; |
277 | dseg = wqe->data; | ||
278 | 278 | ||
279 | cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); | 279 | cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); |
280 | cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); | 280 | cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); |
281 | cseg->tisn = cpu_to_be32(tisn << 8); | 281 | cseg->tisn = cpu_to_be32(tisn << 8); |
282 | cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; | 282 | cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; |
283 | 283 | ||
284 | eseg->inline_hdr.sz = cpu_to_be16(ihs); | ||
285 | memcpy(eseg->inline_hdr.start, skb->data, ihs); | ||
286 | dseg += ds_cnt_inl; | ||
287 | |||
288 | fsz = skb_frag_size(frag); | 284 | fsz = skb_frag_size(frag); |
289 | dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, | 285 | dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, |
290 | DMA_TO_DEVICE); | 286 | DMA_TO_DEVICE); |
@@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
296 | dseg->byte_count = cpu_to_be32(fsz); | 292 | dseg->byte_count = cpu_to_be32(fsz); |
297 | mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); | 293 | mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); |
298 | 294 | ||
299 | tx_fill_wi(sq, pi, num_wqebbs, frag); | 295 | tx_fill_wi(sq, pi, num_wqebbs, frag, fsz); |
300 | sq->pc += num_wqebbs; | 296 | sq->pc += num_wqebbs; |
301 | 297 | ||
302 | WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, | 298 | WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, |
@@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq) | |||
323 | struct mlx5_wq_cyc *wq = &sq->wq; | 319 | struct mlx5_wq_cyc *wq = &sq->wq; |
324 | u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); | 320 | u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
325 | 321 | ||
326 | tx_fill_wi(sq, pi, 1, NULL); | 322 | tx_fill_wi(sq, pi, 1, NULL, 0); |
327 | 323 | ||
328 | mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); | 324 | mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); |
329 | } | 325 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9314777d99e3..d685122d9ff7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, | |||
590 | data_size = crdump_size - offset; | 590 | data_size = crdump_size - offset; |
591 | else | 591 | else |
592 | data_size = MLX5_CR_DUMP_CHUNK_SIZE; | 592 | data_size = MLX5_CR_DUMP_CHUNK_SIZE; |
593 | err = devlink_fmsg_binary_put(fmsg, cr_data, data_size); | 593 | err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset, |
594 | data_size); | ||
594 | if (err) | 595 | if (err) |
595 | goto free_data; | 596 | goto free_data; |
596 | } | 597 | } |
@@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t) | |||
700 | if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) | 701 | if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) |
701 | goto out; | 702 | goto out; |
702 | 703 | ||
704 | fatal_error = check_fatal_sensors(dev); | ||
705 | |||
706 | if (fatal_error && !health->fatal_error) { | ||
707 | mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); | ||
708 | dev->priv.health.fatal_error = fatal_error; | ||
709 | print_health_info(dev); | ||
710 | mlx5_trigger_health_work(dev); | ||
711 | goto out; | ||
712 | } | ||
713 | |||
703 | count = ioread32be(health->health_counter); | 714 | count = ioread32be(health->health_counter); |
704 | if (count == health->prev) | 715 | if (count == health->prev) |
705 | ++health->miss_counter; | 716 | ++health->miss_counter; |
@@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t) | |||
718 | if (health->synd && health->synd != prev_synd) | 729 | if (health->synd && health->synd != prev_synd) |
719 | queue_work(health->wq, &health->report_work); | 730 | queue_work(health->wq, &health->report_work); |
720 | 731 | ||
721 | fatal_error = check_fatal_sensors(dev); | ||
722 | |||
723 | if (fatal_error && !health->fatal_error) { | ||
724 | mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); | ||
725 | dev->priv.health.fatal_error = fatal_error; | ||
726 | print_health_info(dev); | ||
727 | mlx5_trigger_health_work(dev); | ||
728 | } | ||
729 | |||
730 | out: | 732 | out: |
731 | mod_timer(&health->timer, get_next_poll_jiffies()); | 733 | mod_timer(&health->timer, get_next_poll_jiffies()); |
732 | } | 734 | } |