diff options
| author | David S. Miller <davem@davemloft.net> | 2014-12-11 14:47:40 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2014-12-11 14:47:40 -0500 |
| commit | efef793926f1baec7f2c8638c6d13576e6fe8997 (patch) | |
| tree | 91215913abb8d16177517d254f677c7119461875 /include/linux | |
| parent | 630f4b70567fa0e2dc9283829f7cee04a5de3be2 (diff) | |
| parent | 7d077cd34eabb2ffd05abe0f2cad01da1ef11712 (diff) | |
Merge branch 'mlx4-next'
Or Gerlitz says:
====================
mlx4 driver update
This series from Matan, Jenny, Dotan and myself is mostly about adding
support to a new performance optimized flow steering mode (patches 4-10).
The 1st two patches are small fixes (one for VXLAN and one for SRIOV),
and the third patch is a fix to avoid a hard-lockup situation when many
(hundreds of) processes holding user-space QPs/CQs get events.
Matan and Or.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/mlx4/device.h | 51 |
1 file changed, 46 insertions, 5 deletions
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index cf09e65c2901..25c791e295fd 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -117,6 +117,14 @@ enum { | |||
| 117 | MLX4_STEERING_MODE_DEVICE_MANAGED | 117 | MLX4_STEERING_MODE_DEVICE_MANAGED |
| 118 | }; | 118 | }; |
| 119 | 119 | ||
| 120 | enum { | ||
| 121 | MLX4_STEERING_DMFS_A0_DEFAULT, | ||
| 122 | MLX4_STEERING_DMFS_A0_DYNAMIC, | ||
| 123 | MLX4_STEERING_DMFS_A0_STATIC, | ||
| 124 | MLX4_STEERING_DMFS_A0_DISABLE, | ||
| 125 | MLX4_STEERING_DMFS_A0_NOT_SUPPORTED | ||
| 126 | }; | ||
| 127 | |||
| 120 | static inline const char *mlx4_steering_mode_str(int steering_mode) | 128 | static inline const char *mlx4_steering_mode_str(int steering_mode) |
| 121 | { | 129 | { |
| 122 | switch (steering_mode) { | 130 | switch (steering_mode) { |
| @@ -191,7 +199,26 @@ enum { | |||
| 191 | MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, | 199 | MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, |
| 192 | MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, | 200 | MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, |
| 193 | MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, | 201 | MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, |
| 194 | MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18 | 202 | MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, |
| 203 | MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 | ||
| 204 | }; | ||
| 205 | |||
| 206 | enum { | ||
| 207 | MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0, | ||
| 208 | MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 | ||
| 209 | }; | ||
| 210 | |||
| 211 | /* bit enums for an 8-bit flags field indicating special use | ||
| 212 | * QPs which require special handling in qp_reserve_range. | ||
| 213 | * Currently, this only includes QPs used by the ETH interface, | ||
| 214 | * where we expect to use blueflame. These QPs must not have | ||
| 215 | * bits 6 and 7 set in their qp number. | ||
| 216 | * | ||
| 217 | * This enum may use only bits 0..7. | ||
| 218 | */ | ||
| 219 | enum { | ||
| 220 | MLX4_RESERVE_A0_QP = 1 << 6, | ||
| 221 | MLX4_RESERVE_ETH_BF_QP = 1 << 7, | ||
| 195 | }; | 222 | }; |
| 196 | 223 | ||
| 197 | enum { | 224 | enum { |
| @@ -207,7 +234,8 @@ enum { | |||
| 207 | 234 | ||
| 208 | enum { | 235 | enum { |
| 209 | MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, | 236 | MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, |
| 210 | MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 | 237 | MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, |
| 238 | MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 | ||
| 211 | }; | 239 | }; |
| 212 | 240 | ||
| 213 | 241 | ||
| @@ -333,6 +361,8 @@ enum { | |||
| 333 | 361 | ||
| 334 | enum mlx4_qp_region { | 362 | enum mlx4_qp_region { |
| 335 | MLX4_QP_REGION_FW = 0, | 363 | MLX4_QP_REGION_FW = 0, |
| 364 | MLX4_QP_REGION_RSS_RAW_ETH, | ||
| 365 | MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH, | ||
| 336 | MLX4_QP_REGION_ETH_ADDR, | 366 | MLX4_QP_REGION_ETH_ADDR, |
| 337 | MLX4_QP_REGION_FC_ADDR, | 367 | MLX4_QP_REGION_FC_ADDR, |
| 338 | MLX4_QP_REGION_FC_EXCH, | 368 | MLX4_QP_REGION_FC_EXCH, |
| @@ -462,6 +492,7 @@ struct mlx4_caps { | |||
| 462 | int reserved_mcgs; | 492 | int reserved_mcgs; |
| 463 | int num_qp_per_mgm; | 493 | int num_qp_per_mgm; |
| 464 | int steering_mode; | 494 | int steering_mode; |
| 495 | int dmfs_high_steer_mode; | ||
| 465 | int fs_log_max_ucast_qp_range_size; | 496 | int fs_log_max_ucast_qp_range_size; |
| 466 | int num_pds; | 497 | int num_pds; |
| 467 | int reserved_pds; | 498 | int reserved_pds; |
| @@ -501,6 +532,9 @@ struct mlx4_caps { | |||
| 501 | u64 phys_port_id[MLX4_MAX_PORTS + 1]; | 532 | u64 phys_port_id[MLX4_MAX_PORTS + 1]; |
| 502 | int tunnel_offload_mode; | 533 | int tunnel_offload_mode; |
| 503 | u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; | 534 | u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; |
| 535 | u8 alloc_res_qp_mask; | ||
| 536 | u32 dmfs_high_rate_qpn_base; | ||
| 537 | u32 dmfs_high_rate_qpn_range; | ||
| 504 | }; | 538 | }; |
| 505 | 539 | ||
| 506 | struct mlx4_buf_list { | 540 | struct mlx4_buf_list { |
| @@ -621,6 +655,11 @@ struct mlx4_cq { | |||
| 621 | 655 | ||
| 622 | atomic_t refcount; | 656 | atomic_t refcount; |
| 623 | struct completion free; | 657 | struct completion free; |
| 658 | struct { | ||
| 659 | struct list_head list; | ||
| 660 | void (*comp)(struct mlx4_cq *); | ||
| 661 | void *priv; | ||
| 662 | } tasklet_ctx; | ||
| 624 | }; | 663 | }; |
| 625 | 664 | ||
| 626 | struct mlx4_qp { | 665 | struct mlx4_qp { |
| @@ -869,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) | |||
| 869 | static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) | 908 | static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) |
| 870 | { | 909 | { |
| 871 | return (qpn < dev->phys_caps.base_sqpn + 8 + | 910 | return (qpn < dev->phys_caps.base_sqpn + 8 + |
| 872 | 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); | 911 | 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) && |
| 912 | qpn >= dev->phys_caps.base_sqpn) || | ||
| 913 | (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); | ||
| 873 | } | 914 | } |
| 874 | 915 | ||
| 875 | static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) | 916 | static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) |
| @@ -945,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | |||
| 945 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, | 986 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, |
| 946 | unsigned vector, int collapsed, int timestamp_en); | 987 | unsigned vector, int collapsed, int timestamp_en); |
| 947 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | 988 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); |
| 948 | 989 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, | |
| 949 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); | 990 | int *base, u8 flags); |
| 950 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); | 991 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); |
| 951 | 992 | ||
| 952 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, | 993 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, |
