author    Doug Ledford <dledford@redhat.com>  2016-12-14 14:44:25 -0500
committer Doug Ledford <dledford@redhat.com>  2016-12-14 14:44:25 -0500
commit    86ef0beaa0bdbec70d4261977b8b4a100fe54bfe (patch)
tree      d2c1b52a7a6e493bada10dbdbcd2f8511baccc66
parent    253f8b22e0ad643edafd75e831e5c765732877f5 (diff)
parent    7ceb740c540dde362b3055ad92c6a38009eb7a83 (diff)
Merge branch 'mlx' into merge-test
40 files changed, 875 insertions(+), 336 deletions(-)
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 19d499dcab76..1acc95b3aaa3 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -72,9 +72,6 @@ void ib_device_unregister_sysfs(struct ib_device *device);
 void ib_cache_setup(void);
 void ib_cache_cleanup(void);
 
-int ib_resolve_eth_dmac(struct ib_qp *qp,
-                        struct ib_qp_attr *qp_attr, int *qp_attr_mask);
-
 typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
                                      struct net_device *idev, void *cookie);
 
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index df26a741cda6..455034ac994e 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -289,5 +289,6 @@ IB_UVERBS_DECLARE_EX_CMD(modify_wq);
 IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
 IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
 IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
+IB_UVERBS_DECLARE_EX_CMD(modify_qp);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index cb3f515a2285..09b649159e6c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2328,94 +2328,88 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
     }
 }
 
-ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
-                            struct ib_device *ib_dev,
-                            const char __user *buf, int in_len,
-                            int out_len)
+static int modify_qp(struct ib_uverbs_file *file,
+                     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
 {
-    struct ib_uverbs_modify_qp cmd;
-    struct ib_udata udata;
-    struct ib_qp *qp;
-    struct ib_qp_attr *attr;
-    int ret;
-
-    if (copy_from_user(&cmd, buf, sizeof cmd))
-        return -EFAULT;
-
-    INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
-               out_len);
+    struct ib_qp_attr *attr;
+    struct ib_qp *qp;
+    int ret;
 
     attr = kmalloc(sizeof *attr, GFP_KERNEL);
     if (!attr)
         return -ENOMEM;
 
-    qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+    qp = idr_read_qp(cmd->base.qp_handle, file->ucontext);
     if (!qp) {
         ret = -EINVAL;
         goto out;
     }
 
-    attr->qp_state = cmd.qp_state;
-    attr->cur_qp_state = cmd.cur_qp_state;
-    attr->path_mtu = cmd.path_mtu;
-    attr->path_mig_state = cmd.path_mig_state;
-    attr->qkey = cmd.qkey;
-    attr->rq_psn = cmd.rq_psn;
-    attr->sq_psn = cmd.sq_psn;
-    attr->dest_qp_num = cmd.dest_qp_num;
-    attr->qp_access_flags = cmd.qp_access_flags;
-    attr->pkey_index = cmd.pkey_index;
-    attr->alt_pkey_index = cmd.alt_pkey_index;
-    attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
-    attr->max_rd_atomic = cmd.max_rd_atomic;
-    attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
-    attr->min_rnr_timer = cmd.min_rnr_timer;
-    attr->port_num = cmd.port_num;
-    attr->timeout = cmd.timeout;
-    attr->retry_cnt = cmd.retry_cnt;
-    attr->rnr_retry = cmd.rnr_retry;
-    attr->alt_port_num = cmd.alt_port_num;
-    attr->alt_timeout = cmd.alt_timeout;
-
-    memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
-    attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
-    attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
-    attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
-    attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
-    attr->ah_attr.dlid = cmd.dest.dlid;
-    attr->ah_attr.sl = cmd.dest.sl;
-    attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
-    attr->ah_attr.static_rate = cmd.dest.static_rate;
-    attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
-    attr->ah_attr.port_num = cmd.dest.port_num;
-
-    memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
-    attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
-    attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
-    attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
-    attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
-    attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
-    attr->alt_ah_attr.sl = cmd.alt_dest.sl;
-    attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
-    attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
-    attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
-    attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
+    attr->qp_state = cmd->base.qp_state;
+    attr->cur_qp_state = cmd->base.cur_qp_state;
+    attr->path_mtu = cmd->base.path_mtu;
+    attr->path_mig_state = cmd->base.path_mig_state;
+    attr->qkey = cmd->base.qkey;
+    attr->rq_psn = cmd->base.rq_psn;
+    attr->sq_psn = cmd->base.sq_psn;
+    attr->dest_qp_num = cmd->base.dest_qp_num;
+    attr->qp_access_flags = cmd->base.qp_access_flags;
+    attr->pkey_index = cmd->base.pkey_index;
+    attr->alt_pkey_index = cmd->base.alt_pkey_index;
+    attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+    attr->max_rd_atomic = cmd->base.max_rd_atomic;
+    attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+    attr->min_rnr_timer = cmd->base.min_rnr_timer;
+    attr->port_num = cmd->base.port_num;
+    attr->timeout = cmd->base.timeout;
+    attr->retry_cnt = cmd->base.retry_cnt;
+    attr->rnr_retry = cmd->base.rnr_retry;
+    attr->alt_port_num = cmd->base.alt_port_num;
+    attr->alt_timeout = cmd->base.alt_timeout;
+    attr->rate_limit = cmd->rate_limit;
+
+    memcpy(attr->ah_attr.grh.dgid.raw, cmd->base.dest.dgid, 16);
+    attr->ah_attr.grh.flow_label = cmd->base.dest.flow_label;
+    attr->ah_attr.grh.sgid_index = cmd->base.dest.sgid_index;
+    attr->ah_attr.grh.hop_limit = cmd->base.dest.hop_limit;
+    attr->ah_attr.grh.traffic_class = cmd->base.dest.traffic_class;
+    attr->ah_attr.dlid = cmd->base.dest.dlid;
+    attr->ah_attr.sl = cmd->base.dest.sl;
+    attr->ah_attr.src_path_bits = cmd->base.dest.src_path_bits;
+    attr->ah_attr.static_rate = cmd->base.dest.static_rate;
+    attr->ah_attr.ah_flags = cmd->base.dest.is_global ?
+                             IB_AH_GRH : 0;
+    attr->ah_attr.port_num = cmd->base.dest.port_num;
+
+    memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd->base.alt_dest.dgid, 16);
+    attr->alt_ah_attr.grh.flow_label = cmd->base.alt_dest.flow_label;
+    attr->alt_ah_attr.grh.sgid_index = cmd->base.alt_dest.sgid_index;
+    attr->alt_ah_attr.grh.hop_limit = cmd->base.alt_dest.hop_limit;
+    attr->alt_ah_attr.grh.traffic_class = cmd->base.alt_dest.traffic_class;
+    attr->alt_ah_attr.dlid = cmd->base.alt_dest.dlid;
+    attr->alt_ah_attr.sl = cmd->base.alt_dest.sl;
+    attr->alt_ah_attr.src_path_bits = cmd->base.alt_dest.src_path_bits;
+    attr->alt_ah_attr.static_rate = cmd->base.alt_dest.static_rate;
+    attr->alt_ah_attr.ah_flags = cmd->base.alt_dest.is_global ?
+                                 IB_AH_GRH : 0;
+    attr->alt_ah_attr.port_num = cmd->base.alt_dest.port_num;
 
     if (qp->real_qp == qp) {
-        ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
-        if (ret)
-            goto release_qp;
+        if (cmd->base.attr_mask & IB_QP_AV) {
+            ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
+            if (ret)
+                goto release_qp;
+        }
         ret = qp->device->modify_qp(qp, attr,
-            modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+                                    modify_qp_mask(qp->qp_type,
+                                                   cmd->base.attr_mask),
+                                    udata);
     } else {
-        ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+        ret = ib_modify_qp(qp, attr,
+                           modify_qp_mask(qp->qp_type,
+                                          cmd->base.attr_mask));
     }
 
-    if (ret)
-        goto release_qp;
-
-    ret = in_len;
-
 release_qp:
     put_qp_read(qp);
 
@@ -2425,6 +2419,68 @@ out:
     return ret;
 }
 
+ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+                            struct ib_device *ib_dev,
+                            const char __user *buf, int in_len,
+                            int out_len)
+{
+    struct ib_uverbs_ex_modify_qp cmd = {};
+    struct ib_udata udata;
+    int ret;
+
+    if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
+        return -EFAULT;
+
+    if (cmd.base.attr_mask &
+        ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
+        return -EOPNOTSUPP;
+
+    INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+               in_len - sizeof(cmd.base), out_len);
+
+    ret = modify_qp(file, &cmd, &udata);
+    if (ret)
+        return ret;
+
+    return in_len;
+}
+
+int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
+                           struct ib_device *ib_dev,
+                           struct ib_udata *ucore,
+                           struct ib_udata *uhw)
+{
+    struct ib_uverbs_ex_modify_qp cmd = {};
+    int ret;
+
+    /*
+     * Last bit is reserved for extending the attr_mask by
+     * using another field.
+     */
+    BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
+
+    if (ucore->inlen < sizeof(cmd.base))
+        return -EINVAL;
+
+    ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+    if (ret)
+        return ret;
+
+    if (cmd.base.attr_mask &
+        ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
+        return -EOPNOTSUPP;
+
+    if (ucore->inlen > sizeof(cmd)) {
+        if (ib_is_udata_cleared(ucore, sizeof(cmd),
+                                ucore->inlen - sizeof(cmd)))
+            return -EOPNOTSUPP;
+    }
+
+    ret = modify_qp(file, &cmd, uhw);
+
+    return ret;
+}
+
 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
                              struct ib_device *ib_dev,
                              const char __user *buf, int in_len,
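Note: both entry points above validate attr_mask with the expression mask & ~((LAST << 1) - 1), where (LAST << 1) - 1 is a run of ones covering every attribute bit up to and including the last supported one; any remaining bit fails the command with -EOPNOTSUPP. The standalone sketch below is illustrative only, with a hypothetical stand-in for the kernel's mask constant:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for IB_USER_LAST_QP_ATTR_MASK. */
    #define LAST_SUPPORTED_ATTR (1u << 25)

    /* Return 0 if every set bit is <= LAST_SUPPORTED_ATTR, else -1. */
    static int check_attr_mask(uint32_t mask)
    {
        /* (LAST << 1) - 1 sets all bits through the last supported one. */
        if (mask & ~((LAST_SUPPORTED_ATTR << 1) - 1))
            return -1;  /* unknown attribute bit: -EOPNOTSUPP in the kernel */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_attr_mask(1u << 3));   /* 0: supported */
        printf("%d\n", check_attr_mask(1u << 30));  /* -1: beyond last bit */
        return 0;
    }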
@@ -2875,6 +2931,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
     struct ib_ah *ah;
     struct ib_ah_attr attr;
     int ret;
+    struct ib_udata udata;
 
     if (out_len < sizeof resp)
         return -ENOSPC;
@@ -2882,6 +2939,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
     if (copy_from_user(&cmd, buf, sizeof cmd))
         return -EFAULT;
 
+    INIT_UDATA(&udata, buf + sizeof(cmd),
+               (unsigned long)cmd.response + sizeof(resp),
+               in_len - sizeof(cmd), out_len - sizeof(resp));
+
     uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
     if (!uobj)
         return -ENOMEM;
@@ -2908,12 +2969,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
     memset(&attr.dmac, 0, sizeof(attr.dmac));
     memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
 
-    ah = ib_create_ah(pd, &attr);
+    ah = pd->device->create_ah(pd, &attr, &udata);
+
     if (IS_ERR(ah)) {
         ret = PTR_ERR(ah);
         goto err_put;
     }
 
+    ah->device = pd->device;
+    ah->pd = pd;
+    atomic_inc(&pd->usecnt);
     ah->uobject = uobj;
     uobj->object = ah;
 
@@ -3124,8 +3189,10 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
     kern_spec_val = (void *)kern_spec +
         sizeof(struct ib_uverbs_flow_spec_hdr);
     kern_spec_mask = kern_spec_val + kern_filter_sz;
+    if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
+        return -EINVAL;
 
-    switch (ib_spec->type) {
+    switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
     case IB_FLOW_SPEC_ETH:
         ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
         actual_filter_sz = spec_filter_size(kern_spec_mask,
@@ -3175,6 +3242,21 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
         memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
         memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
         break;
+    case IB_FLOW_SPEC_VXLAN_TUNNEL:
+        ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
+        actual_filter_sz = spec_filter_size(kern_spec_mask,
+                                            kern_filter_sz,
+                                            ib_filter_sz);
+        if (actual_filter_sz <= 0)
+            return -EINVAL;
+        ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
+        memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
+        memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
+
+        if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
+            (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
+            return -EINVAL;
+        break;
     default:
         return -EINVAL;
     }
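Note: the new IB_FLOW_SPEC_VXLAN_TUNNEL case rejects any tunnel_id whose host-order value needs more than 24 bits, since a VXLAN VNI is a 24-bit field. A standalone sketch of the same bound check (illustrative, not part of the commit):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>  /* ntohl(), htonl() */

    /* A VXLAN VNI occupies 24 bits; values >= 2^24 cannot be valid. */
    static int vni_valid(uint32_t tunnel_id_be)
    {
        return ntohl(tunnel_id_be) < (1u << 24);
    }

    int main(void)
    {
        printf("%d\n", vni_valid(htonl(42)));        /* 1: fits in 24 bits */
        printf("%d\n", vni_valid(htonl(1u << 24)));  /* 0: out of range */
        return 0;
    }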
@@ -3745,7 +3827,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
         err = PTR_ERR(flow_id);
         goto err_free;
     }
-    flow_id->qp = qp;
     flow_id->uobject = uobj;
     uobj->object = flow_id;
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3efec031c253..257d0799b526 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -137,6 +137,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
     [IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq,
     [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
     [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
+    [IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 83687646da68..71580cc28c9e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -315,7 +315,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
     struct ib_ah *ah;
 
-    ah = pd->device->create_ah(pd, ah_attr);
+    ah = pd->device->create_ah(pd, ah_attr, NULL);
 
     if (!IS_ERR(ah)) {
         ah->device = pd->device;
@@ -328,7 +328,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 }
 EXPORT_SYMBOL(ib_create_ah);
 
-static int ib_get_header_version(const union rdma_network_hdr *hdr)
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
 {
     const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
     struct iphdr ip4h_checked;
@@ -359,6 +359,7 @@ static int ib_get_header_version(const union rdma_network_hdr *hdr)
         return 4;
     return 6;
 }
+EXPORT_SYMBOL(ib_get_rdma_header_version);
 
 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
                                                      u8 port_num,
@@ -369,7 +370,7 @@ static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
     if (rdma_protocol_ib(device, port_num))
         return RDMA_NETWORK_IB;
 
-    grh_version = ib_get_header_version((union rdma_network_hdr *)grh);
+    grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
 
     if (grh_version == 4)
         return RDMA_NETWORK_IPV4;
@@ -415,9 +416,9 @@ static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
                                      &context, gid_index);
 }
 
-static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
-                                  enum rdma_network_type net_type,
-                                  union ib_gid *sgid, union ib_gid *dgid)
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+                              enum rdma_network_type net_type,
+                              union ib_gid *sgid, union ib_gid *dgid)
 {
     struct sockaddr_in src_in;
     struct sockaddr_in dst_in;
@@ -447,6 +448,7 @@ static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
         return -EINVAL;
     }
 }
+EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
 
 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
                        const struct ib_wc *wc, const struct ib_grh *grh,
@@ -469,8 +471,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
         net_type = ib_get_net_type_by_grh(device, port_num, grh);
         gid_type = ib_network_to_gid_type(net_type);
     }
-    ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
-                                 &sgid, &dgid);
+    ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+                                    &sgid, &dgid);
     if (ret)
         return ret;
 
@@ -1014,6 +1016,7 @@ static const struct {
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                 IB_QP_QKEY),
+                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
                         }
                 }
         },
@@ -1047,6 +1050,7 @@ static const struct {
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                 IB_QP_QKEY),
+                                [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
                         }
                 },
         [IB_QPS_SQD]   = {
@@ -1196,66 +1200,66 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 }
 EXPORT_SYMBOL(ib_modify_qp_is_ok);
 
-int ib_resolve_eth_dmac(struct ib_qp *qp,
-                        struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+int ib_resolve_eth_dmac(struct ib_device *device,
+                        struct ib_ah_attr *ah_attr)
 {
     int ret = 0;
 
-    if (*qp_attr_mask & IB_QP_AV) {
-        if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
-            qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
-            return -EINVAL;
-
-        if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
-            return 0;
-
-        if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
-            rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
-                            qp_attr->ah_attr.dmac);
-        } else {
-            union ib_gid sgid;
-            struct ib_gid_attr sgid_attr;
-            int ifindex;
-            int hop_limit;
-
-            ret = ib_query_gid(qp->device,
-                               qp_attr->ah_attr.port_num,
-                               qp_attr->ah_attr.grh.sgid_index,
-                               &sgid, &sgid_attr);
-
-            if (ret || !sgid_attr.ndev) {
-                if (!ret)
-                    ret = -ENXIO;
-                goto out;
-            }
+    if (ah_attr->port_num < rdma_start_port(device) ||
+        ah_attr->port_num > rdma_end_port(device))
+        return -EINVAL;
 
-            ifindex = sgid_attr.ndev->ifindex;
+    if (!rdma_cap_eth_ah(device, ah_attr->port_num))
+        return 0;
 
-            ret = rdma_addr_find_l2_eth_by_grh(&sgid,
-                                               &qp_attr->ah_attr.grh.dgid,
-                                               qp_attr->ah_attr.dmac,
-                                               NULL, &ifindex, &hop_limit);
+    if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+        rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw,
+                        ah_attr->dmac);
+    } else {
+        union ib_gid sgid;
+        struct ib_gid_attr sgid_attr;
+        int ifindex;
+        int hop_limit;
+
+        ret = ib_query_gid(device,
+                           ah_attr->port_num,
+                           ah_attr->grh.sgid_index,
+                           &sgid, &sgid_attr);
+
+        if (ret || !sgid_attr.ndev) {
+            if (!ret)
+                ret = -ENXIO;
+            goto out;
+        }
 
-            dev_put(sgid_attr.ndev);
+        ifindex = sgid_attr.ndev->ifindex;
 
-            qp_attr->ah_attr.grh.hop_limit = hop_limit;
-        }
+        ret = rdma_addr_find_l2_eth_by_grh(&sgid,
+                                           &ah_attr->grh.dgid,
+                                           ah_attr->dmac,
+                                           NULL, &ifindex, &hop_limit);
+
+        dev_put(sgid_attr.ndev);
+
+        ah_attr->grh.hop_limit = hop_limit;
     }
 out:
     return ret;
 }
 EXPORT_SYMBOL(ib_resolve_eth_dmac);
 
-
 int ib_modify_qp(struct ib_qp *qp,
                  struct ib_qp_attr *qp_attr,
                  int qp_attr_mask)
 {
-    int ret;
 
-    ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
-    if (ret)
-        return ret;
+    if (qp_attr_mask & IB_QP_AV) {
+        int ret;
+
+        ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+        if (ret)
+            return ret;
+    }
 
     return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
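Note: after this refactor ib_resolve_eth_dmac() takes a device and ib_ah_attr instead of a QP, and callers run it only when IB_QP_AV is actually being modified. A simplified sketch of the resulting caller pattern, with stub types standing in for the kernel's structures (only the control flow is the point):

    #include <stdio.h>

    struct ah_attr { unsigned char dmac[6]; };
    struct qp_attr { struct ah_attr ah_attr; };

    #define QP_AV (1 << 7)  /* stand-in for IB_QP_AV */

    static int resolve_eth_dmac(struct ah_attr *ah)
    {
        ah->dmac[0] = 0x02;  /* pretend we resolved a MAC */
        return 0;
    }

    static int modify_qp(struct qp_attr *attr, int mask)
    {
        /* Resolution now happens only when the AV is changing. */
        if (mask & QP_AV) {
            int ret = resolve_eth_dmac(&attr->ah_attr);
            if (ret)
                return ret;
        }
        return 0;  /* ...then hand off to the driver's modify_qp */
    }

    int main(void)
    {
        struct qp_attr attr = { { { 0 } } };
        printf("%d\n", modify_qp(&attr, QP_AV));
        return 0;
    }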
@@ -1734,8 +1738,10 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
         return ERR_PTR(-ENOSYS);
 
     flow_id = qp->device->create_flow(qp, flow_attr, domain);
-    if (!IS_ERR(flow_id))
+    if (!IS_ERR(flow_id)) {
         atomic_inc(&qp->usecnt);
+        flow_id->qp = qp;
+    }
     return flow_id;
 }
 EXPORT_SYMBOL(ib_create_flow);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index cba57bb53dba..9d5fe1853da4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -62,7 +62,8 @@
 #include "common.h"
 
 static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
-                                    struct ib_ah_attr *ah_attr)
+                                    struct ib_ah_attr *ah_attr,
+                                    struct ib_udata *udata)
 {
     return ERR_PTR(-ENOSYS);
 }
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 645e606a17c5..49b51b7e0fd7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -59,7 +59,9 @@ module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
 
 static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
-                                    struct ib_ah_attr *ah_attr)
+                                    struct ib_ah_attr *ah_attr,
+                                    struct ib_udata *udata)
+
 {
     return ERR_PTR(-ENOSYS);
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 24f79ee39fdf..0ac294db3b29 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,7 +39,8 @@
 #define HNS_ROCE_VLAN_SL_BIT_MASK	7
 #define HNS_ROCE_VLAN_SL_SHIFT	13
 
-struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
+struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr,
+                                 struct ib_udata *udata)
 {
     struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
     struct device *dev = &hr_dev->pdev->dev;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index d4f0fce98587..1a6cb5d7a0dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -687,7 +687,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
                                 unsigned long obj, int cnt,
                                 int rr);
 
-struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+                                 struct ib_udata *udata);
 int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int hns_roce_destroy_ah(struct ib_ah *ah);
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 81dafa9a4c3c..7368a50bbdaa 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2704,7 +2704,9 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
  * @ah_attr: address handle attributes
  */
 static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
-                                     struct ib_ah_attr *attr)
+                                     struct ib_ah_attr *attr,
+                                     struct ib_udata *udata)
+
 {
     return ERR_PTR(-ENOSYS);
 }
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 5fc623362731..20c6d17ac8b8 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -111,7 +111,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
             !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
             --ah->av.eth.stat_rate;
     }
-
+    ah->av.eth.sl_tclass_flowlabel |=
+            cpu_to_be32((ah_attr->grh.traffic_class << 20) |
+                        ah_attr->grh.flow_label);
     /*
      * HW requires multicast LID so we just choose one.
      */
@@ -119,12 +121,14 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
         ah->av.ib.dlid = cpu_to_be16(0xc000);
 
     memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
-    ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
+    ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
 
     return &ah->ibah;
 }
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+                                struct ib_udata *udata)
+
 {
     struct mlx4_ib_ah *ah;
     struct ib_ah *ret;
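Note: create_iboe_ah() now builds sl_tclass_flowlabel incrementally: traffic class lands in bits 20-27 and flow label in bits 0-19 (first hunk), then the SL is OR'ed into bits 29-31 (second hunk), so the assignments become |= rather than =. A standalone sketch of the packing; the kernel converts each part with cpu_to_be32, which this host-order sketch omits:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack sl (3 bits @ 29), traffic class (8 bits @ 20) and flow label
     * (20 bits @ 0) the way the two hunks above combine them. */
    static uint32_t pack_sl_tclass_flowlabel(uint32_t sl, uint32_t tclass,
                                             uint32_t flow_label)
    {
        uint32_t v = 0;

        v |= (tclass << 20) | (flow_label & 0xfffff);
        v |= sl << 29;
        return v;
    }

    int main(void)
    {
        printf("0x%08x\n", pack_sl_tclass_flowlabel(3, 0x1a, 0x12345));
        return 0;
    }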
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index b0cd66336fcb..db564ccc0f92 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -39,6 +39,8 @@
 #include <linux/mlx4/cmd.h>
 #include <linux/gfp.h>
 #include <rdma/ib_pma.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
 
 #include <linux/mlx4/driver.h>
 #include "mlx4_ib.h"
@@ -480,6 +482,23 @@ static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
     return -EINVAL;
 }
 
+static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
+                                union ib_gid *dgid)
+{
+    int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
+    enum rdma_network_type net_type;
+
+    if (version == 4)
+        net_type = RDMA_NETWORK_IPV4;
+    else if (version == 6)
+        net_type = RDMA_NETWORK_IPV6;
+    else
+        return -EINVAL;
+
+    return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+                                     sgid, dgid);
+}
+
 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
                           enum ib_qp_type dest_qpt, struct ib_wc *wc,
                           struct ib_grh *grh, struct ib_mad *mad)
@@ -538,7 +557,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
     memset(&attr, 0, sizeof attr);
     attr.port_num = port;
     if (is_eth) {
-        memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+        union ib_gid sgid;
+
+        if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid))
+            return -EINVAL;
         attr.ah_flags = IB_AH_GRH;
     }
     ah = ib_create_ah(tun_ctx->pd, &attr);
@@ -651,6 +673,11 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
         is_eth = 1;
 
     if (is_eth) {
+        union ib_gid dgid;
+        union ib_gid sgid;
+
+        if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
+            return -EINVAL;
         if (!(wc->wc_flags & IB_WC_GRH)) {
             mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
             return -EINVAL;
@@ -659,10 +686,10 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
             mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
             return -EINVAL;
         }
-        err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
+        err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
         if (err && mlx4_is_mf_bonded(dev->dev)) {
             other_port = (port == 1) ? 2 : 1;
-            err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
+            err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
             if (!err) {
                 port = other_port;
                 pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
@@ -702,10 +729,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 
     /* If a grh is present, we demux according to it */
     if (wc->wc_flags & IB_WC_GRH) {
-        slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
-        if (slave < 0) {
-            mlx4_ib_warn(ibdev, "failed matching grh\n");
-            return -ENOENT;
+        if (grh->dgid.global.interface_id ==
+                cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
+            grh->dgid.global.subnet_prefix == cpu_to_be64(
+                atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
+            slave = 0;
+        } else {
+            slave = mlx4_ib_find_real_gid(ibdev, port,
+                                          grh->dgid.global.interface_id);
+            if (slave < 0) {
+                mlx4_ib_warn(ibdev, "failed matching grh\n");
+                return -ENOENT;
+            }
         }
     }
     /* Class-specific handling */
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b54786d13d0..6c61cf9a16a6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -547,6 +547,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
     props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
     props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
     props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+    props->max_ah = INT_MAX;
 
     if (!mlx4_is_slave(dev->dev))
         err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -697,9 +698,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
     if (err)
         goto out;
 
-    props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
-                          IB_WIDTH_4X : IB_WIDTH_1X;
-    props->active_speed = IB_SPEED_QDR;
+    props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
+                          (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+                          IB_WIDTH_4X : IB_WIDTH_1X;
+    props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+                          IB_SPEED_FDR : IB_SPEED_QDR;
     props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
     props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
     props->max_msg_sz = mdev->dev->caps.max_msg_sz;
@@ -2817,14 +2820,19 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         if (!ibdev->ib_uc_qpns_bitmap)
             goto err_steer_qp_release;
 
-        bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
-
-        err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
-                dev, ibdev->steer_qpn_base,
-                ibdev->steer_qpn_base +
-                ibdev->steer_qpn_count - 1);
-        if (err)
-            goto err_steer_free_bitmap;
+        if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
+            bitmap_zero(ibdev->ib_uc_qpns_bitmap,
+                        ibdev->steer_qpn_count);
+            err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+                    dev, ibdev->steer_qpn_base,
+                    ibdev->steer_qpn_base +
+                    ibdev->steer_qpn_count - 1);
+            if (err)
+                goto err_steer_free_bitmap;
+        } else {
+            bitmap_fill(ibdev->ib_uc_qpns_bitmap,
+                        ibdev->steer_qpn_count);
+        }
     }
 
     for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 35141f451e5c..7f3d976d81ed 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -742,7 +742,8 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+                                struct ib_udata *udata);
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int mlx4_ib_destroy_ah(struct ib_ah *ah);
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 570bc866b1d6..c068add8838b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -644,7 +644,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
     int qpn;
     int err;
     struct ib_qp_cap backup_cap;
-    struct mlx4_ib_sqp *sqp;
+    struct mlx4_ib_sqp *sqp = NULL;
     struct mlx4_ib_qp *qp;
     enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
     struct mlx4_ib_cq *mcq;
@@ -933,7 +933,9 @@ err_db:
         mlx4_db_free(dev->dev, &qp->db);
 
 err:
-    if (!*caller_qp)
+    if (sqp)
+        kfree(sqp);
+    else if (!*caller_qp)
         kfree(qp);
     return err;
 }
@@ -1280,7 +1282,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
     if (is_qp0(dev, mqp))
         mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
-    if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+    if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
+        dev->qp1_proxy[mqp->port - 1] == mqp) {
         mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
         dev->qp1_proxy[mqp->port - 1] = NULL;
         mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
@@ -1764,14 +1767,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
         u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
             attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
         union ib_gid gid;
-        struct ib_gid_attr gid_attr;
+        struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
         u16 vlan = 0xffff;
         u8 smac[ETH_ALEN];
         int status = 0;
         int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
             attr->ah_attr.ah_flags & IB_AH_GRH;
 
-        if (is_eth) {
+        if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
             int index = attr->ah_attr.grh.sgid_index;
 
             status = ib_get_cached_gid(ibqp->device, port_num,
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 745efa4cfc71..d090e96f6f01 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -64,7 +64,9 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
     return &ah->ibah;
 }
 
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+                                struct ib_udata *udata)
+
 {
     struct mlx5_ib_ah *ah;
     struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -75,6 +77,27 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
     if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
         return ERR_PTR(-EINVAL);
 
+    if (ll == IB_LINK_LAYER_ETHERNET && udata) {
+        int err;
+        struct mlx5_ib_create_ah_resp resp = {};
+        u32 min_resp_len = offsetof(typeof(resp), dmac) +
+                           sizeof(resp.dmac);
+
+        if (udata->outlen < min_resp_len)
+            return ERR_PTR(-EINVAL);
+
+        resp.response_length = min_resp_len;
+
+        err = ib_resolve_eth_dmac(pd->device, ah_attr);
+        if (err)
+            return ERR_PTR(err);
+
+        memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);
+        err = ib_copy_to_udata(udata, &resp, resp.response_length);
+        if (err)
+            return ERR_PTR(err);
+    }
+
     ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
     if (!ah)
         return ERR_PTR(-ENOMEM);
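Note: mlx5_ib_create_ah() replies through udata only when the caller's buffer can hold at least the dmac field, computing that floor with offsetof() + sizeof(). A sketch of the minimum-length handshake; the struct layout here is a hypothetical stand-in for mlx5_ib_create_ah_resp, not the real ABI:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical response layout; response_length tells userspace how
     * much of the struct the kernel actually filled in. */
    struct create_ah_resp {
        uint32_t response_length;
        uint8_t  dmac[6];
        uint8_t  reserved[2];
    };

    static int fill_resp(struct create_ah_resp *resp, size_t outlen,
                         const uint8_t *dmac)
    {
        size_t min_resp_len = offsetof(struct create_ah_resp, dmac) +
                              sizeof(resp->dmac);

        if (outlen < min_resp_len)
            return -1;  /* caller's buffer is too small (old userspace) */

        resp->response_length = (uint32_t)min_resp_len;
        memcpy(resp->dmac, dmac, sizeof(resp->dmac));
        return 0;
    }

    int main(void)
    {
        struct create_ah_resp resp = { 0 };
        const uint8_t mac[6] = { 0x02, 0, 0, 0, 0, 1 };

        printf("%d\n", fill_resp(&resp, sizeof(resp), mac));  /* 0 */
        printf("%d\n", fill_resp(&resp, 2, mac));             /* -1 */
        return 0;
    }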
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 79d017baf6f4..d72a4367c891 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -731,7 +731,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                           int entries, u32 **cqb,
                           int *cqe_size, int *index, int *inlen)
 {
-    struct mlx5_ib_create_cq ucmd;
+    struct mlx5_ib_create_cq ucmd = {};
     size_t ucmdlen;
     int page_shift;
     __be64 *pas;
@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
     if (err)
         goto err_umem;
 
-    mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
+    mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
                        &ncont, NULL);
     mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
                 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
@@ -792,8 +792,36 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 
     *index = to_mucontext(context)->uuari.uars[0].index;
 
+    if (ucmd.cqe_comp_en == 1) {
+        if (unlikely((*cqe_size != 64) ||
+                     !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+            err = -EOPNOTSUPP;
+            mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
+                         *cqe_size);
+            goto err_cqb;
+        }
+
+        if (unlikely(!ucmd.cqe_comp_res_format ||
+                     !(ucmd.cqe_comp_res_format <
+                       MLX5_IB_CQE_RES_RESERVED) ||
+                     (ucmd.cqe_comp_res_format &
+                      (ucmd.cqe_comp_res_format - 1)))) {
+            err = -EOPNOTSUPP;
+            mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
+                         ucmd.cqe_comp_res_format);
+            goto err_cqb;
+        }
+
+        MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+        MLX5_SET(cqc, cqc, mini_cqe_res_format,
+                 ilog2(ucmd.cqe_comp_res_format));
+    }
+
     return 0;
 
+err_cqb:
+    kfree(cqb);
+
 err_db:
     mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
 
@@ -1125,7 +1153,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         return err;
     }
 
-    mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+    mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
                        npas, NULL);
 
     cq->resize_umem = umem;
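Note: the CQE-compression hunk accepts cqe_comp_res_format only if it is non-zero, below MLX5_IB_CQE_RES_RESERVED, and has exactly one bit set (the x & (x - 1) test), so that ilog2() of the value yields the format index. A standalone sketch of that validation; the reserved bound here is a stand-in value:

    #include <stdint.h>
    #include <stdio.h>

    #define RES_RESERVED (1u << 2)  /* stand-in for MLX5_IB_CQE_RES_RESERVED */

    /* Valid iff fmt is a single set bit below the reserved boundary. */
    static int res_format_valid(uint32_t fmt)
    {
        if (!fmt || !(fmt < RES_RESERVED) || (fmt & (fmt - 1)))
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", res_format_valid(1));  /* 1: e.g. hash format */
        printf("%d\n", res_format_valid(2));  /* 1: e.g. csum format */
        printf("%d\n", res_format_valid(3));  /* 0: two bits set */
        printf("%d\n", res_format_valid(4));  /* 0: reserved */
        return 0;
    }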
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8e0dbd51944e..b81736d625fc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -127,7 +127,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
 
     if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
         && ibdev->ib_active) {
-        struct ib_event ibev = {0};
+        struct ib_event ibev = { };
 
         ibev.device = &ibdev->ib_dev;
         ibev.event = (event == NETDEV_UP) ?
@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
     struct mlx5_ib_dev *dev = to_mdev(ibdev);
     struct mlx5_core_dev *mdev = dev->mdev;
     int err = -ENOMEM;
+    int max_sq_desc;
     int max_rq_sg;
     int max_sq_sg;
     u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
     props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
     max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                 sizeof(struct mlx5_wqe_data_seg);
-    max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
-                 sizeof(struct mlx5_wqe_ctrl_seg)) /
-                 sizeof(struct mlx5_wqe_data_seg);
+    max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+    max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
+                 sizeof(struct mlx5_wqe_raddr_seg)) /
+                 sizeof(struct mlx5_wqe_data_seg);
     props->max_sge = min(max_rq_sg, max_sq_sg);
     props->max_sge_rd = MLX5_MAX_SGE_RD;
     props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
@@ -643,6 +645,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
     props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                        props->max_mcast_grp;
     props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+    props->max_ah = INT_MAX;
     props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
     props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
 
@@ -669,6 +672,40 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
             1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
     }
 
+    if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+                    uhw->outlen)) {
+        resp.mlx5_ib_support_multi_pkt_send_wqes =
+            MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+        resp.response_length +=
+            sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+    }
+
+    if (field_avail(typeof(resp), reserved, uhw->outlen))
+        resp.response_length += sizeof(resp.reserved);
+
+    if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+        resp.cqe_comp_caps.max_num =
+            MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
+            MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
+        resp.cqe_comp_caps.supported_format =
+            MLX5_IB_CQE_RES_FORMAT_HASH |
+            MLX5_IB_CQE_RES_FORMAT_CSUM;
+        resp.response_length += sizeof(resp.cqe_comp_caps);
+    }
+
+    if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+        if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+            MLX5_CAP_GEN(mdev, qos)) {
+            resp.packet_pacing_caps.qp_rate_limit_max =
+                MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+            resp.packet_pacing_caps.qp_rate_limit_min =
+                MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+            resp.packet_pacing_caps.supported_qpts |=
+                1 << IB_QPT_RAW_PACKET;
+        }
+        resp.response_length += sizeof(resp.packet_pacing_caps);
+    }
+
     if (uhw->outlen) {
         err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
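Note: each optional capability block above is reported only when the caller's output buffer reaches past that field (field_avail), and response_length grows by exactly the fields written, so old and new userspace can share one extensible response struct. An illustrative sketch of the idiom, using a hypothetical response layout rather than the real mlx5 one:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct query_resp {
        uint32_t response_length;
        uint32_t base_caps;
        uint32_t cqe_comp_max;       /* newer field */
        uint32_t packet_pacing_max;  /* newest field */
    };

    /* Mirrors the kernel's field_avail(): does the user's buffer reach
     * past this field? (the sizeof operand is never evaluated) */
    #define field_avail(type, fld, sz) \
        (offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

    static void fill_resp(struct query_resp *resp, size_t outlen)
    {
        resp->response_length = offsetof(struct query_resp, cqe_comp_max);
        resp->base_caps = 0x1;

        if (field_avail(struct query_resp, cqe_comp_max, outlen)) {
            resp->cqe_comp_max = 64;
            resp->response_length += sizeof(resp->cqe_comp_max);
        }
        if (field_avail(struct query_resp, packet_pacing_max, outlen)) {
            resp->packet_pacing_max = 100000;
            resp->response_length += sizeof(resp->packet_pacing_max);
        }
    }

    int main(void)
    {
        struct query_resp resp = { 0 };

        fill_resp(&resp, sizeof(resp));
        printf("response_length=%u\n", resp.response_length);
        return 0;
    }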
@@ -1093,7 +1130,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
1093 | resp.response_length += sizeof(resp.cqe_version); | 1130 | resp.response_length += sizeof(resp.cqe_version); |
1094 | 1131 | ||
1095 | if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) { | 1132 | if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) { |
1096 | resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE; | 1133 | resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE | |
1134 | MLX5_USER_CMDS_SUPP_UHW_CREATE_AH; | ||
1097 | resp.response_length += sizeof(resp.cmds_supp_uhw); | 1135 | resp.response_length += sizeof(resp.cmds_supp_uhw); |
1098 | } | 1136 | } |
1099 | 1137 | ||
@@ -1502,6 +1540,22 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) | |||
1502 | MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); | 1540 | MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); |
1503 | } | 1541 | } |
1504 | 1542 | ||
1543 | static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val, | ||
1544 | bool inner) | ||
1545 | { | ||
1546 | if (inner) { | ||
1547 | MLX5_SET(fte_match_set_misc, | ||
1548 | misc_c, inner_ipv6_flow_label, mask); | ||
1549 | MLX5_SET(fte_match_set_misc, | ||
1550 | misc_v, inner_ipv6_flow_label, val); | ||
1551 | } else { | ||
1552 | MLX5_SET(fte_match_set_misc, | ||
1553 | misc_c, outer_ipv6_flow_label, mask); | ||
1554 | MLX5_SET(fte_match_set_misc, | ||
1555 | misc_v, outer_ipv6_flow_label, val); | ||
1556 | } | ||
1557 | } | ||
1558 | |||
1505 | static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) | 1559 | static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) |
1506 | { | 1560 | { |
1507 | MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); | 1561 | MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); |
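set_flow_label() keeps the driver-wide convention that every criterion is written as a pair: the mask into the criteria blob (misc_c) and the value into the value blob (misc_v), with the inner flag only selecting which field pair receives the write. A plain-struct sketch of that pairing and of the mask/value match it produces; the field name is illustrative:

    struct match_blob { unsigned int flow_label; };

    /* Mirror of the helper's shape: mask and value always move together. */
    static void set_label(struct match_blob *c, struct match_blob *v,
                          unsigned int mask, unsigned int val)
    {
            c->flow_label = mask;   /* criteria: which bits to compare */
            v->flow_label = val;    /* value: what those bits must equal */
    }

    static int label_matches(unsigned int pkt, const struct match_blob *c,
                             const struct match_blob *v)
    {
            return (pkt & c->flow_label) == (v->flow_label & c->flow_label);
    }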
@@ -1515,6 +1569,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) | |||
1515 | #define LAST_IPV4_FIELD tos | 1569 | #define LAST_IPV4_FIELD tos |
1516 | #define LAST_IPV6_FIELD traffic_class | 1570 | #define LAST_IPV6_FIELD traffic_class |
1517 | #define LAST_TCP_UDP_FIELD src_port | 1571 | #define LAST_TCP_UDP_FIELD src_port |
1572 | #define LAST_TUNNEL_FIELD tunnel_id | ||
1518 | 1573 | ||
1519 | /* Field is the last supported field */ | 1574 | /* Field is the last supported field */ |
1520 | #define FIELDS_NOT_SUPPORTED(filter, field)\ | 1575 | #define FIELDS_NOT_SUPPORTED(filter, field)\ |
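LAST_TUNNEL_FIELD plugs the new tunnel spec into FIELDS_NOT_SUPPORTED(), which (via memchr_inv() in the driver) rejects any spec whose mask sets bytes past the last field this kernel understands; that guard is what lets newer userspace extend the spec structs without old kernels silently ignoring filters. A portable sketch of the check, assuming an illustrative mask layout:

    #include <stddef.h>

    /* memchr_inv() stand-in: any nonzero byte in [off, len)? */
    static int trailing_nonzero(const void *p, size_t off, size_t len)
    {
            const unsigned char *b = p;
            size_t i;

            for (i = off; i < len; i++)
                    if (b[i])
                            return 1;       /* caller returns -ENOTSUPP */
            return 0;
    }

    struct tunnel_mask {                    /* illustrative layout */
            unsigned int tunnel_id;         /* LAST_TUNNEL_FIELD analogue */
            unsigned int future_field;      /* unknown to this driver */
    };

    static int spec_supported(const struct tunnel_mask *m)
    {
            size_t last_end = offsetof(struct tunnel_mask, tunnel_id) +
                              sizeof(m->tunnel_id);

            return !trailing_nonzero(m, last_end, sizeof(*m));
    }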
@@ -1527,155 +1582,164 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) | |||
1527 | static int parse_flow_attr(u32 *match_c, u32 *match_v, | 1582 | static int parse_flow_attr(u32 *match_c, u32 *match_v, |
1528 | const union ib_flow_spec *ib_spec) | 1583 | const union ib_flow_spec *ib_spec) |
1529 | { | 1584 | { |
1530 | void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c, | ||
1531 | outer_headers); | ||
1532 | void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, | ||
1533 | outer_headers); | ||
1534 | void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, | 1585 | void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, |
1535 | misc_parameters); | 1586 | misc_parameters); |
1536 | void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, | 1587 | void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, |
1537 | misc_parameters); | 1588 | misc_parameters); |
1589 | void *headers_c; | ||
1590 | void *headers_v; | ||
1591 | |||
1592 | if (ib_spec->type & IB_FLOW_SPEC_INNER) { | ||
1593 | headers_c = MLX5_ADDR_OF(fte_match_param, match_c, | ||
1594 | inner_headers); | ||
1595 | headers_v = MLX5_ADDR_OF(fte_match_param, match_v, | ||
1596 | inner_headers); | ||
1597 | } else { | ||
1598 | headers_c = MLX5_ADDR_OF(fte_match_param, match_c, | ||
1599 | outer_headers); | ||
1600 | headers_v = MLX5_ADDR_OF(fte_match_param, match_v, | ||
1601 | outer_headers); | ||
1602 | } | ||
1538 | 1603 | ||
1539 | switch (ib_spec->type) { | 1604 | switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { |
1540 | case IB_FLOW_SPEC_ETH: | 1605 | case IB_FLOW_SPEC_ETH: |
1541 | if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) | 1606 | if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) |
1542 | return -ENOTSUPP; | 1607 | return -ENOTSUPP; |
1543 | 1608 | ||
1544 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, | 1609 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, |
1545 | dmac_47_16), | 1610 | dmac_47_16), |
1546 | ib_spec->eth.mask.dst_mac); | 1611 | ib_spec->eth.mask.dst_mac); |
1547 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, | 1612 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, |
1548 | dmac_47_16), | 1613 | dmac_47_16), |
1549 | ib_spec->eth.val.dst_mac); | 1614 | ib_spec->eth.val.dst_mac); |
1550 | 1615 | ||
1551 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, | 1616 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, |
1552 | smac_47_16), | 1617 | smac_47_16), |
1553 | ib_spec->eth.mask.src_mac); | 1618 | ib_spec->eth.mask.src_mac); |
1554 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, | 1619 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, |
1555 | smac_47_16), | 1620 | smac_47_16), |
1556 | ib_spec->eth.val.src_mac); | 1621 | ib_spec->eth.val.src_mac); |
1557 | 1622 | ||
1558 | if (ib_spec->eth.mask.vlan_tag) { | 1623 | if (ib_spec->eth.mask.vlan_tag) { |
1559 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, | 1624 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, |
1560 | vlan_tag, 1); | 1625 | vlan_tag, 1); |
1561 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, | 1626 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, |
1562 | vlan_tag, 1); | 1627 | vlan_tag, 1); |
1563 | 1628 | ||
1564 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, | 1629 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, |
1565 | first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); | 1630 | first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); |
1566 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, | 1631 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, |
1567 | first_vid, ntohs(ib_spec->eth.val.vlan_tag)); | 1632 | first_vid, ntohs(ib_spec->eth.val.vlan_tag)); |
1568 | 1633 | ||
1569 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, | 1634 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, |
1570 | first_cfi, | 1635 | first_cfi, |
1571 | ntohs(ib_spec->eth.mask.vlan_tag) >> 12); | 1636 | ntohs(ib_spec->eth.mask.vlan_tag) >> 12); |
1572 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, | 1637 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, |
1573 | first_cfi, | 1638 | first_cfi, |
1574 | ntohs(ib_spec->eth.val.vlan_tag) >> 12); | 1639 | ntohs(ib_spec->eth.val.vlan_tag) >> 12); |
1575 | 1640 | ||
1576 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, | 1641 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, |
1577 | first_prio, | 1642 | first_prio, |
1578 | ntohs(ib_spec->eth.mask.vlan_tag) >> 13); | 1643 | ntohs(ib_spec->eth.mask.vlan_tag) >> 13); |
1579 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, | 1644 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, |
1580 | first_prio, | 1645 | first_prio, |
1581 | ntohs(ib_spec->eth.val.vlan_tag) >> 13); | 1646 | ntohs(ib_spec->eth.val.vlan_tag) >> 13); |
1582 | } | 1647 | } |
1583 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, | 1648 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, |
1584 | ethertype, ntohs(ib_spec->eth.mask.ether_type)); | 1649 | ethertype, ntohs(ib_spec->eth.mask.ether_type)); |
1585 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, | 1650 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, |
1586 | ethertype, ntohs(ib_spec->eth.val.ether_type)); | 1651 | ethertype, ntohs(ib_spec->eth.val.ether_type)); |
1587 | break; | 1652 | break; |
1588 | case IB_FLOW_SPEC_IPV4: | 1653 | case IB_FLOW_SPEC_IPV4: |
1589 | if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) | 1654 | if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) |
1590 | return -ENOTSUPP; | 1655 | return -ENOTSUPP; |
1591 | 1656 | ||
1592 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, | 1657 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, |
1593 | ethertype, 0xffff); | 1658 | ethertype, 0xffff); |
1594 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, | 1659 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, |
1595 | ethertype, ETH_P_IP); | 1660 | ethertype, ETH_P_IP); |
1596 | 1661 | ||
1597 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, | 1662 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, |
1598 | src_ipv4_src_ipv6.ipv4_layout.ipv4), | 1663 | src_ipv4_src_ipv6.ipv4_layout.ipv4), |
1599 | &ib_spec->ipv4.mask.src_ip, | 1664 | &ib_spec->ipv4.mask.src_ip, |
1600 | sizeof(ib_spec->ipv4.mask.src_ip)); | 1665 | sizeof(ib_spec->ipv4.mask.src_ip)); |
1601 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, | 1666 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, |
1602 | src_ipv4_src_ipv6.ipv4_layout.ipv4), | 1667 | src_ipv4_src_ipv6.ipv4_layout.ipv4), |
1603 | &ib_spec->ipv4.val.src_ip, | 1668 | &ib_spec->ipv4.val.src_ip, |
1604 | sizeof(ib_spec->ipv4.val.src_ip)); | 1669 | sizeof(ib_spec->ipv4.val.src_ip)); |
1605 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, | 1670 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, |
1606 | dst_ipv4_dst_ipv6.ipv4_layout.ipv4), | 1671 | dst_ipv4_dst_ipv6.ipv4_layout.ipv4), |
1607 | &ib_spec->ipv4.mask.dst_ip, | 1672 | &ib_spec->ipv4.mask.dst_ip, |
1608 | sizeof(ib_spec->ipv4.mask.dst_ip)); | 1673 | sizeof(ib_spec->ipv4.mask.dst_ip)); |
1609 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, | 1674 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, |
1610 | dst_ipv4_dst_ipv6.ipv4_layout.ipv4), | 1675 | dst_ipv4_dst_ipv6.ipv4_layout.ipv4), |
1611 | &ib_spec->ipv4.val.dst_ip, | 1676 | &ib_spec->ipv4.val.dst_ip, |
1612 | sizeof(ib_spec->ipv4.val.dst_ip)); | 1677 | sizeof(ib_spec->ipv4.val.dst_ip)); |
1613 | 1678 | ||
1614 | set_tos(outer_headers_c, outer_headers_v, | 1679 | set_tos(headers_c, headers_v, |
1615 | ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); | 1680 | ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); |
1616 | 1681 | ||
1617 | set_proto(outer_headers_c, outer_headers_v, | 1682 | set_proto(headers_c, headers_v, |
1618 | ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto); | 1683 | ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto); |
1619 | break; | 1684 | break; |
1620 | case IB_FLOW_SPEC_IPV6: | 1685 | case IB_FLOW_SPEC_IPV6: |
1621 | if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) | 1686 | if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) |
1622 | return -ENOTSUPP; | 1687 | return -ENOTSUPP; |
1623 | 1688 | ||
1624 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, | 1689 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, |
1625 | ethertype, 0xffff); | 1690 | ethertype, 0xffff); |
1626 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, | 1691 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, |
1627 | ethertype, ETH_P_IPV6); | 1692 | ethertype, ETH_P_IPV6); |
1628 | 1693 | ||
1629 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, | 1694 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, |
1630 | src_ipv4_src_ipv6.ipv6_layout.ipv6), | 1695 | src_ipv4_src_ipv6.ipv6_layout.ipv6), |
1631 | &ib_spec->ipv6.mask.src_ip, | 1696 | &ib_spec->ipv6.mask.src_ip, |
1632 | sizeof(ib_spec->ipv6.mask.src_ip)); | 1697 | sizeof(ib_spec->ipv6.mask.src_ip)); |
1633 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, | 1698 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, |
1634 | src_ipv4_src_ipv6.ipv6_layout.ipv6), | 1699 | src_ipv4_src_ipv6.ipv6_layout.ipv6), |
1635 | &ib_spec->ipv6.val.src_ip, | 1700 | &ib_spec->ipv6.val.src_ip, |
1636 | sizeof(ib_spec->ipv6.val.src_ip)); | 1701 | sizeof(ib_spec->ipv6.val.src_ip)); |
1637 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, | 1702 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, |
1638 | dst_ipv4_dst_ipv6.ipv6_layout.ipv6), | 1703 | dst_ipv4_dst_ipv6.ipv6_layout.ipv6), |
1639 | &ib_spec->ipv6.mask.dst_ip, | 1704 | &ib_spec->ipv6.mask.dst_ip, |
1640 | sizeof(ib_spec->ipv6.mask.dst_ip)); | 1705 | sizeof(ib_spec->ipv6.mask.dst_ip)); |
1641 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, | 1706 | memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, |
1642 | dst_ipv4_dst_ipv6.ipv6_layout.ipv6), | 1707 | dst_ipv4_dst_ipv6.ipv6_layout.ipv6), |
1643 | &ib_spec->ipv6.val.dst_ip, | 1708 | &ib_spec->ipv6.val.dst_ip, |
1644 | sizeof(ib_spec->ipv6.val.dst_ip)); | 1709 | sizeof(ib_spec->ipv6.val.dst_ip)); |
1645 | 1710 | ||
1646 | set_tos(outer_headers_c, outer_headers_v, | 1711 | set_tos(headers_c, headers_v, |
1647 | ib_spec->ipv6.mask.traffic_class, | 1712 | ib_spec->ipv6.mask.traffic_class, |
1648 | ib_spec->ipv6.val.traffic_class); | 1713 | ib_spec->ipv6.val.traffic_class); |
1649 | 1714 | ||
1650 | set_proto(outer_headers_c, outer_headers_v, | 1715 | set_proto(headers_c, headers_v, |
1651 | ib_spec->ipv6.mask.next_hdr, | 1716 | ib_spec->ipv6.mask.next_hdr, |
1652 | ib_spec->ipv6.val.next_hdr); | 1717 | ib_spec->ipv6.val.next_hdr); |
1653 | 1718 | ||
1654 | MLX5_SET(fte_match_set_misc, misc_params_c, | 1719 | set_flow_label(misc_params_c, misc_params_v, |
1655 | outer_ipv6_flow_label, | 1720 | ntohl(ib_spec->ipv6.mask.flow_label), |
1656 | ntohl(ib_spec->ipv6.mask.flow_label)); | 1721 | ntohl(ib_spec->ipv6.val.flow_label), |
1657 | MLX5_SET(fte_match_set_misc, misc_params_v, | 1722 | ib_spec->type & IB_FLOW_SPEC_INNER); |
1658 | outer_ipv6_flow_label, | 1723 | |
1659 | ntohl(ib_spec->ipv6.val.flow_label)); | ||
1660 | break; | 1724 | break; |
1661 | case IB_FLOW_SPEC_TCP: | 1725 | case IB_FLOW_SPEC_TCP: |
1662 | if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, | 1726 | if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, |
1663 | LAST_TCP_UDP_FIELD)) | 1727 | LAST_TCP_UDP_FIELD)) |
1664 | return -ENOTSUPP; | 1728 | return -ENOTSUPP; |
1665 | 1729 | ||
1666 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, | 1730 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, |
1667 | 0xff); | 1731 | 0xff); |
1668 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, | 1732 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, |
1669 | IPPROTO_TCP); | 1733 | IPPROTO_TCP); |
1670 | 1734 | ||
1671 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport, | 1735 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport, |
1672 | ntohs(ib_spec->tcp_udp.mask.src_port)); | 1736 | ntohs(ib_spec->tcp_udp.mask.src_port)); |
1673 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport, | 1737 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, |
1674 | ntohs(ib_spec->tcp_udp.val.src_port)); | 1738 | ntohs(ib_spec->tcp_udp.val.src_port)); |
1675 | 1739 | ||
1676 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport, | 1740 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, |
1677 | ntohs(ib_spec->tcp_udp.mask.dst_port)); | 1741 | ntohs(ib_spec->tcp_udp.mask.dst_port)); |
1678 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport, | 1742 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, |
1679 | ntohs(ib_spec->tcp_udp.val.dst_port)); | 1743 | ntohs(ib_spec->tcp_udp.val.dst_port)); |
1680 | break; | 1744 | break; |
1681 | case IB_FLOW_SPEC_UDP: | 1745 | case IB_FLOW_SPEC_UDP: |
@@ -1683,21 +1747,31 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v, | |||
1683 | LAST_TCP_UDP_FIELD)) | 1747 | LAST_TCP_UDP_FIELD)) |
1684 | return -ENOTSUPP; | 1748 | return -ENOTSUPP; |
1685 | 1749 | ||
1686 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, | 1750 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, |
1687 | 0xff); | 1751 | 0xff); |
1688 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, | 1752 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, |
1689 | IPPROTO_UDP); | 1753 | IPPROTO_UDP); |
1690 | 1754 | ||
1691 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport, | 1755 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, |
1692 | ntohs(ib_spec->tcp_udp.mask.src_port)); | 1756 | ntohs(ib_spec->tcp_udp.mask.src_port)); |
1693 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport, | 1757 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, |
1694 | ntohs(ib_spec->tcp_udp.val.src_port)); | 1758 | ntohs(ib_spec->tcp_udp.val.src_port)); |
1695 | 1759 | ||
1696 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport, | 1760 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, |
1697 | ntohs(ib_spec->tcp_udp.mask.dst_port)); | 1761 | ntohs(ib_spec->tcp_udp.mask.dst_port)); |
1698 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport, | 1762 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, |
1699 | ntohs(ib_spec->tcp_udp.val.dst_port)); | 1763 | ntohs(ib_spec->tcp_udp.val.dst_port)); |
1700 | break; | 1764 | break; |
1765 | case IB_FLOW_SPEC_VXLAN_TUNNEL: | ||
1766 | if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask, | ||
1767 | LAST_TUNNEL_FIELD)) | ||
1768 | return -ENOTSUPP; | ||
1769 | |||
1770 | MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni, | ||
1771 | ntohl(ib_spec->tunnel.mask.tunnel_id)); | ||
1772 | MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni, | ||
1773 | ntohl(ib_spec->tunnel.val.tunnel_id)); | ||
1774 | break; | ||
1701 | default: | 1775 | default: |
1702 | return -EINVAL; | 1776 | return -EINVAL; |
1703 | } | 1777 | } |
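The reworked parse_flow_attr() treats IB_FLOW_SPEC_INNER as a modifier bit rather than a type: the headers_c/headers_v pointers are chosen up front from the flag, and the switch masks the flag off so one case body serves both the inner and outer headers of a tunneled packet. A compact sketch of that dispatch; the constants below are stand-ins for the ib_verbs encoding:

    enum {
            FLOW_SPEC_ETH   = 0x20,         /* a spec type */
            FLOW_SPEC_INNER = 0x100,        /* modifier bit, not a type */
    };

    static const char *pick_headers(int type)
    {
            const char *hdrs = (type & FLOW_SPEC_INNER) ? "inner_headers"
                                                        : "outer_headers";

            switch (type & ~FLOW_SPEC_INNER) {      /* strip the modifier */
            case FLOW_SPEC_ETH:
                    return hdrs;    /* same match code, either header set */
            default:
                    return (const char *)0;         /* the -EINVAL path */
            }
    }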
@@ -2718,6 +2792,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, | |||
2718 | struct ib_port_immutable *immutable) | 2792 | struct ib_port_immutable *immutable) |
2719 | { | 2793 | { |
2720 | struct ib_port_attr attr; | 2794 | struct ib_port_attr attr; |
2795 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | ||
2796 | enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num); | ||
2721 | int err; | 2797 | int err; |
2722 | 2798 | ||
2723 | err = mlx5_ib_query_port(ibdev, port_num, &attr); | 2799 | err = mlx5_ib_query_port(ibdev, port_num, &attr); |
@@ -2727,7 +2803,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, | |||
2727 | immutable->pkey_tbl_len = attr.pkey_tbl_len; | 2803 | immutable->pkey_tbl_len = attr.pkey_tbl_len; |
2728 | immutable->gid_tbl_len = attr.gid_tbl_len; | 2804 | immutable->gid_tbl_len = attr.gid_tbl_len; |
2729 | immutable->core_cap_flags = get_core_cap_flags(ibdev); | 2805 | immutable->core_cap_flags = get_core_cap_flags(ibdev); |
2730 | immutable->max_mad_size = IB_MGMT_MAD_SIZE; | 2806 | if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce)) |
2807 | immutable->max_mad_size = IB_MGMT_MAD_SIZE; | ||
2731 | 2808 | ||
2732 | return 0; | 2809 | return 0; |
2733 | } | 2810 | } |
@@ -2741,7 +2818,7 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str, | |||
2741 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); | 2818 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); |
2742 | } | 2819 | } |
2743 | 2820 | ||
2744 | static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev) | 2821 | static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) |
2745 | { | 2822 | { |
2746 | struct mlx5_core_dev *mdev = dev->mdev; | 2823 | struct mlx5_core_dev *mdev = dev->mdev; |
2747 | struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev, | 2824 | struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev, |
@@ -2770,7 +2847,7 @@ err_destroy_vport_lag: | |||
2770 | return err; | 2847 | return err; |
2771 | } | 2848 | } |
2772 | 2849 | ||
2773 | static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev) | 2850 | static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev) |
2774 | { | 2851 | { |
2775 | struct mlx5_core_dev *mdev = dev->mdev; | 2852 | struct mlx5_core_dev *mdev = dev->mdev; |
2776 | 2853 | ||
@@ -2782,7 +2859,21 @@ static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev) | |||
2782 | } | 2859 | } |
2783 | } | 2860 | } |
2784 | 2861 | ||
2785 | static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev) | 2862 | static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev) |
2863 | { | ||
2864 | int err; | ||
2865 | |||
2866 | dev->roce.nb.notifier_call = mlx5_netdev_event; | ||
2867 | err = register_netdevice_notifier(&dev->roce.nb); | ||
2868 | if (err) { | ||
2869 | dev->roce.nb.notifier_call = NULL; | ||
2870 | return err; | ||
2871 | } | ||
2872 | |||
2873 | return 0; | ||
2874 | } | ||
2875 | |||
2876 | static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev) | ||
2786 | { | 2877 | { |
2787 | if (dev->roce.nb.notifier_call) { | 2878 | if (dev->roce.nb.notifier_call) { |
2788 | unregister_netdevice_notifier(&dev->roce.nb); | 2879 | unregister_netdevice_notifier(&dev->roce.nb); |
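Splitting mlx5_add_netdev_notifier() out keeps one invariant in a single place: dev->roce.nb.notifier_call is non-NULL exactly while the notifier is registered, which is why the remove side can be called from any error path without tracking extra state. A self-contained sketch of the invariant with stubbed register/unregister calls:

    #include <stddef.h>

    struct nb { int (*notifier_call)(void *); };

    static int  register_nb(struct nb *n)    { (void)n; return 0; }
    static void unregister_nb(struct nb *n)  { (void)n; }
    static int  demo_event(void *data)       { (void)data; return 0; }

    static int add_notifier(struct nb *n)
    {
            int err;

            n->notifier_call = demo_event;
            err = register_nb(n);
            if (err)
                    n->notifier_call = NULL;        /* restore on failure */
            return err;
    }

    static void remove_notifier(struct nb *n)
    {
            if (n->notifier_call) {         /* idempotent teardown */
                    unregister_nb(n);
                    n->notifier_call = NULL;
            }
    }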
@@ -2790,39 +2881,40 @@ static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev) | |||
2790 | } | 2881 | } |
2791 | } | 2882 | } |
2792 | 2883 | ||
2793 | static int mlx5_enable_roce(struct mlx5_ib_dev *dev) | 2884 | static int mlx5_enable_eth(struct mlx5_ib_dev *dev) |
2794 | { | 2885 | { |
2795 | int err; | 2886 | int err; |
2796 | 2887 | ||
2797 | dev->roce.nb.notifier_call = mlx5_netdev_event; | 2888 | err = mlx5_add_netdev_notifier(dev); |
2798 | err = register_netdevice_notifier(&dev->roce.nb); | 2889 | if (err) |
2799 | if (err) { | ||
2800 | dev->roce.nb.notifier_call = NULL; | ||
2801 | return err; | 2890 | return err; |
2802 | } | ||
2803 | 2891 | ||
2804 | err = mlx5_nic_vport_enable_roce(dev->mdev); | 2892 | if (MLX5_CAP_GEN(dev->mdev, roce)) { |
2805 | if (err) | 2893 | err = mlx5_nic_vport_enable_roce(dev->mdev); |
2806 | goto err_unregister_netdevice_notifier; | 2894 | if (err) |
2895 | goto err_unregister_netdevice_notifier; | ||
2896 | } | ||
2807 | 2897 | ||
2808 | err = mlx5_roce_lag_init(dev); | 2898 | err = mlx5_eth_lag_init(dev); |
2809 | if (err) | 2899 | if (err) |
2810 | goto err_disable_roce; | 2900 | goto err_disable_roce; |
2811 | 2901 | ||
2812 | return 0; | 2902 | return 0; |
2813 | 2903 | ||
2814 | err_disable_roce: | 2904 | err_disable_roce: |
2815 | mlx5_nic_vport_disable_roce(dev->mdev); | 2905 | if (MLX5_CAP_GEN(dev->mdev, roce)) |
2906 | mlx5_nic_vport_disable_roce(dev->mdev); | ||
2816 | 2907 | ||
2817 | err_unregister_netdevice_notifier: | 2908 | err_unregister_netdevice_notifier: |
2818 | mlx5_remove_roce_notifier(dev); | 2909 | mlx5_remove_netdev_notifier(dev); |
2819 | return err; | 2910 | return err; |
2820 | } | 2911 | } |
2821 | 2912 | ||
2822 | static void mlx5_disable_roce(struct mlx5_ib_dev *dev) | 2913 | static void mlx5_disable_eth(struct mlx5_ib_dev *dev) |
2823 | { | 2914 | { |
2824 | mlx5_roce_lag_cleanup(dev); | 2915 | mlx5_eth_lag_cleanup(dev); |
2825 | mlx5_nic_vport_disable_roce(dev->mdev); | 2916 | if (MLX5_CAP_GEN(dev->mdev, roce)) |
2917 | mlx5_nic_vport_disable_roce(dev->mdev); | ||
2826 | } | 2918 | } |
2827 | 2919 | ||
2828 | static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev) | 2920 | static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev) |
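mlx5_enable_eth() can now come up on ports whose firmware lacks RoCE: every use of mlx5_nic_vport_enable/disable_roce(), including the unwind labels, is gated on MLX5_CAP_GEN(mdev, roce). The shape is the standard gated-setup-with-goto-unwind pattern, sketched here with stub steps:

    static int  add_nb(void)        { return 0; }
    static void remove_nb(void)     { }
    static int  enable_roce(void)   { return 0; }
    static void disable_roce(void)  { }
    static int  lag_init(void)      { return 0; }

    static int enable_eth(int has_roce)
    {
            int err;

            err = add_nb();
            if (err)
                    return err;

            if (has_roce) {                 /* optional middle step */
                    err = enable_roce();
                    if (err)
                            goto err_nb;
            }

            err = lag_init();
            if (err)
                    goto err_roce;
            return 0;

    err_roce:
            if (has_roce)
                    disable_roce();         /* undo only what ran */
    err_nb:
            remove_nb();
            return err;
    }

The same gate removes the early return in mlx5_ib_add() below, so RoCE-less Ethernet devices register an IB device instead of being skipped.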
@@ -2944,9 +3036,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
2944 | port_type_cap = MLX5_CAP_GEN(mdev, port_type); | 3036 | port_type_cap = MLX5_CAP_GEN(mdev, port_type); |
2945 | ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); | 3037 | ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); |
2946 | 3038 | ||
2947 | if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce)) | ||
2948 | return NULL; | ||
2949 | |||
2950 | printk_once(KERN_INFO "%s", mlx5_version); | 3039 | printk_once(KERN_INFO "%s", mlx5_version); |
2951 | 3040 | ||
2952 | dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); | 3041 | dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); |
@@ -2992,6 +3081,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
2992 | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | | 3081 | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | |
2993 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | | 3082 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | |
2994 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | | 3083 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | |
3084 | (1ull << IB_USER_VERBS_CMD_CREATE_AH) | | ||
3085 | (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | | ||
2995 | (1ull << IB_USER_VERBS_CMD_REG_MR) | | 3086 | (1ull << IB_USER_VERBS_CMD_REG_MR) | |
2996 | (1ull << IB_USER_VERBS_CMD_REREG_MR) | | 3087 | (1ull << IB_USER_VERBS_CMD_REREG_MR) | |
2997 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | | 3088 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | |
@@ -3014,7 +3105,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
3014 | dev->ib_dev.uverbs_ex_cmd_mask = | 3105 | dev->ib_dev.uverbs_ex_cmd_mask = |
3015 | (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | | 3106 | (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | |
3016 | (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | | 3107 | (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | |
3017 | (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP); | 3108 | (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) | |
3109 | (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP); | ||
3018 | 3110 | ||
3019 | dev->ib_dev.query_device = mlx5_ib_query_device; | 3111 | dev->ib_dev.query_device = mlx5_ib_query_device; |
3020 | dev->ib_dev.query_port = mlx5_ib_query_port; | 3112 | dev->ib_dev.query_port = mlx5_ib_query_port; |
@@ -3125,14 +3217,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
3125 | spin_lock_init(&dev->reset_flow_resource_lock); | 3217 | spin_lock_init(&dev->reset_flow_resource_lock); |
3126 | 3218 | ||
3127 | if (ll == IB_LINK_LAYER_ETHERNET) { | 3219 | if (ll == IB_LINK_LAYER_ETHERNET) { |
3128 | err = mlx5_enable_roce(dev); | 3220 | err = mlx5_enable_eth(dev); |
3129 | if (err) | 3221 | if (err) |
3130 | goto err_dealloc; | 3222 | goto err_dealloc; |
3131 | } | 3223 | } |
3132 | 3224 | ||
3133 | err = create_dev_resources(&dev->devr); | 3225 | err = create_dev_resources(&dev->devr); |
3134 | if (err) | 3226 | if (err) |
3135 | goto err_disable_roce; | 3227 | goto err_disable_eth; |
3136 | 3228 | ||
3137 | err = mlx5_ib_odp_init_one(dev); | 3229 | err = mlx5_ib_odp_init_one(dev); |
3138 | if (err) | 3230 | if (err) |
@@ -3176,10 +3268,10 @@ err_odp: | |||
3176 | err_rsrc: | 3268 | err_rsrc: |
3177 | destroy_dev_resources(&dev->devr); | 3269 | destroy_dev_resources(&dev->devr); |
3178 | 3270 | ||
3179 | err_disable_roce: | 3271 | err_disable_eth: |
3180 | if (ll == IB_LINK_LAYER_ETHERNET) { | 3272 | if (ll == IB_LINK_LAYER_ETHERNET) { |
3181 | mlx5_disable_roce(dev); | 3273 | mlx5_disable_eth(dev); |
3182 | mlx5_remove_roce_notifier(dev); | 3274 | mlx5_remove_netdev_notifier(dev); |
3183 | } | 3275 | } |
3184 | 3276 | ||
3185 | err_free_port: | 3277 | err_free_port: |
@@ -3196,14 +3288,14 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) | |||
3196 | struct mlx5_ib_dev *dev = context; | 3288 | struct mlx5_ib_dev *dev = context; |
3197 | enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1); | 3289 | enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1); |
3198 | 3290 | ||
3199 | mlx5_remove_roce_notifier(dev); | 3291 | mlx5_remove_netdev_notifier(dev); |
3200 | ib_unregister_device(&dev->ib_dev); | 3292 | ib_unregister_device(&dev->ib_dev); |
3201 | mlx5_ib_dealloc_q_counters(dev); | 3293 | mlx5_ib_dealloc_q_counters(dev); |
3202 | destroy_umrc_res(dev); | 3294 | destroy_umrc_res(dev); |
3203 | mlx5_ib_odp_remove_one(dev); | 3295 | mlx5_ib_odp_remove_one(dev); |
3204 | destroy_dev_resources(&dev->devr); | 3296 | destroy_dev_resources(&dev->devr); |
3205 | if (ll == IB_LINK_LAYER_ETHERNET) | 3297 | if (ll == IB_LINK_LAYER_ETHERNET) |
3206 | mlx5_disable_roce(dev); | 3298 | mlx5_disable_eth(dev); |
3207 | kfree(dev->port); | 3299 | kfree(dev->port); |
3208 | ib_dealloc_device(&dev->ib_dev); | 3300 | ib_dealloc_device(&dev->ib_dev); |
3209 | } | 3301 | } |
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 996b54e366b0..6851357c16f4 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c | |||
@@ -37,12 +37,15 @@ | |||
37 | 37 | ||
38 | /* @umem: umem object to scan | 38 | /* @umem: umem object to scan |
39 | * @addr: ib virtual address requested by the user | 39 | * @addr: ib virtual address requested by the user |
40 | * @max_page_shift: high limit for page_shift - 0 means no limit | ||
40 | * @count: number of PAGE_SIZE pages covered by umem | 41 | * @count: number of PAGE_SIZE pages covered by umem |
41 | * @shift: page shift for the compound pages found in the region | 42 | * @shift: page shift for the compound pages found in the region |
42 | * @ncont: number of compound pages | 43 | * @ncont: number of compound pages |
43 | * @order: log2 of the number of compound pages | 44 | * @order: log2 of the number of compound pages |
44 | */ | 45 | */ |
45 | void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | 46 | void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, |
47 | unsigned long max_page_shift, | ||
48 | int *count, int *shift, | ||
46 | int *ncont, int *order) | 49 | int *ncont, int *order) |
47 | { | 50 | { |
48 | unsigned long tmp; | 51 | unsigned long tmp; |
@@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | |||
72 | addr = addr >> page_shift; | 75 | addr = addr >> page_shift; |
73 | tmp = (unsigned long)addr; | 76 | tmp = (unsigned long)addr; |
74 | m = find_first_bit(&tmp, BITS_PER_LONG); | 77 | m = find_first_bit(&tmp, BITS_PER_LONG); |
78 | if (max_page_shift) | ||
79 | m = min_t(unsigned long, max_page_shift - page_shift, m); | ||
75 | skip = 1 << m; | 80 | skip = 1 << m; |
76 | mask = skip - 1; | 81 | mask = skip - 1; |
77 | i = 0; | 82 | i = 0; |
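The new max_page_shift argument caps how large a compound page mlx5_ib_cont_pages() may report: the candidate order still comes from the lowest set bit of the page-aligned address, but it is now clamped so that a consumer such as the mkey's log_page_size field (see MLX5_MKEY_PAGE_SHIFT_MASK below) cannot overflow. A freestanding sketch of the calculation:

    /* find_first_bit() stand-in for a single word. */
    static unsigned long lowest_set_bit(unsigned long x)
    {
            unsigned long m = 0;

            if (!x)
                    return 8 * sizeof(x);   /* BITS_PER_LONG when empty */
            while (!(x & 1UL)) {
                    x >>= 1;
                    m++;
            }
            return m;
    }

    static unsigned long cont_order(unsigned long addr,
                                    unsigned long page_shift,
                                    unsigned long max_page_shift)
    {
            unsigned long m = lowest_set_bit(addr >> page_shift);

            if (max_page_shift && m > max_page_shift - page_shift)
                    m = max_page_shift - page_shift;        /* the new clamp */
            return m;       /* compound pages span 1 << m base pages */
    }

Callers that pass 0, such as the QP, RWQ and SRQ paths changed later in this diff, keep the old unbounded behaviour.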
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d5d007740159..ab8961cc8bca 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ | |||
63 | #define MLX5_IB_DEFAULT_UIDX 0xffffff | 63 | #define MLX5_IB_DEFAULT_UIDX 0xffffff |
64 | #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index) | 64 | #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index) |
65 | 65 | ||
66 | #define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size) | ||
67 | |||
66 | enum { | 68 | enum { |
67 | MLX5_IB_MMAP_CMD_SHIFT = 8, | 69 | MLX5_IB_MMAP_CMD_SHIFT = 8, |
68 | MLX5_IB_MMAP_CMD_MASK = 0xff, | 70 | MLX5_IB_MMAP_CMD_MASK = 0xff, |
@@ -387,6 +389,7 @@ struct mlx5_ib_qp { | |||
387 | struct list_head qps_list; | 389 | struct list_head qps_list; |
388 | struct list_head cq_recv_list; | 390 | struct list_head cq_recv_list; |
389 | struct list_head cq_send_list; | 391 | struct list_head cq_send_list; |
392 | u32 rate_limit; | ||
390 | }; | 393 | }; |
391 | 394 | ||
392 | struct mlx5_ib_cq_buf { | 395 | struct mlx5_ib_cq_buf { |
@@ -418,7 +421,7 @@ struct mlx5_umr_wr { | |||
418 | struct ib_pd *pd; | 421 | struct ib_pd *pd; |
419 | unsigned int page_shift; | 422 | unsigned int page_shift; |
420 | unsigned int npages; | 423 | unsigned int npages; |
421 | u32 length; | 424 | u64 length; |
422 | int access_flags; | 425 | int access_flags; |
423 | u32 mkey; | 426 | u32 mkey; |
424 | }; | 427 | }; |
@@ -737,7 +740,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); | |||
737 | int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, | 740 | int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, |
738 | u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, | 741 | u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, |
739 | const void *in_mad, void *response_mad); | 742 | const void *in_mad, void *response_mad); |
740 | struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); | 743 | struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, |
744 | struct ib_udata *udata); | ||
741 | int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); | 745 | int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); |
742 | int mlx5_ib_destroy_ah(struct ib_ah *ah); | 746 | int mlx5_ib_destroy_ah(struct ib_ah *ah); |
743 | struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | 747 | struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, |
@@ -823,7 +827,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, | |||
823 | struct ib_port_attr *props); | 827 | struct ib_port_attr *props); |
824 | int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); | 828 | int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); |
825 | void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev); | 829 | void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev); |
826 | void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | 830 | void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, |
831 | unsigned long max_page_shift, | ||
832 | int *count, int *shift, | ||
827 | int *ncont, int *order); | 833 | int *ncont, int *order); |
828 | void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, | 834 | void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, |
829 | int page_shift, size_t offset, size_t num_pages, | 835 | int page_shift, size_t offset, size_t num_pages, |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index d4ad672b905b..67985c69f9b9 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -627,7 +627,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) | |||
627 | ent->order = i + 2; | 627 | ent->order = i + 2; |
628 | ent->dev = dev; | 628 | ent->dev = dev; |
629 | 629 | ||
630 | if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) | 630 | if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) && |
631 | (mlx5_core_is_pf(dev->mdev))) | ||
631 | limit = dev->mdev->profile->mr_cache[i].limit; | 632 | limit = dev->mdev->profile->mr_cache[i].limit; |
632 | else | 633 | else |
633 | limit = 0; | 634 | limit = 0; |
@@ -645,6 +646,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) | |||
645 | return 0; | 646 | return 0; |
646 | } | 647 | } |
647 | 648 | ||
649 | static void wait_for_async_commands(struct mlx5_ib_dev *dev) | ||
650 | { | ||
651 | struct mlx5_mr_cache *cache = &dev->cache; | ||
652 | struct mlx5_cache_ent *ent; | ||
653 | int total = 0; | ||
654 | int i; | ||
655 | int j; | ||
656 | |||
657 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { | ||
658 | ent = &cache->ent[i]; | ||
659 | for (j = 0; j < 1000; j++) { | ||
660 | if (!ent->pending) | ||
661 | break; | ||
662 | msleep(50); | ||
663 | } | ||
664 | } | ||
665 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { | ||
666 | ent = &cache->ent[i]; | ||
667 | total += ent->pending; | ||
668 | } | ||
669 | |||
670 | if (total) | ||
671 | mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total); | ||
672 | else | ||
673 | mlx5_ib_warn(dev, "done with all pending requests\n"); | ||
674 | } | ||
675 | |||
648 | int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) | 676 | int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) |
649 | { | 677 | { |
650 | int i; | 678 | int i; |
@@ -658,6 +686,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) | |||
658 | clean_keys(dev, i); | 686 | clean_keys(dev, i); |
659 | 687 | ||
660 | destroy_workqueue(dev->cache.wq); | 688 | destroy_workqueue(dev->cache.wq); |
689 | wait_for_async_commands(dev); | ||
661 | del_timer_sync(&dev->delay_timer); | 690 | del_timer_sync(&dev->delay_timer); |
662 | 691 | ||
663 | return 0; | 692 | return 0; |
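wait_for_async_commands() is a bounded drain: each cache entry gets up to 1000 polls at 50 ms, after which cleanup proceeds anyway and the leftover count is only warned about, so a wedged firmware cannot hang module unload forever. The same poll-with-timeout shape in miniature, with usleep() standing in for msleep():

    #include <unistd.h>

    static int drain(volatile int *pending, int max_tries)
    {
            int i;

            for (i = 0; i < max_tries; i++) {
                    if (!*pending)
                            return 0;               /* drained cleanly */
                    usleep(50 * 1000);              /* 50 ms per try */
            }
            return *pending;    /* leftover work; caller logs and moves on */
    }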
@@ -815,29 +844,34 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, | |||
815 | umrwr->mkey = key; | 844 | umrwr->mkey = key; |
816 | } | 845 | } |
817 | 846 | ||
818 | static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length, | 847 | static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, |
819 | int access_flags, int *npages, | 848 | int access_flags, struct ib_umem **umem, |
820 | int *page_shift, int *ncont, int *order) | 849 | int *npages, int *page_shift, int *ncont, |
850 | int *order) | ||
821 | { | 851 | { |
822 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 852 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
823 | struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length, | 853 | int err; |
824 | access_flags, 0); | 854 | |
825 | if (IS_ERR(umem)) { | 855 | *umem = ib_umem_get(pd->uobject->context, start, length, |
856 | access_flags, 0); | ||
857 | err = PTR_ERR_OR_ZERO(*umem); | ||
858 | if (err < 0) { | ||
826 | mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); | 859 | mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); |
827 | return (void *)umem; | 860 | return err; |
828 | } | 861 | } |
829 | 862 | ||
830 | mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order); | 863 | mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, |
864 | page_shift, ncont, order); | ||
831 | if (!*npages) { | 865 | if (!*npages) { |
832 | mlx5_ib_warn(dev, "avoid zero region\n"); | 866 | mlx5_ib_warn(dev, "avoid zero region\n"); |
833 | ib_umem_release(umem); | 867 | ib_umem_release(*umem); |
834 | return ERR_PTR(-EINVAL); | 868 | return -EINVAL; |
835 | } | 869 | } |
836 | 870 | ||
837 | mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", | 871 | mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", |
838 | *npages, *ncont, *order, *page_shift); | 872 | *npages, *ncont, *order, *page_shift); |
839 | 873 | ||
840 | return umem; | 874 | return 0; |
841 | } | 875 | } |
842 | 876 | ||
843 | static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) | 877 | static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) |
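mr_umem_get() switches from returning an ERR_PTR-encoded pointer to returning an int and handing the umem back through an out parameter; PTR_ERR_OR_ZERO() collapses the IS_ERR test and the error extraction into one call. A userspace model of the kernel's pointer-error encoding, assuming the usual top-4095-addresses convention:

    #include <stdint.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err)
    {
            return (void *)err;             /* errno lives in the pointer */
    }

    static inline long PTR_ERR_OR_ZERO(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO
                    ? (long)(uintptr_t)p    /* negative errno */
                    : 0;                    /* valid pointer */
    }

    static int get_obj(void **out, int fail)
    {
            static int obj;

            *out = fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)&obj;
            return (int)PTR_ERR_OR_ZERO(*out);
    }

Callers then test a plain int, which is what simplifies both mlx5_ib_reg_user_mr() and mlx5_ib_rereg_user_mr() below.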
@@ -1163,11 +1197,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1163 | 1197 | ||
1164 | mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", | 1198 | mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", |
1165 | start, virt_addr, length, access_flags); | 1199 | start, virt_addr, length, access_flags); |
1166 | umem = mr_umem_get(pd, start, length, access_flags, &npages, | 1200 | err = mr_umem_get(pd, start, length, access_flags, &umem, &npages, |
1167 | &page_shift, &ncont, &order); | 1201 | &page_shift, &ncont, &order); |
1168 | 1202 | ||
1169 | if (IS_ERR(umem)) | 1203 | if (err < 0) |
1170 | return (void *)umem; | 1204 | return ERR_PTR(err); |
1171 | 1205 | ||
1172 | if (use_umr(order)) { | 1206 | if (use_umr(order)) { |
1173 | mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, | 1207 | mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, |
@@ -1341,10 +1375,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, | |||
1341 | */ | 1375 | */ |
1342 | flags |= IB_MR_REREG_TRANS; | 1376 | flags |= IB_MR_REREG_TRANS; |
1343 | ib_umem_release(mr->umem); | 1377 | ib_umem_release(mr->umem); |
1344 | mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages, | 1378 | err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, |
1345 | &page_shift, &ncont, &order); | 1379 | &npages, &page_shift, &ncont, &order); |
1346 | if (IS_ERR(mr->umem)) { | 1380 | if (err < 0) { |
1347 | err = PTR_ERR(mr->umem); | ||
1348 | mr->umem = NULL; | 1381 | mr->umem = NULL; |
1349 | return err; | 1382 | return err; |
1350 | } | 1383 | } |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 41f4c2afbcdd..cc24f2d429b9 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -78,12 +78,14 @@ struct mlx5_wqe_eth_pad { | |||
78 | 78 | ||
79 | enum raw_qp_set_mask_map { | 79 | enum raw_qp_set_mask_map { |
80 | MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0, | 80 | MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0, |
81 | MLX5_RAW_QP_RATE_LIMIT = 1UL << 1, | ||
81 | }; | 82 | }; |
82 | 83 | ||
83 | struct mlx5_modify_raw_qp_param { | 84 | struct mlx5_modify_raw_qp_param { |
84 | u16 operation; | 85 | u16 operation; |
85 | 86 | ||
86 | u32 set_mask; /* raw_qp_set_mask_map */ | 87 | u32 set_mask; /* raw_qp_set_mask_map */ |
88 | u32 rate_limit; | ||
87 | u8 rq_q_ctr_id; | 89 | u8 rq_q_ctr_id; |
88 | }; | 90 | }; |
89 | 91 | ||
@@ -352,6 +354,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr) | |||
352 | return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); | 354 | return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); |
353 | } | 355 | } |
354 | 356 | ||
357 | static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size) | ||
358 | { | ||
359 | int max_sge; | ||
360 | |||
361 | if (attr->qp_type == IB_QPT_RC) | ||
362 | max_sge = (min_t(int, wqe_size, 512) - | ||
363 | sizeof(struct mlx5_wqe_ctrl_seg) - | ||
364 | sizeof(struct mlx5_wqe_raddr_seg)) / | ||
365 | sizeof(struct mlx5_wqe_data_seg); | ||
366 | else if (attr->qp_type == IB_QPT_XRC_INI) | ||
367 | max_sge = (min_t(int, wqe_size, 512) - | ||
368 | sizeof(struct mlx5_wqe_ctrl_seg) - | ||
369 | sizeof(struct mlx5_wqe_xrc_seg) - | ||
370 | sizeof(struct mlx5_wqe_raddr_seg)) / | ||
371 | sizeof(struct mlx5_wqe_data_seg); | ||
372 | else | ||
373 | max_sge = (wqe_size - sq_overhead(attr)) / | ||
374 | sizeof(struct mlx5_wqe_data_seg); | ||
375 | |||
376 | return min_t(int, max_sge, (wqe_size - sq_overhead(attr)) / | ||
377 | sizeof(struct mlx5_wqe_data_seg)); | ||
378 | } | ||
379 | |||
355 | static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, | 380 | static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, |
356 | struct mlx5_ib_qp *qp) | 381 | struct mlx5_ib_qp *qp) |
357 | { | 382 | { |
@@ -382,13 +407,18 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, | |||
382 | wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); | 407 | wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); |
383 | qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; | 408 | qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; |
384 | if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { | 409 | if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { |
385 | mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", | 410 | mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n", |
411 | attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB, | ||
386 | qp->sq.wqe_cnt, | 412 | qp->sq.wqe_cnt, |
387 | 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); | 413 | 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); |
388 | return -ENOMEM; | 414 | return -ENOMEM; |
389 | } | 415 | } |
390 | qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); | 416 | qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); |
391 | qp->sq.max_gs = attr->cap.max_send_sge; | 417 | qp->sq.max_gs = get_send_sge(attr, wqe_size); |
418 | if (qp->sq.max_gs < attr->cap.max_send_sge) | ||
419 | return -ENOMEM; | ||
420 | |||
421 | attr->cap.max_send_sge = qp->sq.max_gs; | ||
392 | qp->sq.max_post = wq_size / wqe_size; | 422 | qp->sq.max_post = wq_size / wqe_size; |
393 | attr->cap.max_send_wr = qp->sq.max_post; | 423 | attr->cap.max_send_wr = qp->sq.max_post; |
394 | 424 | ||
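get_send_sge() derives the SGE budget from the WQE layout instead of trusting the caller: the fixed segments for the QP type are subtracted from the usable WQE bytes (capped at 512 for RC and XRC_INI) and the remainder is divided by the data-segment size. calc_sq_size() then fails with -ENOMEM if the user asked for more, and otherwise reports the computed maximum back through attr->cap.max_send_sge. The arithmetic in miniature, assuming the 16-byte mlx5 segment sizes:

    /* ctrl/raddr/data are the per-segment sizes for the QP type. */
    static int max_send_sge(int wqe_size, int ctrl, int raddr, int data)
    {
            int budget = (wqe_size < 512 ? wqe_size : 512) - ctrl - raddr;

            return budget / data;   /* e.g. (512 - 16 - 16) / 16 = 30 */
    }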
@@ -648,7 +678,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, | |||
648 | return PTR_ERR(*umem); | 678 | return PTR_ERR(*umem); |
649 | } | 679 | } |
650 | 680 | ||
651 | mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL); | 681 | mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL); |
652 | 682 | ||
653 | err = mlx5_ib_get_buf_offset(addr, *page_shift, offset); | 683 | err = mlx5_ib_get_buf_offset(addr, *page_shift, offset); |
654 | if (err) { | 684 | if (err) { |
@@ -701,7 +731,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
701 | return err; | 731 | return err; |
702 | } | 732 | } |
703 | 733 | ||
704 | mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift, | 734 | mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift, |
705 | &ncont, NULL); | 735 | &ncont, NULL); |
706 | err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift, | 736 | err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift, |
707 | &rwq->rq_page_offset); | 737 | &rwq->rq_page_offset); |
@@ -2443,8 +2473,14 @@ out: | |||
2443 | } | 2473 | } |
2444 | 2474 | ||
2445 | static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, | 2475 | static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, |
2446 | struct mlx5_ib_sq *sq, int new_state) | 2476 | struct mlx5_ib_sq *sq, |
2477 | int new_state, | ||
2478 | const struct mlx5_modify_raw_qp_param *raw_qp_param) | ||
2447 | { | 2479 | { |
2480 | struct mlx5_ib_qp *ibqp = sq->base.container_mibqp; | ||
2481 | u32 old_rate = ibqp->rate_limit; | ||
2482 | u32 new_rate = old_rate; | ||
2483 | u16 rl_index = 0; | ||
2448 | void *in; | 2484 | void *in; |
2449 | void *sqc; | 2485 | void *sqc; |
2450 | int inlen; | 2486 | int inlen; |
@@ -2460,10 +2496,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, | |||
2460 | sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); | 2496 | sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); |
2461 | MLX5_SET(sqc, sqc, state, new_state); | 2497 | MLX5_SET(sqc, sqc, state, new_state); |
2462 | 2498 | ||
2499 | if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) { | ||
2500 | if (new_state != MLX5_SQC_STATE_RDY) | ||
2501 | pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n", | ||
2502 | __func__); | ||
2503 | else | ||
2504 | new_rate = raw_qp_param->rate_limit; | ||
2505 | } | ||
2506 | |||
2507 | if (old_rate != new_rate) { | ||
2508 | if (new_rate) { | ||
2509 | err = mlx5_rl_add_rate(dev, new_rate, &rl_index); | ||
2510 | if (err) { | ||
2511 | pr_err("Failed configuring rate %u: %d\n", | ||
2512 | new_rate, err); | ||
2513 | goto out; | ||
2514 | } | ||
2515 | } | ||
2516 | |||
2517 | MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); | ||
2518 | MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); | ||
2519 | } | ||
2520 | |||
2463 | err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen); | 2521 | err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen); |
2464 | if (err) | 2522 | if (err) { |
2523 | /* Remove new rate from table if failed */ | ||
2524 | if (new_rate && | ||
2525 | old_rate != new_rate) | ||
2526 | mlx5_rl_remove_rate(dev, new_rate); | ||
2465 | goto out; | 2527 | goto out; |
2528 | } | ||
2529 | |||
2530 | /* Only remove the old rate after new rate was set */ | ||
2531 | if ((old_rate && | ||
2532 | (old_rate != new_rate)) || | ||
2533 | (new_state != MLX5_SQC_STATE_RDY)) | ||
2534 | mlx5_rl_remove_rate(dev, old_rate); | ||
2466 | 2535 | ||
2536 | ibqp->rate_limit = new_rate; | ||
2467 | sq->state = new_state; | 2537 | sq->state = new_state; |
2468 | 2538 | ||
2469 | out: | 2539 | out: |
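The rate-limit update in modify_raw_packet_qp_sq() is make-before-break: the new rate is added to the device rate table first, the SQ is repointed at its index, and only then is the old rate's reference released (or, on failure, the new one rolled back), so an rl_index shared with other SQs is never freed while hardware may still use it. The ordering in a stub sketch; rl_add/rl_remove/hw_modify_sq stand in for mlx5_rl_add_rate(), mlx5_rl_remove_rate() and mlx5_core_modify_sq():

    static int  rl_add(unsigned int rate, unsigned short *idx)
    {
            *idx = rate ? 1 : 0;    /* stub: pretend an index was found */
            return 0;
    }
    static void rl_remove(unsigned int rate)     { (void)rate; }
    static int  hw_modify_sq(unsigned short idx) { (void)idx; return 0; }

    static int set_sq_rate(unsigned int *cur_rate, unsigned int new_rate)
    {
            unsigned short idx = 0;
            int err;

            if (new_rate == *cur_rate)
                    return 0;
            if (new_rate) {
                    err = rl_add(new_rate, &idx);   /* make... */
                    if (err)
                            return err;
            }
            err = hw_modify_sq(idx);
            if (err) {
                    if (new_rate)
                            rl_remove(new_rate);    /* HW never saw it */
                    return err;
            }
            if (*cur_rate)
                    rl_remove(*cur_rate);           /* ...before break */
            *cur_rate = new_rate;
            return 0;
    }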
@@ -2478,6 +2548,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
2478 | struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; | 2548 | struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; |
2479 | struct mlx5_ib_rq *rq = &raw_packet_qp->rq; | 2549 | struct mlx5_ib_rq *rq = &raw_packet_qp->rq; |
2480 | struct mlx5_ib_sq *sq = &raw_packet_qp->sq; | 2550 | struct mlx5_ib_sq *sq = &raw_packet_qp->sq; |
2551 | int modify_rq = !!qp->rq.wqe_cnt; | ||
2552 | int modify_sq = !!qp->sq.wqe_cnt; | ||
2481 | int rq_state; | 2553 | int rq_state; |
2482 | int sq_state; | 2554 | int sq_state; |
2483 | int err; | 2555 | int err; |
@@ -2495,10 +2567,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
2495 | rq_state = MLX5_RQC_STATE_RST; | 2567 | rq_state = MLX5_RQC_STATE_RST; |
2496 | sq_state = MLX5_SQC_STATE_RST; | 2568 | sq_state = MLX5_SQC_STATE_RST; |
2497 | break; | 2569 | break; |
2498 | case MLX5_CMD_OP_INIT2INIT_QP: | ||
2499 | case MLX5_CMD_OP_INIT2RTR_QP: | ||
2500 | case MLX5_CMD_OP_RTR2RTS_QP: | 2570 | case MLX5_CMD_OP_RTR2RTS_QP: |
2501 | case MLX5_CMD_OP_RTS2RTS_QP: | 2571 | case MLX5_CMD_OP_RTS2RTS_QP: |
2572 | if (raw_qp_param->set_mask == | ||
2573 | MLX5_RAW_QP_RATE_LIMIT) { | ||
2574 | modify_rq = 0; | ||
2575 | sq_state = sq->state; | ||
2576 | } else { | ||
2577 | return raw_qp_param->set_mask ? -EINVAL : 0; | ||
2578 | } | ||
2579 | break; | ||
2580 | case MLX5_CMD_OP_INIT2INIT_QP: | ||
2581 | case MLX5_CMD_OP_INIT2RTR_QP: | ||
2502 | if (raw_qp_param->set_mask) | 2582 | if (raw_qp_param->set_mask) |
2503 | return -EINVAL; | 2583 | return -EINVAL; |
2504 | else | 2584 | else |
@@ -2508,13 +2588,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
2508 | return -EINVAL; | 2588 | return -EINVAL; |
2509 | } | 2589 | } |
2510 | 2590 | ||
2511 | if (qp->rq.wqe_cnt) { | 2591 | if (modify_rq) { |
2512 | err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param); | 2592 | err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param); |
2513 | if (err) | 2593 | if (err) |
2514 | return err; | 2594 | return err; |
2515 | } | 2595 | } |
2516 | 2596 | ||
2517 | if (qp->sq.wqe_cnt) { | 2597 | if (modify_sq) { |
2518 | if (tx_affinity) { | 2598 | if (tx_affinity) { |
2519 | err = modify_raw_packet_tx_affinity(dev->mdev, sq, | 2599 | err = modify_raw_packet_tx_affinity(dev->mdev, sq, |
2520 | tx_affinity); | 2600 | tx_affinity); |
@@ -2522,7 +2602,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
2522 | return err; | 2602 | return err; |
2523 | } | 2603 | } |
2524 | 2604 | ||
2525 | return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state); | 2605 | return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param); |
2526 | } | 2606 | } |
2527 | 2607 | ||
2528 | return 0; | 2608 | return 0; |
@@ -2578,7 +2658,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
2578 | struct mlx5_ib_port *mibport = NULL; | 2658 | struct mlx5_ib_port *mibport = NULL; |
2579 | enum mlx5_qp_state mlx5_cur, mlx5_new; | 2659 | enum mlx5_qp_state mlx5_cur, mlx5_new; |
2580 | enum mlx5_qp_optpar optpar; | 2660 | enum mlx5_qp_optpar optpar; |
2581 | int sqd_event; | ||
2582 | int mlx5_st; | 2661 | int mlx5_st; |
2583 | int err; | 2662 | int err; |
2584 | u16 op; | 2663 | u16 op; |
@@ -2725,12 +2804,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
2725 | if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) | 2804 | if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) |
2726 | context->db_rec_addr = cpu_to_be64(qp->db.dma); | 2805 | context->db_rec_addr = cpu_to_be64(qp->db.dma); |
2727 | 2806 | ||
2728 | if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && | ||
2729 | attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) | ||
2730 | sqd_event = 1; | ||
2731 | else | ||
2732 | sqd_event = 0; | ||
2733 | |||
2734 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { | 2807 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
2735 | u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num : | 2808 | u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num : |
2736 | qp->port) - 1; | 2809 | qp->port) - 1; |
@@ -2777,6 +2850,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
2777 | raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id; | 2850 | raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id; |
2778 | raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID; | 2851 | raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID; |
2779 | } | 2852 | } |
2853 | |||
2854 | if (attr_mask & IB_QP_RATE_LIMIT) { | ||
2855 | raw_qp_param.rate_limit = attr->rate_limit; | ||
2856 | raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT; | ||
2857 | } | ||
2858 | |||
2780 | err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity); | 2859 | err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity); |
2781 | } else { | 2860 | } else { |
2782 | err = mlx5_core_qp_modify(dev->mdev, op, optpar, context, | 2861 | err = mlx5_core_qp_modify(dev->mdev, op, optpar, context, |
@@ -3068,10 +3147,10 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) | |||
3068 | { | 3147 | { |
3069 | memset(umr, 0, sizeof(*umr)); | 3148 | memset(umr, 0, sizeof(*umr)); |
3070 | umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); | 3149 | umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); |
3071 | umr->flags = 1 << 7; | 3150 | umr->flags = MLX5_UMR_INLINE; |
3072 | } | 3151 | } |
3073 | 3152 | ||
3074 | static __be64 get_umr_reg_mr_mask(void) | 3153 | static __be64 get_umr_reg_mr_mask(int atomic) |
3075 | { | 3154 | { |
3076 | u64 result; | 3155 | u64 result; |
3077 | 3156 | ||
@@ -3084,9 +3163,11 @@ static __be64 get_umr_reg_mr_mask(void) | |||
3084 | MLX5_MKEY_MASK_KEY | | 3163 | MLX5_MKEY_MASK_KEY | |
3085 | MLX5_MKEY_MASK_RR | | 3164 | MLX5_MKEY_MASK_RR | |
3086 | MLX5_MKEY_MASK_RW | | 3165 | MLX5_MKEY_MASK_RW | |
3087 | MLX5_MKEY_MASK_A | | ||
3088 | MLX5_MKEY_MASK_FREE; | 3166 | MLX5_MKEY_MASK_FREE; |
3089 | 3167 | ||
3168 | if (atomic) | ||
3169 | result |= MLX5_MKEY_MASK_A; | ||
3170 | |||
3090 | return cpu_to_be64(result); | 3171 | return cpu_to_be64(result); |
3091 | } | 3172 | } |
3092 | 3173 | ||
@@ -3147,7 +3228,7 @@ static __be64 get_umr_update_pd_mask(void) | |||
3147 | } | 3228 | } |
3148 | 3229 | ||
3149 | static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, | 3230 | static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, |
3150 | struct ib_send_wr *wr) | 3231 | struct ib_send_wr *wr, int atomic) |
3151 | { | 3232 | { |
3152 | struct mlx5_umr_wr *umrwr = umr_wr(wr); | 3233 | struct mlx5_umr_wr *umrwr = umr_wr(wr); |
3153 | 3234 | ||
@@ -3172,7 +3253,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, | |||
3172 | if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD) | 3253 | if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD) |
3173 | umr->mkey_mask |= get_umr_update_pd_mask(); | 3254 | umr->mkey_mask |= get_umr_update_pd_mask(); |
3174 | if (!umr->mkey_mask) | 3255 | if (!umr->mkey_mask) |
3175 | umr->mkey_mask = get_umr_reg_mr_mask(); | 3256 | umr->mkey_mask = get_umr_reg_mr_mask(atomic); |
3176 | } else { | 3257 | } else { |
3177 | umr->mkey_mask = get_umr_unreg_mr_mask(); | 3258 | umr->mkey_mask = get_umr_unreg_mr_mask(); |
3178 | } | 3259 | } |
@@ -4025,7 +4106,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
4025 | } | 4106 | } |
4026 | qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; | 4107 | qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; |
4027 | ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); | 4108 | ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); |
4028 | set_reg_umr_segment(seg, wr); | 4109 | set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic))); |
4029 | seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); | 4110 | seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); |
4030 | size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; | 4111 | size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; |
4031 | if (unlikely((seg == qend))) | 4112 | if (unlikely((seg == qend))) |
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index c676133750b7..6f4397ee1ed6 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c | |||
@@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, | |||
118 | return err; | 118 | return err; |
119 | } | 119 | } |
120 | 120 | ||
121 | mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, | 121 | mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages, |
122 | &page_shift, &ncont, NULL); | 122 | &page_shift, &ncont, NULL); |
123 | err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, | 123 | err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, |
124 | &offset); | 124 | &offset); |
@@ -280,6 +280,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
280 | mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", | 280 | mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", |
281 | desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, | 281 | desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, |
282 | srq->msrq.max_avail_gather); | 282 | srq->msrq.max_avail_gather); |
283 | in.type = init_attr->srq_type; | ||
283 | 284 | ||
284 | if (pd->uobject) | 285 | if (pd->uobject) |
285 | err = create_srq_user(pd, srq, &in, udata, buf_size); | 286 | err = create_srq_user(pd, srq, &in, udata, buf_size); |
@@ -292,7 +293,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
292 | goto err_srq; | 293 | goto err_srq; |
293 | } | 294 | } |
294 | 295 | ||
295 | in.type = init_attr->srq_type; | ||
296 | in.log_size = ilog2(srq->msrq.max); | 296 | in.log_size = ilog2(srq->msrq.max); |
297 | in.wqe_shift = srq->msrq.wqe_shift - 4; | 297 | in.wqe_shift = srq->msrq.wqe_shift - 4; |
298 | if (srq->wq_sig) | 298 | if (srq->wq_sig) |
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index bcac294042f5..c9f0f364f484 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c | |||
@@ -186,8 +186,8 @@ int mthca_create_ah(struct mthca_dev *dev, | |||
186 | 186 | ||
187 | on_hca_fail: | 187 | on_hca_fail: |
188 | if (ah->type == MTHCA_AH_PCI_POOL) { | 188 | if (ah->type == MTHCA_AH_PCI_POOL) { |
189 | ah->av = pci_pool_alloc(dev->av_table.pool, | 189 | ah->av = pci_pool_zalloc(dev->av_table.pool, |
190 | GFP_ATOMIC, &ah->avdma); | 190 | GFP_ATOMIC, &ah->avdma); |
191 | if (!ah->av) | 191 | if (!ah->av) |
192 | return -ENOMEM; | 192 | return -ENOMEM; |
193 | 193 | ||
@@ -196,8 +196,6 @@ on_hca_fail: | |||
196 | 196 | ||
197 | ah->key = pd->ntmr.ibmr.lkey; | 197 | ah->key = pd->ntmr.ibmr.lkey; |
198 | 198 | ||
199 | memset(av, 0, MTHCA_AV_SIZE); | ||
200 | |||
201 | av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24)); | 199 | av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24)); |
202 | av->g_slid = ah_attr->src_path_bits; | 200 | av->g_slid = ah_attr->src_path_bits; |
203 | av->dlid = cpu_to_be16(ah_attr->dlid); | 201 | av->dlid = cpu_to_be16(ah_attr->dlid); |
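Editor's note: pci_pool_zalloc() returns zero-initialized memory, which is why the later memset(av, 0, MTHCA_AV_SIZE) can be dropped in the same patch; a zeroing allocator replaces the alloc-then-clear pair. The general shape of the transformation:

	/* Before: allocate, then clear by hand. */
	av = pci_pool_alloc(pool, GFP_ATOMIC, &dma);
	if (av)
		memset(av, 0, size);

	/* After: one call that allocates and zeroes. */
	av = pci_pool_zalloc(pool, GFP_ATOMIC, &dma);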
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 358930a41e36..d31708742ba5 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -410,7 +410,9 @@ static int mthca_dealloc_pd(struct ib_pd *pd) | |||
410 | } | 410 | } |
411 | 411 | ||
412 | static struct ib_ah *mthca_ah_create(struct ib_pd *pd, | 412 | static struct ib_ah *mthca_ah_create(struct ib_pd *pd, |
413 | struct ib_ah_attr *ah_attr) | 413 | struct ib_ah_attr *ah_attr, |
414 | struct ib_udata *udata) | ||
415 | |||
414 | { | 416 | { |
415 | int err; | 417 | int err; |
416 | struct mthca_ah *ah; | 418 | struct mthca_ah *ah; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 42ab31d06ef9..aff9fb14768b 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -771,7 +771,8 @@ static int nes_dealloc_pd(struct ib_pd *ibpd) | |||
771 | /** | 771 | /** |
772 | * nes_create_ah | 772 | * nes_create_ah |
773 | */ | 773 | */ |
774 | static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | 774 | static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, |
775 | struct ib_udata *udata) | ||
775 | { | 776 | { |
776 | return ERR_PTR(-ENOSYS); | 777 | return ERR_PTR(-ENOSYS); |
777 | } | 778 | } |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 797362a297b2..14d33b0f3950 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c | |||
@@ -154,7 +154,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, | |||
154 | return status; | 154 | return status; |
155 | } | 155 | } |
156 | 156 | ||
157 | struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | 157 | struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, |
158 | struct ib_udata *udata) | ||
158 | { | 159 | { |
159 | u32 *ahid_addr; | 160 | u32 *ahid_addr; |
160 | int status; | 161 | int status; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index 3856dd4c7e3d..0704a24b17c8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h | |||
@@ -50,7 +50,9 @@ enum { | |||
50 | OCRDMA_AH_L3_TYPE_MASK = 0x03, | 50 | OCRDMA_AH_L3_TYPE_MASK = 0x03, |
51 | OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */ | 51 | OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */ |
52 | }; | 52 | }; |
53 | struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); | 53 | |
54 | struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *, | ||
55 | struct ib_udata *); | ||
54 | int ocrdma_destroy_ah(struct ib_ah *); | 56 | int ocrdma_destroy_ah(struct ib_ah *); |
55 | int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); | 57 | int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); |
56 | int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *); | 58 | int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *); |
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index a61514296767..ccff6c6e3f33 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -2094,7 +2094,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp) | |||
2094 | return rc; | 2094 | return rc; |
2095 | } | 2095 | } |
2096 | 2096 | ||
2097 | struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | 2097 | struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, |
2098 | struct ib_udata *udata) | ||
2098 | { | 2099 | { |
2099 | struct qedr_ah *ah; | 2100 | struct qedr_ah *ah; |
2100 | 2101 | ||
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index a9b5e67bb81e..070677ca4d19 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h | |||
@@ -70,7 +70,8 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, | |||
70 | int qp_attr_mask, struct ib_qp_init_attr *); | 70 | int qp_attr_mask, struct ib_qp_init_attr *); |
71 | int qedr_destroy_qp(struct ib_qp *ibqp); | 71 | int qedr_destroy_qp(struct ib_qp *ibqp); |
72 | 72 | ||
73 | struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr); | 73 | struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, |
74 | struct ib_udata *udata); | ||
74 | int qedr_destroy_ah(struct ib_ah *ibah); | 75 | int qedr_destroy_ah(struct ib_ah *ibah); |
75 | 76 | ||
76 | int qedr_dereg_mr(struct ib_mr *); | 77 | int qedr_dereg_mr(struct ib_mr *); |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index a5bfbba6bbac..fd2a50eb4c91 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c | |||
@@ -738,7 +738,9 @@ int usnic_ib_mmap(struct ib_ucontext *context, | |||
738 | 738 | ||
739 | /* In ib callbacks section - Start of stub funcs */ | 739 | /* In ib callbacks section - Start of stub funcs */ |
740 | struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, | 740 | struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, |
741 | struct ib_ah_attr *ah_attr) | 741 | struct ib_ah_attr *ah_attr, |
742 | struct ib_udata *udata) | ||
743 | |||
742 | { | 744 | { |
743 | usnic_dbg("\n"); | 745 | usnic_dbg("\n"); |
744 | return ERR_PTR(-EPERM); | 746 | return ERR_PTR(-EPERM); |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 0d9d2e6a14d5..0ed8e072329e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h | |||
@@ -75,7 +75,9 @@ int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); | |||
75 | int usnic_ib_mmap(struct ib_ucontext *context, | 75 | int usnic_ib_mmap(struct ib_ucontext *context, |
76 | struct vm_area_struct *vma); | 76 | struct vm_area_struct *vma); |
77 | struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, | 77 | struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, |
78 | struct ib_ah_attr *ah_attr); | 78 | struct ib_ah_attr *ah_attr, |
79 | struct ib_udata *udata); | ||
80 | |||
79 | int usnic_ib_destroy_ah(struct ib_ah *ah); | 81 | int usnic_ib_destroy_ah(struct ib_ah *ah); |
80 | int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 82 | int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
81 | struct ib_send_wr **bad_wr); | 83 | struct ib_send_wr **bad_wr); |
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h index f459c43a77c8..13ed2cc6eaa2 100644 --- a/drivers/infiniband/sw/rxe/rxe_param.h +++ b/drivers/infiniband/sw/rxe/rxe_param.h | |||
@@ -82,7 +82,7 @@ enum rxe_device_param { | |||
82 | RXE_MAX_SGE = 32, | 82 | RXE_MAX_SGE = 32, |
83 | RXE_MAX_SGE_RD = 32, | 83 | RXE_MAX_SGE_RD = 32, |
84 | RXE_MAX_CQ = 16384, | 84 | RXE_MAX_CQ = 16384, |
85 | RXE_MAX_LOG_CQE = 13, | 85 | RXE_MAX_LOG_CQE = 15, |
86 | RXE_MAX_MR = 2 * 1024, | 86 | RXE_MAX_MR = 2 * 1024, |
87 | RXE_MAX_PD = 0x7ffc, | 87 | RXE_MAX_PD = 0x7ffc, |
88 | RXE_MAX_QP_RD_ATOM = 128, | 88 | RXE_MAX_QP_RD_ATOM = 128, |
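Editor's note: RXE_MAX_LOG_CQE is a log2 bound on completion-queue depth, so raising it from 13 to 15 quadruples the maximum CQ size rxe can advertise:

	/* The bound is logarithmic; 13 -> 15 means 8192 -> 32768 entries. */
	depth_old = 1 << 13;	/*  8192 */
	depth_new = 1 << 15;	/* 32768 */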
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 19841c863daf..187d85ccfe58 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c | |||
@@ -316,7 +316,9 @@ static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr, | |||
316 | return err; | 316 | return err; |
317 | } | 317 | } |
318 | 318 | ||
319 | static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) | 319 | static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, |
320 | struct ib_udata *udata) | ||
321 | |||
320 | { | 322 | { |
321 | int err; | 323 | int err; |
322 | struct rxe_dev *rxe = to_rdev(ibpd->device); | 324 | struct rxe_dev *rxe = to_rdev(ibpd->device); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 44f152e431cb..46234f52ee29 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -1050,8 +1050,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_ | |||
1050 | 1050 | ||
1051 | tx_qp = ib_create_qp(priv->pd, &attr); | 1051 | tx_qp = ib_create_qp(priv->pd, &attr); |
1052 | if (PTR_ERR(tx_qp) == -EINVAL) { | 1052 | if (PTR_ERR(tx_qp) == -EINVAL) { |
1053 | ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", | ||
1054 | priv->ca->name); | ||
1055 | attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO; | 1053 | attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO; |
1056 | tx_qp = ib_create_qp(priv->pd, &attr); | 1054 | tx_qp = ib_create_qp(priv->pd, &attr); |
1057 | } | 1055 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 84d7857ccc27..c548beaaf910 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, | |||
1605 | r->com.from_state = r->com.state; | 1605 | r->com.from_state = r->com.state; |
1606 | r->com.to_state = state; | 1606 | r->com.to_state = state; |
1607 | r->com.state = RES_EQ_BUSY; | 1607 | r->com.state = RES_EQ_BUSY; |
1608 | if (eq) | ||
1609 | *eq = r; | ||
1610 | } | 1608 | } |
1611 | } | 1609 | } |
1612 | 1610 | ||
1613 | spin_unlock_irq(mlx4_tlock(dev)); | 1611 | spin_unlock_irq(mlx4_tlock(dev)); |
1614 | 1612 | ||
1613 | if (!err && eq) | ||
1614 | *eq = r; | ||
1615 | |||
1615 | return err; | 1616 | return err; |
1616 | } | 1617 | } |
1617 | 1618 | ||
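Editor's note: this mlx4 fix both moves the *eq store out of the spinlocked section and makes it conditional on err, so a caller only ever receives the resource when the transition to RES_EQ_BUSY actually succeeded. A self-contained sketch of the publish-only-on-success pattern, with a pthread mutex standing in for the kernel spinlock:

	#include <errno.h>
	#include <pthread.h>

	struct res { int state; };	/* 0 = available, 1 = busy */

	static int move_to_busy(pthread_mutex_t *lock, struct res *r,
				struct res **out)
	{
		int err = 0;

		pthread_mutex_lock(lock);
		if (r->state != 0)
			err = -EBUSY;	/* transition refused */
		else
			r->state = 1;	/* claimed */
		pthread_mutex_unlock(lock);

		/* Publish the resource only once we know we succeeded. */
		if (!err && out)
			*out = r;
		return err;
	}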
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 2632cb2caf10..0779ad2e8f51 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -576,7 +576,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { | |||
576 | u8 self_lb_en_modifiable[0x1]; | 576 | u8 self_lb_en_modifiable[0x1]; |
577 | u8 reserved_at_9[0x2]; | 577 | u8 reserved_at_9[0x2]; |
578 | u8 max_lso_cap[0x5]; | 578 | u8 max_lso_cap[0x5]; |
579 | u8 reserved_at_10[0x2]; | 579 | u8 multi_pkt_send_wqe[0x2]; |
580 | u8 wqe_inline_mode[0x2]; | 580 | u8 wqe_inline_mode[0x2]; |
581 | u8 rss_ind_tbl_cap[0x4]; | 581 | u8 rss_ind_tbl_cap[0x4]; |
582 | u8 reg_umr_sq[0x1]; | 582 | u8 reg_umr_sq[0x1]; |
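Editor's note: mlx5_ifc layouts describe hardware fields as bit widths, so "u8 multi_pkt_send_wqe[0x2]" carves a named 2-bit field out of what was reserved_at_10[0x2]; the structure size is unchanged, only the name is. A sketch of how such a field could be pulled from a big-endian capability word (the driver's real MLX5_GET macro does this; offsets here are illustrative and a little-endian host is assumed):

	#include <stdint.h>

	/* Extract a `width`-bit field starting `bit_off` bits into a
	 * 32-bit big-endian word (bit 0 = most significant, width < 32). */
	static unsigned int cap_bits(uint32_t word_be, unsigned int bit_off,
				     unsigned int width)
	{
		uint32_t w = __builtin_bswap32(word_be); /* LE host assumed */

		return (w >> (32 - bit_off - width)) & ((1u << width) - 1);
	}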
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 5ad43a487745..8029d2a51f14 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -1102,6 +1102,7 @@ enum ib_qp_attr_mask { | |||
1102 | IB_QP_RESERVED2 = (1<<22), | 1102 | IB_QP_RESERVED2 = (1<<22), |
1103 | IB_QP_RESERVED3 = (1<<23), | 1103 | IB_QP_RESERVED3 = (1<<23), |
1104 | IB_QP_RESERVED4 = (1<<24), | 1104 | IB_QP_RESERVED4 = (1<<24), |
1105 | IB_QP_RATE_LIMIT = (1<<25), | ||
1105 | }; | 1106 | }; |
1106 | 1107 | ||
1107 | enum ib_qp_state { | 1108 | enum ib_qp_state { |
@@ -1151,6 +1152,7 @@ struct ib_qp_attr { | |||
1151 | u8 rnr_retry; | 1152 | u8 rnr_retry; |
1152 | u8 alt_port_num; | 1153 | u8 alt_port_num; |
1153 | u8 alt_timeout; | 1154 | u8 alt_timeout; |
1155 | u32 rate_limit; | ||
1154 | }; | 1156 | }; |
1155 | 1157 | ||
1156 | enum ib_wr_opcode { | 1158 | enum ib_wr_opcode { |
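Editor's note: together, the IB_QP_RATE_LIMIT mask bit and the new rate_limit attribute let a kernel consumer request a send-rate cap through the ordinary modify-QP path. A sketch of a caller; the units are driver-defined (the mlx5 packet-pacing caps further below report kbps):

	struct ib_qp_attr attr = {};
	int ret;

	attr.rate_limit = 100000;	/* requested cap, e.g. kbps on mlx5 */
	ret = ib_modify_qp(qp, &attr, IB_QP_RATE_LIMIT);
	if (ret)
		pr_warn("rate limiting rejected: %d\n", ret);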
@@ -1592,17 +1594,19 @@ enum ib_flow_attr_type { | |||
1592 | /* Supported steering header types */ | 1594 | /* Supported steering header types */ |
1593 | enum ib_flow_spec_type { | 1595 | enum ib_flow_spec_type { |
1594 | /* L2 headers*/ | 1596 | /* L2 headers*/ |
1595 | IB_FLOW_SPEC_ETH = 0x20, | 1597 | IB_FLOW_SPEC_ETH = 0x20, |
1596 | IB_FLOW_SPEC_IB = 0x22, | 1598 | IB_FLOW_SPEC_IB = 0x22, |
1597 | /* L3 header*/ | 1599 | /* L3 header*/ |
1598 | IB_FLOW_SPEC_IPV4 = 0x30, | 1600 | IB_FLOW_SPEC_IPV4 = 0x30, |
1599 | IB_FLOW_SPEC_IPV6 = 0x31, | 1601 | IB_FLOW_SPEC_IPV6 = 0x31, |
1600 | /* L4 headers*/ | 1602 | /* L4 headers*/ |
1601 | IB_FLOW_SPEC_TCP = 0x40, | 1603 | IB_FLOW_SPEC_TCP = 0x40, |
1602 | IB_FLOW_SPEC_UDP = 0x41 | 1604 | IB_FLOW_SPEC_UDP = 0x41, |
1605 | IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, | ||
1606 | IB_FLOW_SPEC_INNER = 0x100, | ||
1603 | }; | 1607 | }; |
1604 | #define IB_FLOW_SPEC_LAYER_MASK 0xF0 | 1608 | #define IB_FLOW_SPEC_LAYER_MASK 0xF0 |
1605 | #define IB_FLOW_SPEC_SUPPORT_LAYERS 4 | 1609 | #define IB_FLOW_SPEC_SUPPORT_LAYERS 8 |
1606 | 1610 | ||
1607 | /* Flow steering rule priority is set according to it's domain. | 1611 | /* Flow steering rule priority is set according to it's domain. |
1608 | * Lower domain value means higher priority. | 1612 | * Lower domain value means higher priority. |
@@ -1630,7 +1634,7 @@ struct ib_flow_eth_filter { | |||
1630 | }; | 1634 | }; |
1631 | 1635 | ||
1632 | struct ib_flow_spec_eth { | 1636 | struct ib_flow_spec_eth { |
1633 | enum ib_flow_spec_type type; | 1637 | u32 type; |
1634 | u16 size; | 1638 | u16 size; |
1635 | struct ib_flow_eth_filter val; | 1639 | struct ib_flow_eth_filter val; |
1636 | struct ib_flow_eth_filter mask; | 1640 | struct ib_flow_eth_filter mask; |
@@ -1644,7 +1648,7 @@ struct ib_flow_ib_filter { | |||
1644 | }; | 1648 | }; |
1645 | 1649 | ||
1646 | struct ib_flow_spec_ib { | 1650 | struct ib_flow_spec_ib { |
1647 | enum ib_flow_spec_type type; | 1651 | u32 type; |
1648 | u16 size; | 1652 | u16 size; |
1649 | struct ib_flow_ib_filter val; | 1653 | struct ib_flow_ib_filter val; |
1650 | struct ib_flow_ib_filter mask; | 1654 | struct ib_flow_ib_filter mask; |
@@ -1669,7 +1673,7 @@ struct ib_flow_ipv4_filter { | |||
1669 | }; | 1673 | }; |
1670 | 1674 | ||
1671 | struct ib_flow_spec_ipv4 { | 1675 | struct ib_flow_spec_ipv4 { |
1672 | enum ib_flow_spec_type type; | 1676 | u32 type; |
1673 | u16 size; | 1677 | u16 size; |
1674 | struct ib_flow_ipv4_filter val; | 1678 | struct ib_flow_ipv4_filter val; |
1675 | struct ib_flow_ipv4_filter mask; | 1679 | struct ib_flow_ipv4_filter mask; |
@@ -1687,7 +1691,7 @@ struct ib_flow_ipv6_filter { | |||
1687 | }; | 1691 | }; |
1688 | 1692 | ||
1689 | struct ib_flow_spec_ipv6 { | 1693 | struct ib_flow_spec_ipv6 { |
1690 | enum ib_flow_spec_type type; | 1694 | u32 type; |
1691 | u16 size; | 1695 | u16 size; |
1692 | struct ib_flow_ipv6_filter val; | 1696 | struct ib_flow_ipv6_filter val; |
1693 | struct ib_flow_ipv6_filter mask; | 1697 | struct ib_flow_ipv6_filter mask; |
@@ -1701,15 +1705,30 @@ struct ib_flow_tcp_udp_filter { | |||
1701 | }; | 1705 | }; |
1702 | 1706 | ||
1703 | struct ib_flow_spec_tcp_udp { | 1707 | struct ib_flow_spec_tcp_udp { |
1704 | enum ib_flow_spec_type type; | 1708 | u32 type; |
1705 | u16 size; | 1709 | u16 size; |
1706 | struct ib_flow_tcp_udp_filter val; | 1710 | struct ib_flow_tcp_udp_filter val; |
1707 | struct ib_flow_tcp_udp_filter mask; | 1711 | struct ib_flow_tcp_udp_filter mask; |
1708 | }; | 1712 | }; |
1709 | 1713 | ||
1714 | struct ib_flow_tunnel_filter { | ||
1715 | __be32 tunnel_id; | ||
1716 | u8 real_sz[0]; | ||
1717 | }; | ||
1718 | |||
1719 | /* ib_flow_spec_tunnel describes the Vxlan tunnel | ||
1720 | * the tunnel_id from val has the vni value | ||
1721 | */ | ||
1722 | struct ib_flow_spec_tunnel { | ||
1723 | u32 type; | ||
1724 | u16 size; | ||
1725 | struct ib_flow_tunnel_filter val; | ||
1726 | struct ib_flow_tunnel_filter mask; | ||
1727 | }; | ||
1728 | |||
1710 | union ib_flow_spec { | 1729 | union ib_flow_spec { |
1711 | struct { | 1730 | struct { |
1712 | enum ib_flow_spec_type type; | 1731 | u32 type; |
1713 | u16 size; | 1732 | u16 size; |
1714 | }; | 1733 | }; |
1715 | struct ib_flow_spec_eth eth; | 1734 | struct ib_flow_spec_eth eth; |
@@ -1717,6 +1736,7 @@ union ib_flow_spec { | |||
1717 | struct ib_flow_spec_ipv4 ipv4; | 1736 | struct ib_flow_spec_ipv4 ipv4; |
1718 | struct ib_flow_spec_tcp_udp tcp_udp; | 1737 | struct ib_flow_spec_tcp_udp tcp_udp; |
1719 | struct ib_flow_spec_ipv6 ipv6; | 1738 | struct ib_flow_spec_ipv6 ipv6; |
1739 | struct ib_flow_spec_tunnel tunnel; | ||
1720 | }; | 1740 | }; |
1721 | 1741 | ||
1722 | struct ib_flow_attr { | 1742 | struct ib_flow_attr { |
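Editor's note: with IB_FLOW_SPEC_VXLAN_TUNNEL and the new tunnel member of union ib_flow_spec, a steering rule can match on the VXLAN VNI carried in tunnel_id. A sketch of filling the new spec (kernel side; the exact-match mask over the 24-bit VNI is an assumption, and `vni` is a placeholder):

	union ib_flow_spec spec = {};

	spec.tunnel.type = IB_FLOW_SPEC_VXLAN_TUNNEL;
	spec.tunnel.size = sizeof(spec.tunnel);
	spec.tunnel.val.tunnel_id  = cpu_to_be32(vni);	    /* VNI to match */
	spec.tunnel.mask.tunnel_id = cpu_to_be32(0xffffff); /* exact match  */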
@@ -1933,7 +1953,8 @@ struct ib_device { | |||
1933 | struct ib_udata *udata); | 1953 | struct ib_udata *udata); |
1934 | int (*dealloc_pd)(struct ib_pd *pd); | 1954 | int (*dealloc_pd)(struct ib_pd *pd); |
1935 | struct ib_ah * (*create_ah)(struct ib_pd *pd, | 1955 | struct ib_ah * (*create_ah)(struct ib_pd *pd, |
1936 | struct ib_ah_attr *ah_attr); | 1956 | struct ib_ah_attr *ah_attr, |
1957 | struct ib_udata *udata); | ||
1937 | int (*modify_ah)(struct ib_ah *ah, | 1958 | int (*modify_ah)(struct ib_ah *ah, |
1938 | struct ib_ah_attr *ah_attr); | 1959 | struct ib_ah_attr *ah_attr); |
1939 | int (*query_ah)(struct ib_ah *ah, | 1960 | int (*query_ah)(struct ib_ah *ah, |
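Editor's note: the create_ah device operation now carries an ib_udata, which is what lets a provider hand vendor-specific data (such as the resolved DMAC in the mlx5 ABI later in this diff) back to userspace at AH creation; the per-driver signature changes above all follow from this. A sketch of a provider using it, with a hypothetical response struct and the AH build step elided:

	static struct ib_ah *demo_create_ah(struct ib_pd *pd,
					    struct ib_ah_attr *ah_attr,
					    struct ib_udata *udata)
	{
		struct demo_ah_resp resp = {};	/* hypothetical vendor response */

		/* ... build the AH as before ... */

		if (udata && udata->outlen >= sizeof(resp) &&
		    ib_copy_to_udata(udata, &resp, sizeof(resp)))
			return ERR_PTR(-EFAULT);

		return &ah->ibah;	/* 'ah' from the elided build step */
	}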
@@ -2581,6 +2602,24 @@ void ib_dealloc_pd(struct ib_pd *pd); | |||
2581 | struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); | 2602 | struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); |
2582 | 2603 | ||
2583 | /** | 2604 | /** |
2605 | * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header | ||
2606 | * work completion. | ||
2607 | * @hdr: the L3 header to parse | ||
2608 | * @net_type: type of header to parse | ||
2609 | * @sgid: place to store source gid | ||
2610 | * @dgid: place to store destination gid | ||
2611 | */ | ||
2612 | int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, | ||
2613 | enum rdma_network_type net_type, | ||
2614 | union ib_gid *sgid, union ib_gid *dgid); | ||
2615 | |||
2616 | /** | ||
2617 | * ib_get_rdma_header_version - Get the header version | ||
2618 | * @hdr: the L3 header to parse | ||
2619 | */ | ||
2620 | int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); | ||
2621 | |||
2622 | /** | ||
2584 | * ib_init_ah_from_wc - Initializes address handle attributes from a | 2623 | * ib_init_ah_from_wc - Initializes address handle attributes from a |
2585 | * work completion. | 2624 | * work completion. |
2586 | * @device: Device on which the received message arrived. | 2625 | * @device: Device on which the received message arrived. |
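Editor's note: the two new helpers split L3-header parsing out of AH initialization: one extracts the source/destination GID pair from a GRH or IPv4 header, the other reports which header version is present. A caller sketch (assuming the version helper returns the IP version field, 4 for IPv4):

	union ib_gid sgid, dgid;
	int ret;

	if (ib_get_rdma_header_version(hdr) == 4)
		net_type = RDMA_NETWORK_IPV4;

	ret = ib_get_gids_from_rdma_hdr(hdr, net_type, &sgid, &dgid);
	if (ret)
		return ret;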
@@ -3357,4 +3396,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, | |||
3357 | void ib_drain_rq(struct ib_qp *qp); | 3396 | void ib_drain_rq(struct ib_qp *qp); |
3358 | void ib_drain_sq(struct ib_qp *qp); | 3397 | void ib_drain_sq(struct ib_qp *qp); |
3359 | void ib_drain_qp(struct ib_qp *qp); | 3398 | void ib_drain_qp(struct ib_qp *qp); |
3399 | |||
3400 | int ib_resolve_eth_dmac(struct ib_device *device, | ||
3401 | struct ib_ah_attr *ah_attr); | ||
3360 | #endif /* IB_VERBS_H */ | 3402 | #endif /* IB_VERBS_H */ |
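Editor's note: ib_resolve_eth_dmac() is now exported from ib_verbs.h with a signature keyed on the device and AH attributes alone rather than a QP and its attribute mask, so AH-creation paths can resolve the destination MAC without a QP in hand. A sketch of the new call shape from such a path:

	ret = ib_resolve_eth_dmac(pd->device, ah_attr);
	if (ret)
		return ERR_PTR(ret);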
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 25225ebbc7d5..dfdfe4e92d31 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #define IB_USER_VERBS_H | 37 | #define IB_USER_VERBS_H |
38 | 38 | ||
39 | #include <linux/types.h> | 39 | #include <linux/types.h> |
40 | #include <rdma/ib_verbs.h> | ||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * Increment this value if any changes that break userspace ABI | 43 | * Increment this value if any changes that break userspace ABI |
@@ -93,6 +94,7 @@ enum { | |||
93 | IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, | 94 | IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, |
94 | IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ, | 95 | IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ, |
95 | IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP, | 96 | IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP, |
97 | IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP, | ||
96 | IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, | 98 | IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, |
97 | IB_USER_VERBS_EX_CMD_DESTROY_FLOW, | 99 | IB_USER_VERBS_EX_CMD_DESTROY_FLOW, |
98 | IB_USER_VERBS_EX_CMD_CREATE_WQ, | 100 | IB_USER_VERBS_EX_CMD_CREATE_WQ, |
@@ -545,6 +547,14 @@ enum { | |||
545 | IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE, | 547 | IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE, |
546 | }; | 548 | }; |
547 | 549 | ||
550 | enum { | ||
551 | IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN | ||
552 | }; | ||
553 | |||
554 | enum { | ||
555 | IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT | ||
556 | }; | ||
557 | |||
548 | struct ib_uverbs_ex_create_qp { | 558 | struct ib_uverbs_ex_create_qp { |
549 | __u64 user_handle; | 559 | __u64 user_handle; |
550 | __u32 pd_handle; | 560 | __u32 pd_handle; |
@@ -684,9 +694,20 @@ struct ib_uverbs_modify_qp { | |||
684 | __u64 driver_data[0]; | 694 | __u64 driver_data[0]; |
685 | }; | 695 | }; |
686 | 696 | ||
697 | struct ib_uverbs_ex_modify_qp { | ||
698 | struct ib_uverbs_modify_qp base; | ||
699 | __u32 rate_limit; | ||
700 | __u32 reserved; | ||
701 | }; | ||
702 | |||
687 | struct ib_uverbs_modify_qp_resp { | 703 | struct ib_uverbs_modify_qp_resp { |
688 | }; | 704 | }; |
689 | 705 | ||
706 | struct ib_uverbs_ex_modify_qp_resp { | ||
707 | __u32 comp_mask; | ||
708 | __u32 response_length; | ||
709 | }; | ||
710 | |||
690 | struct ib_uverbs_destroy_qp { | 711 | struct ib_uverbs_destroy_qp { |
691 | __u64 response; | 712 | __u64 response; |
692 | __u32 qp_handle; | 713 | __u32 qp_handle; |
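Editor's note: the extended modify-QP command wraps the legacy request and appends the rate-limit field; the IB_USER_LEGACY_LAST_QP_ATTR_MASK / IB_USER_LAST_QP_ATTR_MASK pair above marks where the legacy attribute-mask space ends, so the kernel can reject legacy commands that set extended-only bits. A sketch of the wire layout from userspace (normally libibverbs builds this):

	struct ib_uverbs_ex_modify_qp cmd = {};

	cmd.base.qp_handle = qp_handle;
	cmd.base.attr_mask = IB_QP_RATE_LIMIT;	/* extended-only bit */
	cmd.rate_limit     = 100000;		/* kbps on mlx5 */
	cmd.reserved       = 0;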
@@ -908,6 +929,23 @@ struct ib_uverbs_flow_spec_ipv6 { | |||
908 | struct ib_uverbs_flow_ipv6_filter mask; | 929 | struct ib_uverbs_flow_ipv6_filter mask; |
909 | }; | 930 | }; |
910 | 931 | ||
932 | struct ib_uverbs_flow_tunnel_filter { | ||
933 | __be32 tunnel_id; | ||
934 | }; | ||
935 | |||
936 | struct ib_uverbs_flow_spec_tunnel { | ||
937 | union { | ||
938 | struct ib_uverbs_flow_spec_hdr hdr; | ||
939 | struct { | ||
940 | __u32 type; | ||
941 | __u16 size; | ||
942 | __u16 reserved; | ||
943 | }; | ||
944 | }; | ||
945 | struct ib_uverbs_flow_tunnel_filter val; | ||
946 | struct ib_uverbs_flow_tunnel_filter mask; | ||
947 | }; | ||
948 | |||
911 | struct ib_uverbs_flow_attr { | 949 | struct ib_uverbs_flow_attr { |
912 | __u32 type; | 950 | __u32 type; |
913 | __u16 size; | 951 | __u16 size; |
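Editor's note: the userspace tunnel spec uses the same anonymous-union trick as the other uverbs flow specs: the leading type/size/reserved triple overlays struct ib_uverbs_flow_spec_hdr, so generic code can walk a chain of specs through hdr while typed code fills the named fields. Filling one from userspace might look like this (`vni` is a placeholder):

	struct ib_uverbs_flow_spec_tunnel tspec = {};

	tspec.type = IB_FLOW_SPEC_VXLAN_TUNNEL;
	tspec.size = sizeof(tspec);
	tspec.val.tunnel_id  = htobe32(vni);
	tspec.mask.tunnel_id = htobe32(0xffffff);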
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index f5d0f4e83b59..fae6cdaeb56d 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h | |||
@@ -82,6 +82,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask { | |||
82 | 82 | ||
83 | enum mlx5_user_cmds_supp_uhw { | 83 | enum mlx5_user_cmds_supp_uhw { |
84 | MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, | 84 | MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, |
85 | MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, | ||
85 | }; | 86 | }; |
86 | 87 | ||
87 | struct mlx5_ib_alloc_ucontext_resp { | 88 | struct mlx5_ib_alloc_ucontext_resp { |
@@ -124,18 +125,47 @@ struct mlx5_ib_rss_caps { | |||
124 | __u8 reserved[7]; | 125 | __u8 reserved[7]; |
125 | }; | 126 | }; |
126 | 127 | ||
128 | enum mlx5_ib_cqe_comp_res_format { | ||
129 | MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0, | ||
130 | MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1, | ||
131 | MLX5_IB_CQE_RES_RESERVED = 1 << 2, | ||
132 | }; | ||
133 | |||
134 | struct mlx5_ib_cqe_comp_caps { | ||
135 | __u32 max_num; | ||
136 | __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */ | ||
137 | }; | ||
138 | |||
139 | struct mlx5_packet_pacing_caps { | ||
140 | __u32 qp_rate_limit_min; | ||
141 | __u32 qp_rate_limit_max; /* In kbps */ | ||
142 | |||
143 | /* Corresponding bit will be set if qp type from | ||
144 | * 'enum ib_qp_type' is supported, e.g. | ||
145 | * supported_qpts |= 1 << IB_QPT_RAW_PACKET | ||
146 | */ | ||
147 | __u32 supported_qpts; | ||
148 | __u32 reserved; | ||
149 | }; | ||
150 | |||
127 | struct mlx5_ib_query_device_resp { | 151 | struct mlx5_ib_query_device_resp { |
128 | __u32 comp_mask; | 152 | __u32 comp_mask; |
129 | __u32 response_length; | 153 | __u32 response_length; |
130 | struct mlx5_ib_tso_caps tso_caps; | 154 | struct mlx5_ib_tso_caps tso_caps; |
131 | struct mlx5_ib_rss_caps rss_caps; | 155 | struct mlx5_ib_rss_caps rss_caps; |
156 | struct mlx5_ib_cqe_comp_caps cqe_comp_caps; | ||
157 | struct mlx5_packet_pacing_caps packet_pacing_caps; | ||
158 | __u32 mlx5_ib_support_multi_pkt_send_wqes; | ||
159 | __u32 reserved; | ||
132 | }; | 160 | }; |
133 | 161 | ||
134 | struct mlx5_ib_create_cq { | 162 | struct mlx5_ib_create_cq { |
135 | __u64 buf_addr; | 163 | __u64 buf_addr; |
136 | __u64 db_addr; | 164 | __u64 db_addr; |
137 | __u32 cqe_size; | 165 | __u32 cqe_size; |
138 | __u32 reserved; /* explicit padding (optional on i386) */ | 166 | __u8 cqe_comp_en; |
167 | __u8 cqe_comp_res_format; | ||
168 | __u16 reserved; /* explicit padding (optional on i386) */ | ||
139 | }; | 169 | }; |
140 | 170 | ||
141 | struct mlx5_ib_create_cq_resp { | 171 | struct mlx5_ib_create_cq_resp { |
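Editor's note: the extended query-device response now reports CQE-compression and packet-pacing capabilities (rate limits in kbps), and create_cq gained per-CQ knobs to opt in to compression. A userspace sketch tying the two together, with capability plumbing and transport elided:

	struct mlx5_ib_query_device_resp resp; /* filled by the kernel */
	struct mlx5_ib_create_cq cmd = {};

	if (resp.packet_pacing_caps.supported_qpts & (1 << IB_QPT_RAW_PACKET))
		printf("rate limit range: %u..%u kbps\n",
		       resp.packet_pacing_caps.qp_rate_limit_min,
		       resp.packet_pacing_caps.qp_rate_limit_max);

	if (resp.cqe_comp_caps.supported_format & MLX5_IB_CQE_RES_FORMAT_HASH) {
		cmd.cqe_comp_en = 1;
		cmd.cqe_comp_res_format = MLX5_IB_CQE_RES_FORMAT_HASH;
	}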
@@ -232,6 +262,12 @@ struct mlx5_ib_create_wq { | |||
232 | __u32 reserved; | 262 | __u32 reserved; |
233 | }; | 263 | }; |
234 | 264 | ||
265 | struct mlx5_ib_create_ah_resp { | ||
266 | __u32 response_length; | ||
267 | __u8 dmac[ETH_ALEN]; | ||
268 | __u8 reserved[6]; | ||
269 | }; | ||
270 | |||
235 | struct mlx5_ib_create_wq_resp { | 271 | struct mlx5_ib_create_wq_resp { |
236 | __u32 response_length; | 272 | __u32 response_length; |
237 | __u32 reserved; | 273 | __u32 reserved; |
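Editor's note: MLX5_USER_CMDS_SUPP_UHW_CREATE_AH (earlier in this file) advertises at context allocation that the kernel accepts vendor data on create-AH, and struct mlx5_ib_create_ah_resp is that vendor response: the kernel returns the resolved destination MAC so userspace need not repeat neighbour resolution per AH. A consumption sketch, assuming the ucontext response exposes the supported-command bits in a cmds_supp_uhw field as it does for the query-device flag, and with `eth_hdr` as a hypothetical packet header:

	struct mlx5_ib_create_ah_resp ah_resp;

	if (ctx_resp.cmds_supp_uhw & MLX5_USER_CMDS_SUPP_UHW_CREATE_AH) {
		/* ah_resp was filled via the create_ah udata channel */
		memcpy(eth_hdr->dmac, ah_resp.dmac, ETH_ALEN);
	}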