diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-04 20:26:31 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-04 20:26:31 -0400 |
| commit | 84e39eeb08c0ea7e9ec43ac820bf76a6fe8ecbad (patch) | |
| tree | 680f704b29ec68cee50a6456088ffac1902bbf95 /include/rdma | |
| parent | 0cda611386b2fcbf8bb32e9a5d82bfed4856fc36 (diff) | |
| parent | 7c41765d8c30bdf1b056533c0521ecdec0ec11fa (diff) | |
Merge tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull second round of rdma updates from Doug Ledford:
"This can be split out into just two categories:
- fixes to the RDMA R/W API in regards to SG list length limits
(about 5 patches)
- fixes/features for the Intel hfi1 driver (everything else)
The hfi1 driver is still being brought to full feature support by
Intel, and they have a lot of people working on it, so that amounts to
almost the entirety of this pull request"
* tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (84 commits)
IB/hfi1: Add cache evict LRU list
IB/hfi1: Fix memory leak during unexpected shutdown
IB/hfi1: Remove unneeded mm argument in remove function
IB/hfi1: Consistently call ops->remove outside spinlock
IB/hfi1: Use evict mmu rb operation
IB/hfi1: Add evict operation to the mmu rb handler
IB/hfi1: Fix TID caching actions
IB/hfi1: Make the cache handler own its rb tree root
IB/hfi1: Make use of mm consistent
IB/hfi1: Fix user SDMA racy user request claim
IB/hfi1: Fix error condition that needs to clean up
IB/hfi1: Release node on insert failure
IB/hfi1: Validate SDMA user iovector count
IB/hfi1: Validate SDMA user request index
IB/hfi1: Use the same capability state for all shared contexts
IB/hfi1: Prevent null pointer dereference
IB/hfi1: Rename TID mmu_rb_* functions
IB/hfi1: Remove unneeded empty check in hfi1_mmu_rb_unregister()
IB/hfi1: Restructure hfi1_file_open
IB/hfi1: Make iovec loop index easy to understand
...
Diffstat (limited to 'include/rdma')
| -rw-r--r-- | include/rdma/ib_verbs.h | 6 | ||||
| -rw-r--r-- | include/rdma/opa_port_info.h | 16 | ||||
| -rw-r--r-- | include/rdma/rdma_vt.h | 7 | ||||
| -rw-r--r-- | include/rdma/rdmavt_mr.h | 1 | ||||
| -rw-r--r-- | include/rdma/rdmavt_qp.h | 92 |
5 files changed, 96 insertions, 26 deletions
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 94a0bc5b5bdd..8e90dd28bb75 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -1490,6 +1490,10 @@ struct ib_rwq_ind_table_init_attr { | |||
| 1490 | struct ib_wq **ind_tbl; | 1490 | struct ib_wq **ind_tbl; |
| 1491 | }; | 1491 | }; |
| 1492 | 1492 | ||
| 1493 | /* | ||
| 1494 | * @max_write_sge: Maximum SGE elements per RDMA WRITE request. | ||
| 1495 | * @max_read_sge: Maximum SGE elements per RDMA READ request. | ||
| 1496 | */ | ||
| 1493 | struct ib_qp { | 1497 | struct ib_qp { |
| 1494 | struct ib_device *device; | 1498 | struct ib_device *device; |
| 1495 | struct ib_pd *pd; | 1499 | struct ib_pd *pd; |
| @@ -1511,6 +1515,8 @@ struct ib_qp { | |||
| 1511 | void (*event_handler)(struct ib_event *, void *); | 1515 | void (*event_handler)(struct ib_event *, void *); |
| 1512 | void *qp_context; | 1516 | void *qp_context; |
| 1513 | u32 qp_num; | 1517 | u32 qp_num; |
| 1518 | u32 max_write_sge; | ||
| 1519 | u32 max_read_sge; | ||
| 1514 | enum ib_qp_type qp_type; | 1520 | enum ib_qp_type qp_type; |
| 1515 | struct ib_rwq_ind_table *rwq_ind_tbl; | 1521 | struct ib_rwq_ind_table *rwq_ind_tbl; |
| 1516 | }; | 1522 | }; |
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 2b95c2c336eb..9303e0e4f508 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h | |||
| @@ -33,11 +33,6 @@ | |||
| 33 | #if !defined(OPA_PORT_INFO_H) | 33 | #if !defined(OPA_PORT_INFO_H) |
| 34 | #define OPA_PORT_INFO_H | 34 | #define OPA_PORT_INFO_H |
| 35 | 35 | ||
| 36 | /* Temporary until HFI driver is updated */ | ||
| 37 | #ifndef USE_PI_LED_ENABLE | ||
| 38 | #define USE_PI_LED_ENABLE 0 | ||
| 39 | #endif | ||
| 40 | |||
| 41 | #define OPA_PORT_LINK_MODE_NOP 0 /* No change */ | 36 | #define OPA_PORT_LINK_MODE_NOP 0 /* No change */ |
| 42 | #define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */ | 37 | #define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */ |
| 43 | 38 | ||
| @@ -274,23 +269,12 @@ enum port_info_field_masks { | |||
| 274 | OPA_PI_MASK_MTU_CAP = 0x0F, | 269 | OPA_PI_MASK_MTU_CAP = 0x0F, |
| 275 | }; | 270 | }; |
| 276 | 271 | ||
| 277 | #if USE_PI_LED_ENABLE | ||
| 278 | struct opa_port_states { | 272 | struct opa_port_states { |
| 279 | u8 reserved; | 273 | u8 reserved; |
| 280 | u8 ledenable_offlinereason; /* 1 res, 1 bit, 6 bits */ | 274 | u8 ledenable_offlinereason; /* 1 res, 1 bit, 6 bits */ |
| 281 | u8 reserved2; | 275 | u8 reserved2; |
| 282 | u8 portphysstate_portstate; /* 4 bits, 4 bits */ | 276 | u8 portphysstate_portstate; /* 4 bits, 4 bits */ |
| 283 | }; | 277 | }; |
| 284 | #define PI_LED_ENABLE_SUP 1 | ||
| 285 | #else | ||
| 286 | struct opa_port_states { | ||
| 287 | u8 reserved; | ||
| 288 | u8 offline_reason; /* 2 res, 6 bits */ | ||
| 289 | u8 reserved2; | ||
| 290 | u8 portphysstate_portstate; /* 4 bits, 4 bits */ | ||
| 291 | }; | ||
| 292 | #define PI_LED_ENABLE_SUP 0 | ||
| 293 | #endif | ||
| 294 | 278 | ||
| 295 | struct opa_port_state_info { | 279 | struct opa_port_state_info { |
| 296 | struct opa_port_states port_states; | 280 | struct opa_port_states port_states; |
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 9c9a27d42aaa..e31502107a58 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h | |||
| @@ -158,6 +158,7 @@ struct rvt_driver_params { | |||
| 158 | u32 max_mad_size; | 158 | u32 max_mad_size; |
| 159 | u8 qos_shift; | 159 | u8 qos_shift; |
| 160 | u8 max_rdma_atomic; | 160 | u8 max_rdma_atomic; |
| 161 | u8 reserved_operations; | ||
| 161 | }; | 162 | }; |
| 162 | 163 | ||
| 163 | /* Protection domain */ | 164 | /* Protection domain */ |
| @@ -351,6 +352,9 @@ struct rvt_dev_info { | |||
| 351 | /* Driver specific properties */ | 352 | /* Driver specific properties */ |
| 352 | struct rvt_driver_params dparms; | 353 | struct rvt_driver_params dparms; |
| 353 | 354 | ||
| 355 | /* post send table */ | ||
| 356 | const struct rvt_operation_params *post_parms; | ||
| 357 | |||
| 354 | struct rvt_mregion __rcu *dma_mr; | 358 | struct rvt_mregion __rcu *dma_mr; |
| 355 | struct rvt_lkey_table lkey_table; | 359 | struct rvt_lkey_table lkey_table; |
| 356 | 360 | ||
| @@ -484,6 +488,9 @@ void rvt_unregister_device(struct rvt_dev_info *rvd); | |||
| 484 | int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); | 488 | int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); |
| 485 | int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port, | 489 | int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port, |
| 486 | int port_index, u16 *pkey_table); | 490 | int port_index, u16 *pkey_table); |
| 491 | int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key, | ||
| 492 | int access); | ||
| 493 | int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey); | ||
| 487 | int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, | 494 | int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, |
| 488 | u32 len, u64 vaddr, u32 rkey, int acc); | 495 | u32 len, u64 vaddr, u32 rkey, int acc); |
| 489 | int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, | 496 | int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, |
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h index 5edffdca8c53..6b3c6c8b6b77 100644 --- a/include/rdma/rdmavt_mr.h +++ b/include/rdma/rdmavt_mr.h | |||
| @@ -81,6 +81,7 @@ struct rvt_mregion { | |||
| 81 | u32 mapsz; /* size of the map array */ | 81 | u32 mapsz; /* size of the map array */ |
| 82 | u8 page_shift; /* 0 - non unform/non powerof2 sizes */ | 82 | u8 page_shift; /* 0 - non unform/non powerof2 sizes */ |
| 83 | u8 lkey_published; /* in global table */ | 83 | u8 lkey_published; /* in global table */ |
| 84 | atomic_t lkey_invalid; /* true if current lkey is invalid */ | ||
| 84 | struct completion comp; /* complete when refcount goes to zero */ | 85 | struct completion comp; /* complete when refcount goes to zero */ |
| 85 | atomic_t refcount; | 86 | atomic_t refcount; |
| 86 | struct rvt_segarray *map[0]; /* the segments */ | 87 | struct rvt_segarray *map[0]; /* the segments */ |
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 6d23b879416a..bd34d0b56bf7 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h | |||
| @@ -145,6 +145,12 @@ | |||
| 145 | (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND) | 145 | (RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND) |
| 146 | 146 | ||
| 147 | /* | 147 | /* |
| 148 | * Internal send flags | ||
| 149 | */ | ||
| 150 | #define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START | ||
| 151 | #define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1) | ||
| 152 | |||
| 153 | /* | ||
| 148 | * Send work request queue entry. | 154 | * Send work request queue entry. |
| 149 | * The size of the sg_list is determined when the QP is created and stored | 155 | * The size of the sg_list is determined when the QP is created and stored |
| 150 | * in qp->s_max_sge. | 156 | * in qp->s_max_sge. |
| @@ -216,23 +222,43 @@ struct rvt_mmap_info { | |||
| 216 | * to send a RDMA read response or atomic operation. | 222 | * to send a RDMA read response or atomic operation. |
| 217 | */ | 223 | */ |
| 218 | struct rvt_ack_entry { | 224 | struct rvt_ack_entry { |
| 219 | u8 opcode; | 225 | struct rvt_sge rdma_sge; |
| 220 | u8 sent; | 226 | u64 atomic_data; |
| 221 | u32 psn; | 227 | u32 psn; |
| 222 | u32 lpsn; | 228 | u32 lpsn; |
| 223 | union { | 229 | u8 opcode; |
| 224 | struct rvt_sge rdma_sge; | 230 | u8 sent; |
| 225 | u64 atomic_data; | ||
| 226 | }; | ||
| 227 | }; | 231 | }; |
| 228 | 232 | ||
| 229 | #define RC_QP_SCALING_INTERVAL 5 | 233 | #define RC_QP_SCALING_INTERVAL 5 |
| 230 | 234 | ||
| 231 | /* | 235 | #define RVT_OPERATION_PRIV 0x00000001 |
| 232 | * Variables prefixed with s_ are for the requester (sender). | 236 | #define RVT_OPERATION_ATOMIC 0x00000002 |
| 233 | * Variables prefixed with r_ are for the responder (receiver). | 237 | #define RVT_OPERATION_ATOMIC_SGE 0x00000004 |
| 234 | * Variables prefixed with ack_ are for responder replies. | 238 | #define RVT_OPERATION_LOCAL 0x00000008 |
| 239 | #define RVT_OPERATION_USE_RESERVE 0x00000010 | ||
| 240 | |||
| 241 | #define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1) | ||
| 242 | |||
| 243 | /** | ||
| 244 | * rvt_operation_params - op table entry | ||
| 245 | * @length - the length to copy into the swqe entry | ||
| 246 | * @qpt_support - a bit mask indicating QP type support | ||
| 247 | * @flags - RVT_OPERATION flags (see above) | ||
| 235 | * | 248 | * |
| 249 | * This supports table driven post send so that | ||
| 250 | * the driver can have differing and potentially | ||
| 251 | * different sets of operations. | ||
| 252 | * | ||
| 253 | **/ | ||
| 254 | |||
| 255 | struct rvt_operation_params { | ||
| 256 | size_t length; | ||
| 257 | u32 qpt_support; | ||
| 258 | u32 flags; | ||
| 259 | }; | ||
| 260 | |||
| 261 | /* | ||
| 236 | * Common variables are protected by both r_rq.lock and s_lock in that order | 262 | * Common variables are protected by both r_rq.lock and s_lock in that order |
| 237 | * which only happens in modify_qp() or changing the QP 'state'. | 263 | * which only happens in modify_qp() or changing the QP 'state'. |
| 238 | */ | 264 | */ |
| @@ -307,6 +333,7 @@ struct rvt_qp { | |||
| 307 | u32 s_next_psn; /* PSN for next request */ | 333 | u32 s_next_psn; /* PSN for next request */ |
| 308 | u32 s_avail; /* number of entries avail */ | 334 | u32 s_avail; /* number of entries avail */ |
| 309 | u32 s_ssn; /* SSN of tail entry */ | 335 | u32 s_ssn; /* SSN of tail entry */ |
| 336 | atomic_t s_reserved_used; /* reserved entries in use */ | ||
| 310 | 337 | ||
| 311 | spinlock_t s_lock ____cacheline_aligned_in_smp; | 338 | spinlock_t s_lock ____cacheline_aligned_in_smp; |
| 312 | u32 s_flags; | 339 | u32 s_flags; |
| @@ -343,6 +370,8 @@ struct rvt_qp { | |||
| 343 | struct rvt_sge_state s_ack_rdma_sge; | 370 | struct rvt_sge_state s_ack_rdma_sge; |
| 344 | struct timer_list s_timer; | 371 | struct timer_list s_timer; |
| 345 | 372 | ||
| 373 | atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */ | ||
| 374 | |||
| 346 | /* | 375 | /* |
| 347 | * This sge list MUST be last. Do not add anything below here. | 376 | * This sge list MUST be last. Do not add anything below here. |
| 348 | */ | 377 | */ |
| @@ -436,6 +465,49 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n) | |||
| 436 | rq->max_sge * sizeof(struct ib_sge)) * n); | 465 | rq->max_sge * sizeof(struct ib_sge)) * n); |
| 437 | } | 466 | } |
| 438 | 467 | ||
| 468 | /** | ||
| 469 | * rvt_qp_wqe_reserve - reserve operation | ||
| 470 | * @qp - the rvt qp | ||
| 471 | * @wqe - the send wqe | ||
| 472 | * | ||
| 473 | * This routine used in post send to record | ||
| 474 | * a wqe relative reserved operation use. | ||
| 475 | */ | ||
| 476 | static inline void rvt_qp_wqe_reserve( | ||
| 477 | struct rvt_qp *qp, | ||
| 478 | struct rvt_swqe *wqe) | ||
| 479 | { | ||
| 480 | wqe->wr.send_flags |= RVT_SEND_RESERVE_USED; | ||
| 481 | atomic_inc(&qp->s_reserved_used); | ||
| 482 | } | ||
| 483 | |||
| 484 | /** | ||
| 485 | * rvt_qp_wqe_unreserve - clean reserved operation | ||
| 486 | * @qp - the rvt qp | ||
| 487 | * @wqe - the send wqe | ||
| 488 | * | ||
| 489 | * This decrements the reserve use count. | ||
| 490 | * | ||
| 491 | * This call MUST precede the change to | ||
| 492 | * s_last to ensure that post send sees a stable | ||
| 493 | * s_avail. | ||
| 494 | * | ||
| 495 | * An smp_mb__after_atomic() is used to ensure | ||
| 496 | * the compiler does not juggle the order of the s_last | ||
| 497 | * ring index and the decrementing of s_reserved_used. | ||
| 498 | */ | ||
| 499 | static inline void rvt_qp_wqe_unreserve( | ||
| 500 | struct rvt_qp *qp, | ||
| 501 | struct rvt_swqe *wqe) | ||
| 502 | { | ||
| 503 | if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) { | ||
| 504 | wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED; | ||
| 505 | atomic_dec(&qp->s_reserved_used); | ||
| 506 | /* ensure no compiler reordering up to s_last change */ | ||
| 507 | smp_mb__after_atomic(); | ||
| 508 | } | ||
| 509 | } | ||
| 510 | |||
| 439 | extern const int ib_rvt_state_ops[]; | 511 | extern const int ib_rvt_state_ops[]; |
| 440 | 512 | ||
| 441 | struct rvt_dev_info; | 513 | struct rvt_dev_info; |
