author		Nicholas Bellinger <nab@linux-iscsi.org>	2014-03-13 15:01:58 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2014-03-13 15:01:58 -0400
commit		5aad2145ac42a12ae8bf8383d2e0319c172d99a7 (patch)
tree		2c4a38a9e13222aed73b14c05c159955a88c6b1f /include
parent		fa389e220254c69ffae0d403eac4146171062d08 (diff)
parent		2dea909444c294f55316c068906945ef38980ef3 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband into for-next
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mlx5/cq.h		1
-rw-r--r--	include/linux/mlx5/device.h	43
-rw-r--r--	include/linux/mlx5/driver.h	41
-rw-r--r--	include/linux/mlx5/qp.h		67
-rw-r--r--	include/rdma/ib_umem.h		11
-rw-r--r--	include/rdma/ib_verbs.h		187
6 files changed, 341 insertions(+), 9 deletions(-)
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2202c7f72b75..f6b17ac601bd 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -80,6 +80,7 @@ enum {
 	MLX5_CQE_RESP_SEND_IMM	= 3,
 	MLX5_CQE_RESP_SEND_INV	= 4,
 	MLX5_CQE_RESIZE_CQ	= 5,
+	MLX5_CQE_SIG_ERR	= 12,
 	MLX5_CQE_REQ_ERR	= 13,
 	MLX5_CQE_RESP_ERR	= 14,
 	MLX5_CQE_INVALID	= 15,
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 817a6fae6d2c..407bdb67fd4f 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -48,6 +48,8 @@ enum {
 	MLX5_MAX_COMMANDS		= 32,
 	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
 	MLX5_PCI_CMD_XPORT		= 7,
+	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
+	MLX5_MAX_PSVS			= 4,
 };
 
 enum {
@@ -116,6 +118,7 @@ enum {
 	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
 	MLX5_MKEY_MASK_PD		= 1ull << 7,
 	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
+	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
 	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
 	MLX5_MKEY_MASK_KEY		= 1ull << 13,
 	MLX5_MKEY_MASK_QPN		= 1ull << 14,
@@ -555,6 +558,23 @@ struct mlx5_cqe64 {
 	u8		op_own;
 };
 
+struct mlx5_sig_err_cqe {
+	u8		rsvd0[16];
+	__be32		expected_trans_sig;
+	__be32		actual_trans_sig;
+	__be32		expected_reftag;
+	__be32		actual_reftag;
+	__be16		syndrome;
+	u8		rsvd22[2];
+	__be32		mkey;
+	__be64		err_offset;
+	u8		rsvd30[8];
+	__be32		qpn;
+	u8		rsvd38[2];
+	u8		signature;
+	u8		op_own;
+};
+
 struct mlx5_wqe_srq_next_seg {
 	u8			rsvd0[2];
 	__be16			next_wqe_index;
@@ -936,4 +956,27 @@ enum {
 	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 << 0
 };
 
+struct mlx5_allocate_psv_in {
+	struct mlx5_inbox_hdr	hdr;
+	__be32			npsv_pd;
+	__be32			rsvd_psv0;
+};
+
+struct mlx5_allocate_psv_out {
+	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
+	__be32			psv_idx[4];
+};
+
+struct mlx5_destroy_psv_in {
+	struct mlx5_inbox_hdr	hdr;
+	__be32			psv_number;
+	u8			rsvd[4];
+};
+
+struct mlx5_destroy_psv_out {
+	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
 #endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 130bc8d77fa5..93cef6313e72 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -401,6 +401,26 @@ struct mlx5_eq {
 	struct mlx5_rsc_debug	*dbg;
 };
 
+struct mlx5_core_psv {
+	u32			psv_idx;
+	struct psv_layout {
+		u32		pd;
+		u16		syndrome;
+		u16		reserved;
+		u16		bg;
+		u16		app_tag;
+		u32		ref_tag;
+	} psv;
+};
+
+struct mlx5_core_sig_ctx {
+	struct mlx5_core_psv	psv_memory;
+	struct mlx5_core_psv	psv_wire;
+	struct ib_sig_err	err_item;
+	bool			sig_status_checked;
+	bool			sig_err_exists;
+	u32			sigerr_count;
+};
 
 struct mlx5_core_mr {
 	u64			iova;
@@ -475,6 +495,13 @@ struct mlx5_srq_table {
 	struct radix_tree_root	tree;
 };
 
+struct mlx5_mr_table {
+	/* protect radix tree
+	 */
+	rwlock_t		lock;
+	struct radix_tree_root	tree;
+};
+
 struct mlx5_priv {
 	char			name[MLX5_MAX_NAME_LEN];
 	struct mlx5_eq_table	eq_table;
@@ -504,6 +531,10 @@ struct mlx5_priv {
 	struct mlx5_cq_table	cq_table;
 	/* end: cq staff */
 
+	/* start: mr staff */
+	struct mlx5_mr_table	mr_table;
+	/* end: mr staff */
+
 	/* start: alloc staff */
 	struct mutex		pgdir_mutex;
 	struct list_head	pgdir_list;
@@ -651,6 +682,11 @@ static inline void mlx5_vfree(const void *addr)
 		kfree(addr);
 }
 
+static inline u32 mlx5_base_mkey(const u32 key)
+{
+	return key & 0xffffff00u;
+}
+
 int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
 void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -685,6 +721,8 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 			struct mlx5_query_srq_mbox_out *out);
 int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 		      u16 lwm, int is_srq);
+void mlx5_init_mr_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
 			  struct mlx5_create_mkey_mbox_in *in, int inlen,
 			  mlx5_cmd_cbk_t callback, void *context,
@@ -746,6 +784,9 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 const char *mlx5_command_str(int command);
 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
+			 int npsvs, u32 *sig_index);
+int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
 
 static inline u32 mlx5_mkey_to_idx(u32 mkey)
 {
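Side note, not part of the merge: the two PSV commands above are the low-level building blocks a signature MR needs, one protection/signature value per domain. A minimal sketch of how a consumer might pair them, assuming an initialized mlx5_core_dev and a valid pdn; the helper names are hypothetical:

#include <linux/mlx5/driver.h>

/* Hypothetical helper: allocate one PSV per signature domain (memory + wire).
 * npsvs may be at most MLX5_MAX_PSVS (4); two suffice for T10-DIF handover.
 */
static int alloc_sig_psvs(struct mlx5_core_dev *dev, u32 pdn, u32 psv_idx[2])
{
	return mlx5_core_create_psv(dev, pdn, 2, psv_idx);
}

/* Hypothetical helper: release both PSVs on teardown. */
static void free_sig_psvs(struct mlx5_core_dev *dev, u32 psv_idx[2])
{
	mlx5_core_destroy_psv(dev, psv_idx[0]);
	mlx5_core_destroy_psv(dev, psv_idx[1]);
}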
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index d51eff713549..f829ad80ff28 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -37,6 +37,9 @@
 #include <linux/mlx5/driver.h>
 
 #define MLX5_INVALID_LKEY	0x100
+#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
+#define MLX5_DIF_SIZE		8
+#define MLX5_STRIDE_BLOCK_OP	0x400
 
 enum mlx5_qp_optpar {
 	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
@@ -151,6 +154,11 @@ enum {
 	MLX5_SND_DBR	= 1,
 };
 
+enum {
+	MLX5_FLAGS_INLINE	= 1<<7,
+	MLX5_FLAGS_CHECK_FREE	= 1<<5,
+};
+
 struct mlx5_wqe_fmr_seg {
 	__be32			flags;
 	__be32			mem_key;
@@ -278,6 +286,60 @@ struct mlx5_wqe_inline_seg {
 	__be32	byte_count;
 };
 
+struct mlx5_bsf {
+	struct mlx5_bsf_basic {
+		u8		bsf_size_sbs;
+		u8		check_byte_mask;
+		union {
+			u8	copy_byte_mask;
+			u8	bs_selector;
+			u8	rsvd_wflags;
+		} wire;
+		union {
+			u8	bs_selector;
+			u8	rsvd_mflags;
+		} mem;
+		__be32		raw_data_size;
+		__be32		w_bfs_psv;
+		__be32		m_bfs_psv;
+	} basic;
+	struct mlx5_bsf_ext {
+		__be32		t_init_gen_pro_size;
+		__be32		rsvd_epi_size;
+		__be32		w_tfs_psv;
+		__be32		m_tfs_psv;
+	} ext;
+	struct mlx5_bsf_inl {
+		__be32		w_inl_vld;
+		__be32		w_rsvd;
+		__be64		w_block_format;
+		__be32		m_inl_vld;
+		__be32		m_rsvd;
+		__be64		m_block_format;
+	} inl;
+};
+
+struct mlx5_klm {
+	__be32		bcount;
+	__be32		key;
+	__be64		va;
+};
+
+struct mlx5_stride_block_entry {
+	__be16		stride;
+	__be16		bcount;
+	__be32		key;
+	__be64		va;
+};
+
+struct mlx5_stride_block_ctrl_seg {
+	__be32		bcount_per_cycle;
+	__be32		op;
+	__be32		repeat_count;
+	u16		rsvd;
+	__be16		num_entries;
+};
+
 struct mlx5_core_qp {
 	void (*event)		(struct mlx5_core_qp *, int);
 	int			qpn;
@@ -444,6 +506,11 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u
 	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
 }
 
+static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
+{
+	return radix_tree_lookup(&dev->priv.mr_table.tree, key);
+}
+
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 			struct mlx5_core_qp *qp,
 			struct mlx5_create_qp_mbox_in *in,
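Another aside: __mlx5_mr_lookup() is meant to be paired with mlx5_base_mkey() from driver.h above, since the mr_table radix tree is keyed by the base mkey with the 8-bit key variant masked off. A hedged sketch of the lookup a CQE handler might perform; the surrounding helper is illustrative, not from this merge:

#include <linux/spinlock.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>

/* Illustrative: resolve the mkey reported in an mlx5_sig_err_cqe. */
static struct mlx5_core_mr *find_sig_mr(struct mlx5_core_dev *dev, u32 mkey)
{
	struct mlx5_core_mr *mr;

	read_lock(&dev->priv.mr_table.lock);
	mr = __mlx5_mr_lookup(dev, mlx5_base_mkey(mkey));
	read_unlock(&dev->priv.mr_table.lock);

	return mr;
}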
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 9ee0d2e51b16..1ea0b65c4cfb 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -46,17 +46,12 @@ struct ib_umem {
 	int			page_size;
 	int			writable;
 	int			hugetlb;
-	struct list_head	chunk_list;
 	struct work_struct	work;
 	struct mm_struct       *mm;
 	unsigned long		diff;
-};
-
-struct ib_umem_chunk {
-	struct list_head	list;
-	int			nents;
-	int			nmap;
-	struct scatterlist	page_list[0];
+	struct sg_table sg_head;
+	int             nmap;
+	int             npages;
 };
 
 #ifdef CONFIG_INFINIBAND_USER_MEM
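Note for driver authors (editor's sketch, not part of the diff): with chunk_list and struct ib_umem_chunk gone, umem consumers walk one linear sg_table through the standard scatterlist iterators, for example assuming a umem whose sg_head has already been DMA-mapped so that nmap entries are valid:

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Hypothetical helper: total DMA-mapped length of a umem. */
static u64 umem_dma_bytes(struct ib_umem *umem)
{
	struct scatterlist *sg;
	u64 bytes = 0;
	int i;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		bytes += sg_dma_len(sg);

	return bytes;
}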
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6793f32ccb58..82ab5c1e7605 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -122,7 +122,19 @@ enum ib_device_cap_flags {
 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
 	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
 	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
-	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29)
+	IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
+	IB_DEVICE_SIGNATURE_HANDOVER	= (1<<30)
+};
+
+enum ib_signature_prot_cap {
+	IB_PROT_T10DIF_TYPE_1 = 1,
+	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
+	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
+};
+
+enum ib_signature_guard_cap {
+	IB_GUARD_T10DIF_CRC	= 1,
+	IB_GUARD_T10DIF_CSUM	= 1 << 1,
 };
 
 enum ib_atomic_cap {
@@ -172,6 +184,8 @@ struct ib_device_attr {
 	unsigned int		max_fast_reg_page_list_len;
 	u16			max_pkeys;
 	u8			local_ca_ack_delay;
+	int			sig_prot_cap;
+	int			sig_guard_cap;
 };
 
 enum ib_mtu {
@@ -461,6 +475,130 @@ int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
  */
 int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;
 
+enum ib_mr_create_flags {
+	IB_MR_SIGNATURE_EN = 1,
+};
+
+/**
+ * ib_mr_init_attr - Memory region init attributes passed to routine
+ *     ib_create_mr.
+ * @max_reg_descriptors: max number of registration descriptors that
+ *     may be used with registration work requests.
+ * @flags: MR creation flags bit mask.
+ */
+struct ib_mr_init_attr {
+	int		max_reg_descriptors;
+	u32		flags;
+};
+
+enum ib_signature_type {
+	IB_SIG_TYPE_T10_DIF,
+};
+
+/**
+ * T10-DIF Signature types
+ * T10-DIF types are defined by SCSI
+ * specifications.
+ */
+enum ib_t10_dif_type {
+	IB_T10DIF_NONE,
+	IB_T10DIF_TYPE1,
+	IB_T10DIF_TYPE2,
+	IB_T10DIF_TYPE3
+};
+
+/**
+ * Signature T10-DIF block-guard types
+ * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
+ * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
+ */
+enum ib_t10_dif_bg_type {
+	IB_T10DIF_CRC,
+	IB_T10DIF_CSUM
+};
+
+/**
+ * struct ib_t10_dif_domain - Parameters specific for T10-DIF
+ *     domain.
+ * @type: T10-DIF type (0|1|2|3)
+ * @bg_type: T10-DIF block guard type (CRC|CSUM)
+ * @pi_interval: protection information interval.
+ * @bg: seed of guard computation.
+ * @app_tag: application tag of guard block
+ * @ref_tag: initial guard block reference tag.
+ * @type3_inc_reftag: T10-DIF type 3 does not define
+ *     the reference tag; it is the user's choice
+ *     whether to increment it or not.
+ */
+struct ib_t10_dif_domain {
+	enum ib_t10_dif_type	type;
+	enum ib_t10_dif_bg_type bg_type;
+	u16			pi_interval;
+	u16			bg;
+	u16			app_tag;
+	u32			ref_tag;
+	bool			type3_inc_reftag;
+};
+
+/**
+ * struct ib_sig_domain - Parameters for signature domain
+ * @sig_type: specific signature type
+ * @sig: union of all signature domain attributes that may
+ *     be used to set domain layout.
+ */
+struct ib_sig_domain {
+	enum ib_signature_type sig_type;
+	union {
+		struct ib_t10_dif_domain dif;
+	} sig;
+};
+
+/**
+ * struct ib_sig_attrs - Parameters for signature handover operation
+ * @check_mask: bitmask for signature byte check (8 bytes)
+ * @mem: memory domain layout descriptor.
+ * @wire: wire domain layout descriptor.
+ */
+struct ib_sig_attrs {
+	u8			check_mask;
+	struct ib_sig_domain	mem;
+	struct ib_sig_domain	wire;
+};
+
+enum ib_sig_err_type {
+	IB_SIG_BAD_GUARD,
+	IB_SIG_BAD_REFTAG,
+	IB_SIG_BAD_APPTAG,
+};
+
+/**
+ * struct ib_sig_err - signature error descriptor
+ */
+struct ib_sig_err {
+	enum ib_sig_err_type	err_type;
+	u32			expected;
+	u32			actual;
+	u64			sig_err_offset;
+	u32			key;
+};
+
+enum ib_mr_status_check {
+	IB_MR_CHECK_SIG_STATUS = 1,
+};
+
+/**
+ * struct ib_mr_status - Memory region status container
+ *
+ * @fail_status: Bitmask of MR checks status. For each
+ *     failed check a corresponding status bit is set.
+ * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
+ *     failure.
+ */
+struct ib_mr_status {
+	u32			fail_status;
+	struct ib_sig_err	sig_err;
+};
+
 /**
  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
  * enum.
@@ -644,6 +782,7 @@ enum ib_qp_create_flags {
 	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
 	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
 	IB_QP_CREATE_NETIF_QP			= 1 << 5,
+	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
 	/* reserve bits 26-31 for low level drivers' internal use */
 	IB_QP_CREATE_RESERVED_START		= 1 << 26,
 	IB_QP_CREATE_RESERVED_END		= 1 << 31,
@@ -808,6 +947,7 @@ enum ib_wr_opcode {
 	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
 	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 	IB_WR_BIND_MW,
+	IB_WR_REG_SIG_MR,
 	/* reserve values for low level drivers' internal use.
 	 * These values will not be used at all in the ib core layer.
 	 */
@@ -913,6 +1053,12 @@ struct ib_send_wr {
 			u32			 rkey;
 			struct ib_mw_bind_info	 bind_info;
 		} bind_mw;
+		struct {
+			struct ib_sig_attrs    *sig_attrs;
+			struct ib_mr	       *sig_mr;
+			int			access_flags;
+			struct ib_sge	       *prot;
+		} sig_handover;
 	} wr;
 	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
 };
@@ -1407,6 +1553,9 @@ struct ib_device {
 	int                        (*query_mr)(struct ib_mr *mr,
 					       struct ib_mr_attr *mr_attr);
 	int                        (*dereg_mr)(struct ib_mr *mr);
+	int                        (*destroy_mr)(struct ib_mr *mr);
+	struct ib_mr *             (*create_mr)(struct ib_pd *pd,
+						struct ib_mr_init_attr *mr_init_attr);
 	struct ib_mr *             (*alloc_fast_reg_mr)(struct ib_pd *pd,
 							int max_page_list_len);
 	struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
@@ -1455,6 +1604,8 @@ struct ib_device {
 							 *flow_attr,
 							 int domain);
 	int			   (*destroy_flow)(struct ib_flow *flow_id);
+	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
+						      struct ib_mr_status *mr_status);
 
 	struct ib_dma_mapping_ops   *dma_ops;
 
@@ -2250,6 +2401,25 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
  */
 int ib_dereg_mr(struct ib_mr *mr);
 
+
+/**
+ * ib_create_mr - Allocates a memory region that may be used for
+ *     signature handover operations.
+ * @pd: The protection domain associated with the region.
+ * @mr_init_attr: memory region init attributes.
+ */
+struct ib_mr *ib_create_mr(struct ib_pd *pd,
+			   struct ib_mr_init_attr *mr_init_attr);
+
+/**
+ * ib_destroy_mr - Destroys a memory region that was created using
+ *     ib_create_mr and removes it from HW translation tables.
+ * @mr: The memory region to destroy.
+ *
+ * This function can fail if the memory region has memory windows bound to it.
+ */
+int ib_destroy_mr(struct ib_mr *mr);
+
 /**
  * ib_alloc_fast_reg_mr - Allocates memory region usable with the
  *     IB_WR_FAST_REG_MR send work request.
@@ -2435,4 +2605,19 @@ static inline int ib_check_mr_access(int flags)
 	return 0;
 }
 
+/**
+ * ib_check_mr_status: lightweight check of MR status.
+ *     This routine may provide status checks on a selected
+ *     ib_mr. The first use is the signature status check.
+ *
+ * @mr: A memory region.
+ * @check_mask: Bitmask of which checks to perform from
+ *     ib_mr_status_check enumeration.
+ * @mr_status: The container of relevant status checks.
+ *     Failed checks will be indicated in the status bitmask
+ *     and the relevant info shall be in the error item.
+ */
+int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
+		       struct ib_mr_status *mr_status);
+
 #endif /* IB_VERBS_H */
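To close, a hedged end-to-end sketch of the verbs added above: allocate a signature-enabled MR with ib_create_mr(), describe both domains in ib_sig_attrs (registration itself is posted as an IB_WR_REG_SIG_MR work request whose wr.sig_handover carries sig_attrs, sig_mr and prot), and query the outcome with ib_check_mr_status(). The protection domain, the DIF parameters and all helper names here are assumptions for illustration:

#include <linux/string.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Illustrative: allocate an MR capable of signature handover. */
static struct ib_mr *alloc_sig_mr(struct ib_pd *pd)
{
	struct ib_mr_init_attr attr = {
		.max_reg_descriptors	= 2,	/* data + protection */
		.flags			= IB_MR_SIGNATURE_EN,
	};

	return ib_create_mr(pd, &attr);
}

/* Illustrative: T10-DIF type 1 with CRC guard on the wire domain only. */
static void init_sig_attrs(struct ib_sig_attrs *attrs, u32 first_lba)
{
	memset(attrs, 0, sizeof(*attrs));
	attrs->mem.sig_type		= IB_SIG_TYPE_T10_DIF;
	attrs->mem.sig.dif.type		= IB_T10DIF_NONE;
	attrs->wire.sig_type		= IB_SIG_TYPE_T10_DIF;
	attrs->wire.sig.dif.type	= IB_T10DIF_TYPE1;
	attrs->wire.sig.dif.bg_type	= IB_T10DIF_CRC;
	attrs->wire.sig.dif.pi_interval	= 512;		/* bytes per PI block */
	attrs->wire.sig.dif.ref_tag	= first_lba;	/* type 1 seeds reftag with LBA */
}

/* Illustrative: after the I/O completes, ask the device for a verdict. */
static int check_sig_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status st;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &st);
	if (ret)
		return ret;

	if (st.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("sig error type %d at offset %llu, key 0x%x\n",
		       st.sig_err.err_type,
		       (unsigned long long)st.sig_err.sig_err_offset,
		       st.sig_err.key);

	return 0;
}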