 drivers/net/ethernet/mellanox/mlx5/core/eq.c |  13 ++-
 drivers/net/ethernet/mellanox/mlx5/core/fw.c |  40 ++++++
 drivers/net/ethernet/mellanox/mlx5/core/qp.c | 119 ++++++++++++++
 include/linux/mlx5/device.h                  |  54 ++++++-
 include/linux/mlx5/driver.h                  |  12 ++
 include/linux/mlx5/qp.h                      |  55 +++++++
 6 files changed, 291 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ab684463780b..da82991239a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -157,6 +157,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_CMD";
 	case MLX5_EVENT_TYPE_PAGE_REQUEST:
 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
+	case MLX5_EVENT_TYPE_PAGE_FAULT:
+		return "MLX5_EVENT_TYPE_PAGE_FAULT";
 	default:
 		return "Unrecognized event";
 	}
@@ -279,6 +281,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 			}
 			break;
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+		case MLX5_EVENT_TYPE_PAGE_FAULT:
+			mlx5_eq_pagefault(dev, eqe);
+			break;
+#endif
 
 		default:
 			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
@@ -446,8 +453,12 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
 int mlx5_start_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
 	int err;
 
+	if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
+
 	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
 				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
 				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
@@ -459,7 +470,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	mlx5_cmd_use_events(dev);
 
 	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
-				 MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
+				 MLX5_NUM_ASYNC_EQE, async_event_mask,
 				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
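
Note: page-fault EQEs are only delivered on the async EQ when the HCA advertises MLX5_DEV_CAP_FLAG_ON_DMND_PG, which is why the event mask above is widened conditionally. A minimal sketch of that mask computation, using a hypothetical helper name that is not part of this patch:

static u64 example_async_event_mask(struct mlx5_core_dev *dev)
{
	u64 mask = MLX5_ASYNC_EVENT_MASK;

	/* MLX5_EVENT_TYPE_PAGE_FAULT is 0xc, i.e. bit 12 of the mask */
	if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
		mask |= 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;

	return mask;
}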
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 087c4c797deb..06f9036acd83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -69,6 +69,46 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
 	return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
 }
 
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+{
+	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+	void *out;
+	int err;
+
+	if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
+		return -ENOTSUPP;
+
+	memset(in, 0, sizeof(in));
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+	if (err)
+		goto out;
+
+	err = mlx5_cmd_status_to_err_v2(out);
+	if (err) {
+		mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
+		goto out;
+	}
+
+	memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
+	       sizeof(*caps));
+
+	mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
+		      be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
+		      be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
+		      be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL(mlx5_query_odp_caps);
+
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
 {
 	struct mlx5_cmd_init_hca_mbox_in in;
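
A consumer such as the mlx5 IB driver would call the exported mlx5_query_odp_caps() roughly as in the sketch below; the wrapper function and the specific capability check are illustrative assumptions, not code taken from this patch:

static int example_check_rc_odp_send(struct mlx5_core_dev *dev)
{
	struct mlx5_odp_caps caps;
	int err;

	/* Fails with -ENOTSUPP when the HCA does not advertise ODP. */
	err = mlx5_query_odp_caps(dev, &caps);
	if (err)
		return err;

	/* Per-transport capability words are big-endian in the mailbox layout. */
	if (!(be32_to_cpu(caps.per_transport_caps.rc_odp_caps) &
	      MLX5_ODP_SUPPORT_SEND))
		return -EOPNOTSUPP;

	return 0;
}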
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 5261a2b0da43..575d853dbe05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -88,6 +88,95 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
 	mlx5_core_put_rsc(common);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+	struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
+	int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
+	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
+	struct mlx5_core_qp *qp =
+		container_of(common, struct mlx5_core_qp, common);
+	struct mlx5_pagefault pfault;
+
+	if (!qp) {
+		mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
+			       qpn);
+		return;
+	}
+
+	pfault.event_subtype = eqe->sub_type;
+	pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
+		(MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
+	pfault.bytes_committed = be32_to_cpu(
+		pf_eqe->bytes_committed);
+
+	mlx5_core_dbg(dev,
+		      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
+		      eqe->sub_type, pfault.flags);
+
+	switch (eqe->sub_type) {
+	case MLX5_PFAULT_SUBTYPE_RDMA:
+		/* RDMA based event */
+		pfault.rdma.r_key =
+			be32_to_cpu(pf_eqe->rdma.r_key);
+		pfault.rdma.packet_size =
+			be16_to_cpu(pf_eqe->rdma.packet_length);
+		pfault.rdma.rdma_op_len =
+			be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+		pfault.rdma.rdma_va =
+			be64_to_cpu(pf_eqe->rdma.rdma_va);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
+			      qpn, pfault.rdma.r_key);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
+			      pfault.rdma.rdma_op_len);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: rdma_va: 0x%016llx,\n",
+			      pfault.rdma.rdma_va);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
+			      pfault.bytes_committed);
+		break;
+
+	case MLX5_PFAULT_SUBTYPE_WQE:
+		/* WQE based event */
+		pfault.wqe.wqe_index =
+			be16_to_cpu(pf_eqe->wqe.wqe_index);
+		pfault.wqe.packet_size =
+			be16_to_cpu(pf_eqe->wqe.packet_length);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
+			      qpn, pfault.wqe.wqe_index);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
+			      pfault.bytes_committed);
+		break;
+
+	default:
+		mlx5_core_warn(dev,
+			       "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
+			       eqe->sub_type, qpn);
+		/* Unsupported page faults should still be resolved by the
+		 * page fault handler
+		 */
+	}
+
+	if (qp->pfault_handler) {
+		qp->pfault_handler(qp, &pfault);
+	} else {
+		mlx5_core_err(dev,
+			      "ODP event for QP %08x, without a fault handler in QP\n",
+			      qpn);
+		/* Page fault will remain unresolved. QP will hang until it is
+		 * destroyed
+		 */
+	}
+
+	mlx5_core_put_rsc(common);
+}
+#endif
+
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 			struct mlx5_core_qp *qp,
 			struct mlx5_create_qp_mbox_in *in,
@@ -322,3 +411,33 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+				u8 flags, int error)
+{
+	struct mlx5_page_fault_resume_mbox_in in;
+	struct mlx5_page_fault_resume_mbox_out out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
+	in.hdr.opmod = 0;
+	flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
+		  MLX5_PAGE_FAULT_RESUME_WRITE |
+		  MLX5_PAGE_FAULT_RESUME_RDMA);
+	flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
+	in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
+				   (flags << MLX5_QPN_BITS));
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		err = mlx5_cmd_status_to_err(&out.hdr);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
+#endif
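
Once the upper layer has made the faulting pages present, it is expected to re-arm the QP with mlx5_core_page_fault_resume(). A sketch of a resolution path that a page-fault handler might end with (illustrative only, not part of this patch):

static void example_resolve_and_resume(struct mlx5_core_dev *dev,
				       struct mlx5_core_qp *qp,
				       struct mlx5_pagefault *pfault)
{
	int err;

	/* The MLX5_PFAULT_* bits match the MLX5_PAGE_FAULT_RESUME_* bits,
	 * so the flags reported in the event can be passed back as-is.
	 */
	err = mlx5_core_page_fault_resume(dev, qp->qpn, pfault->flags, 0);
	if (err)
		pr_warn("resuming page fault on QP 0x%06x failed: %d\n",
			qp->qpn, err);
}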
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 096abe543d2c..70c28239e339 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -120,6 +120,15 @@ enum {
 };
 
 enum {
+	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
+	MLX5_PFAULT_SUBTYPE_WQE	 = 0,
+	MLX5_PFAULT_SUBTYPE_RDMA = 1,
+};
+
+enum {
 	MLX5_PERM_LOCAL_READ	= 1 << 2,
 	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
 	MLX5_PERM_REMOTE_READ	= 1 << 4,
@@ -215,6 +224,8 @@ enum mlx5_event {
 
 	MLX5_EVENT_TYPE_CMD		   = 0x0a,
 	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,
+
+	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
 };
 
 enum {
@@ -300,6 +311,8 @@ enum {
 enum {
 	HCA_CAP_OPMOD_GET_MAX	= 0,
 	HCA_CAP_OPMOD_GET_CUR	= 1,
+	HCA_CAP_OPMOD_GET_ODP_MAX = 4,
+	HCA_CAP_OPMOD_GET_ODP_CUR = 5
 };
 
 struct mlx5_inbox_hdr {
@@ -329,6 +342,23 @@ struct mlx5_cmd_query_adapter_mbox_out {
 	u8	vsd_psid[16];
 };
 
+enum mlx5_odp_transport_cap_bits {
+	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
+	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
+	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
+	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
+};
+
+struct mlx5_odp_caps {
+	char reserved[0x10];
+	struct {
+		__be32			rc_odp_caps;
+		__be32			uc_odp_caps;
+		__be32			ud_odp_caps;
+	} per_transport_caps;
+	char reserved2[0xe4];
+};
+
 struct mlx5_cmd_init_hca_mbox_in {
 	struct mlx5_inbox_hdr	hdr;
 	u8			rsvd0[2];
@@ -449,6 +479,27 @@ struct mlx5_eqe_page_req {
 	__be32		rsvd1[5];
 };
 
+struct mlx5_eqe_page_fault {
+	__be32 bytes_committed;
+	union {
+		struct {
+			u16	reserved1;
+			__be16	wqe_index;
+			u16	reserved2;
+			__be16	packet_length;
+			u8	reserved3[12];
+		} __packed wqe;
+		struct {
+			__be32	r_key;
+			u16	reserved1;
+			__be16	packet_length;
+			__be32	rdma_op_len;
+			__be64	rdma_va;
+		} __packed rdma;
+	} __packed;
+	__be32 flags_qpn;
+} __packed;
+
 union ev_data {
 	__be32				raw[7];
 	struct mlx5_eqe_cmd		cmd;
@@ -460,6 +511,7 @@ union ev_data {
 	struct mlx5_eqe_congestion	cong;
 	struct mlx5_eqe_stall_vl	stall_vl;
 	struct mlx5_eqe_page_req	req_pages;
+	struct mlx5_eqe_page_fault	page_fault;
 } __packed;
 
 struct mlx5_eqe {
@@ -826,7 +878,7 @@ struct mlx5_query_special_ctxs_mbox_out {
 struct mlx5_create_mkey_mbox_in {
 	struct mlx5_inbox_hdr	hdr;
 	__be32			input_mkey_index;
-	u8			rsvd0[4];
+	__be32			flags;
 	struct mlx5_mkey_seg	seg;
 	u8			rsvd1[16];
 	__be32			xlat_oct_act_size;
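
The former rsvd0[4] bytes of mlx5_create_mkey_mbox_in become a flags word so that MLX5_MKEY_INBOX_PG_ACCESS can request page-fault-capable access on an MKey. A sketch of how a caller might set it (an assumption about typical use, not code from this patch):

static void example_request_pg_access(struct mlx5_create_mkey_mbox_in *in)
{
	/* Bit 31 of the flags word asks the HCA for pagefault-capable access. */
	in->flags = cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS);
}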
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b1bf41556b32..7088dcd19214 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -113,6 +113,13 @@ enum {
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
 };
 
+enum mlx5_page_fault_resume_flags {
+	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
+	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
+	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
+	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
+};
+
 enum dbg_rsc_type {
 	MLX5_DBG_RSC_QP,
 	MLX5_DBG_RSC_EQ,
@@ -703,6 +710,9 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
 void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+#endif
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
@@ -740,6 +750,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
 			 int npsvs, u32 *sig_index);
 int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
+			struct mlx5_odp_caps *odp_caps);
 
 static inline u32 mlx5_mkey_to_idx(u32 mkey)
 {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 67f4b9660b06..6b1d6f60c7e6 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -50,6 +50,9 @@
 #define MLX5_BSF_APPTAG_ESCAPE	0x1
 #define MLX5_BSF_APPREF_ESCAPE	0x2
 
+#define MLX5_QPN_BITS		24
+#define MLX5_QPN_MASK		((1 << MLX5_QPN_BITS) - 1)
+
 enum mlx5_qp_optpar {
 	MLX5_QP_OPTPAR_ALT_ADDR_PATH	= 1 << 0,
 	MLX5_QP_OPTPAR_RRE		= 1 << 1,
@@ -363,9 +366,46 @@ struct mlx5_stride_block_ctrl_seg {
 	__be16		num_entries;
 };
 
+enum mlx5_pagefault_flags {
+	MLX5_PFAULT_REQUESTOR = 1 << 0,
+	MLX5_PFAULT_WRITE     = 1 << 1,
+	MLX5_PFAULT_RDMA      = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+	u32			bytes_committed;
+	u8			event_subtype;
+	enum mlx5_pagefault_flags flags;
+	union {
+		/* Initiator or send message responder pagefault details. */
+		struct {
+			/* Received packet size, only valid for responders. */
+			u32	packet_size;
+			/*
+			 * WQE index. Refers to either the send queue or
+			 * receive queue, according to event_subtype.
+			 */
+			u16	wqe_index;
+		} wqe;
+		/* RDMA responder pagefault details */
+		struct {
+			u32	r_key;
+			/*
+			 * Received packet size, minimal size page fault
+			 * resolution required for forward progress.
+			 */
+			u32	packet_size;
+			u32	rdma_op_len;
+			u64	rdma_va;
+		} rdma;
+	};
+};
+
 struct mlx5_core_qp {
 	struct mlx5_core_rsc_common	common; /* must be first */
 	void (*event)		(struct mlx5_core_qp *, int);
+	void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
 	int			qpn;
 	struct mlx5_rsc_debug	*dbg;
 	int			pid;
@@ -533,6 +573,17 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u
 	return radix_tree_lookup(&dev->priv.mr_table.tree, key);
 }
 
+struct mlx5_page_fault_resume_mbox_in {
+	struct mlx5_inbox_hdr	hdr;
+	__be32			flags_qpn;
+	u8			reserved[4];
+};
+
+struct mlx5_page_fault_resume_mbox_out {
+	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 			struct mlx5_core_qp *qp,
 			struct mlx5_create_qp_mbox_in *in,
@@ -552,6 +603,10 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+				u8 context, int error);
+#endif
 
 static inline const char *mlx5_qp_type_str(int type)
 {
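
The new pfault_handler member of struct mlx5_core_qp is the hook through which mlx5_eq_pagefault() delivers the decoded mlx5_pagefault. A sketch of an upper layer installing a handler and branching on the sub-type (hypothetical code, not part of this patch):

static void example_qp_pfault(struct mlx5_core_qp *qp,
			      struct mlx5_pagefault *pfault)
{
	switch (pfault->event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		/* Fault while processing a WQE; pfault->wqe.wqe_index is valid. */
		break;
	case MLX5_PFAULT_SUBTYPE_RDMA:
		/* Fault as an RDMA responder; pfault->rdma.r_key/rdma_va are valid. */
		break;
	}
	/* ...resolve the pages, then call mlx5_core_page_fault_resume()... */
}

static void example_arm_qp(struct mlx5_core_qp *qp)
{
	qp->pfault_handler = example_qp_pfault;
}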