author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-18 23:10:44 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-18 23:10:44 -0500
commit	4c929feed7e9ce69efbe85e3932393db67fbce76 (patch)
tree	e71435174ea1c22e98c93d3c0f93598d5841ce02 /drivers/net/ethernet
parent	018cb13eb33383cbc3fb6d3a286ef32ecb816779 (diff)
parent	a7cfef21e3d066343bec14d3113a9f9c92d1c2a8 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband updates from Roland Dreier:
 "Main batch of InfiniBand/RDMA changes for 3.19:

  - On-demand paging support in core midlayer and mlx5 driver.  This
    lets userspace create non-pinned memory regions and have the
    adapter HW trigger page faults.

  - iSER and IPoIB updates and fixes.

  - Low-level HW driver updates for cxgb4, mlx4 and ocrdma.

  - Other miscellaneous fixes"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (56 commits)
  IB/mlx5: Implement on demand paging by adding support for MMU notifiers
  IB/mlx5: Add support for RDMA read/write responder page faults
  IB/mlx5: Handle page faults
  IB/mlx5: Page faults handling infrastructure
  IB/mlx5: Add mlx5_ib_update_mtt to update page tables after creation
  IB/mlx5: Changes in memory region creation to support on-demand paging
  IB/mlx5: Implement the ODP capability query verb
  mlx5_core: Add support for page faults events and low level handling
  mlx5_core: Re-add MLX5_DEV_CAP_FLAG_ON_DMND_PG flag
  IB/srp: Allow newline separator for connection string
  IB/core: Implement support for MMU notifiers regarding on demand paging regions
  IB/core: Add support for on demand paging regions
  IB/core: Add flags for on demand paging support
  IB/core: Add support for extended query device caps
  IB/mlx5: Add function to read WQE from user-space
  IB/core: Add umem function to read data from user-space
  IB/core: Replace ib_umem's offset field with a full address
  IB/mlx5: Enhance UMR support to allow partial page table update
  IB/mlx5: Remove per-MR pas and dma pointers
  RDMA/ocrdma: Always resolve destination mac from GRH for UD QPs
  ...
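Note: the hunks below add three mlx5_core entry points for on-demand paging (mlx5_query_odp_caps, the per-QP pfault_handler callback invoked from mlx5_eq_pagefault, and mlx5_core_page_fault_resume). The fragment below is only a hedged sketch of how a consumer such as the mlx5 IB driver might wire them together; the example_* names and the surrounding setup are hypothetical and not part of this series.

/*
 * Hypothetical consumer sketch (not part of this commit): check the
 * on-demand paging capability, hook a page-fault handler on a QP, and
 * resume the QP once the faulting pages have been made present.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>

static void example_pfault_handler(struct mlx5_core_qp *qp,
				   struct mlx5_pagefault *pfault)
{
	/* Invoked from mlx5_eq_pagefault() when the HCA reports a fault.
	 * A real handler would make the faulting range resident before
	 * resuming; here we only log what the EQE carried.
	 */
	pr_debug("ODP fault: subtype 0x%02x, flags 0x%02x, committed %u\n",
		 pfault->event_subtype, pfault->flags,
		 pfault->bytes_committed);
}

static int example_enable_odp(struct mlx5_core_dev *mdev,
			      struct mlx5_core_qp *qp)
{
	struct mlx5_odp_caps caps;
	int err;

	/* Fails with -ENOTSUPP if MLX5_DEV_CAP_FLAG_ON_DMND_PG is not set. */
	err = mlx5_query_odp_caps(mdev, &caps);
	if (err)
		return err;

	pr_info("RC/UC/UD ODP caps: %08x %08x %08x\n",
		be32_to_cpu(caps.per_transport_caps.rc_odp_caps),
		be32_to_cpu(caps.per_transport_caps.uc_odp_caps),
		be32_to_cpu(caps.per_transport_caps.ud_odp_caps));

	/* Hook the callback that mlx5_eq_pagefault() will invoke. */
	qp->pfault_handler = example_pfault_handler;
	return 0;
}

static int example_resume(struct mlx5_core_dev *mdev, u32 qpn,
			  struct mlx5_pagefault *pfault)
{
	/* Ask the HCA to retry the faulting operation; a non-zero last
	 * argument would move the QP to an error state instead.
	 */
	return mlx5_core_page_fault_resume(mdev, qpn, pfault->flags, 0);
}

In the actual series the consumer-side logic lives in the IB/mlx5 page-fault patches listed above; the sketch only illustrates the mlx5_core-facing surface introduced by the hunks that follow.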
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/main.c	6
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eq.c	13
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/fw.c	40
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/qp.c	119
4 files changed, 174 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b935bf3d0bb3..943cbd47d832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -171,9 +171,9 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
 {
 	int i;
 
-	for (i = 0; i < dev->caps.num_ports - 1; i++) {
-		if (port_type[i] != port_type[i + 1]) {
-			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+		for (i = 0; i < dev->caps.num_ports - 1; i++) {
+			if (port_type[i] != port_type[i + 1]) {
 				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
 				return -EINVAL;
 			}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ab684463780b..da82991239a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -157,6 +157,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_CMD";
 	case MLX5_EVENT_TYPE_PAGE_REQUEST:
 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
+	case MLX5_EVENT_TYPE_PAGE_FAULT:
+		return "MLX5_EVENT_TYPE_PAGE_FAULT";
 	default:
 		return "Unrecognized event";
 	}
@@ -279,6 +281,11 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 			}
 			break;
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+		case MLX5_EVENT_TYPE_PAGE_FAULT:
+			mlx5_eq_pagefault(dev, eqe);
+			break;
+#endif
 
 		default:
 			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
@@ -446,8 +453,12 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
 int mlx5_start_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
 	int err;
 
+	if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
+
 	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
 				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
 				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
@@ -459,7 +470,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	mlx5_cmd_use_events(dev);
 
 	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
-				 MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
+				 MLX5_NUM_ASYNC_EQE, async_event_mask,
 				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
 	if (err) {
 		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 087c4c797deb..06f9036acd83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -69,6 +69,46 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
 	return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
 }
 
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+{
+	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+	void *out;
+	int err;
+
+	if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
+		return -ENOTSUPP;
+
+	memset(in, 0, sizeof(in));
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+	if (err)
+		goto out;
+
+	err = mlx5_cmd_status_to_err_v2(out);
+	if (err) {
+		mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
+		goto out;
+	}
+
+	memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
+	       sizeof(*caps));
+
+	mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
+		be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
+		be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
+		be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL(mlx5_query_odp_caps);
+
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
 {
 	struct mlx5_cmd_init_hca_mbox_in in;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 5261a2b0da43..575d853dbe05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -88,6 +88,95 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
 	mlx5_core_put_rsc(common);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+	struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
+	int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
+	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
+	struct mlx5_core_qp *qp =
+		container_of(common, struct mlx5_core_qp, common);
+	struct mlx5_pagefault pfault;
+
+	if (!qp) {
+		mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
+			       qpn);
+		return;
+	}
+
+	pfault.event_subtype = eqe->sub_type;
+	pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
+		(MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
+	pfault.bytes_committed = be32_to_cpu(
+		pf_eqe->bytes_committed);
+
+	mlx5_core_dbg(dev,
+		      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
+		      eqe->sub_type, pfault.flags);
+
+	switch (eqe->sub_type) {
+	case MLX5_PFAULT_SUBTYPE_RDMA:
+		/* RDMA based event */
+		pfault.rdma.r_key =
+			be32_to_cpu(pf_eqe->rdma.r_key);
+		pfault.rdma.packet_size =
+			be16_to_cpu(pf_eqe->rdma.packet_length);
+		pfault.rdma.rdma_op_len =
+			be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+		pfault.rdma.rdma_va =
+			be64_to_cpu(pf_eqe->rdma.rdma_va);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
+			      qpn, pfault.rdma.r_key);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
+			      pfault.rdma.rdma_op_len);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: rdma_va: 0x%016llx,\n",
+			      pfault.rdma.rdma_va);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
+			      pfault.bytes_committed);
+		break;
+
+	case MLX5_PFAULT_SUBTYPE_WQE:
+		/* WQE based event */
+		pfault.wqe.wqe_index =
+			be16_to_cpu(pf_eqe->wqe.wqe_index);
+		pfault.wqe.packet_size =
+			be16_to_cpu(pf_eqe->wqe.packet_length);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
+			      qpn, pfault.wqe.wqe_index);
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
+			      pfault.bytes_committed);
+		break;
+
+	default:
+		mlx5_core_warn(dev,
+			       "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
+			       eqe->sub_type, qpn);
+		/* Unsupported page faults should still be resolved by the
+		 * page fault handler
+		 */
+	}
+
+	if (qp->pfault_handler) {
+		qp->pfault_handler(qp, &pfault);
+	} else {
+		mlx5_core_err(dev,
+			      "ODP event for QP %08x, without a fault handler in QP\n",
+			      qpn);
+		/* Page fault will remain unresolved. QP will hang until it is
+		 * destroyed
+		 */
+	}
+
+	mlx5_core_put_rsc(common);
+}
+#endif
+
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 			struct mlx5_core_qp *qp,
 			struct mlx5_create_qp_mbox_in *in,
@@ -322,3 +411,33 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+				u8 flags, int error)
+{
+	struct mlx5_page_fault_resume_mbox_in in;
+	struct mlx5_page_fault_resume_mbox_out out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
+	in.hdr.opmod = 0;
+	flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
+		  MLX5_PAGE_FAULT_RESUME_WRITE	   |
+		  MLX5_PAGE_FAULT_RESUME_RDMA);
+	flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
+	in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
+				   (flags << MLX5_QPN_BITS));
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err)
+		return err;
+
+	if (out.hdr.status)
+		err = mlx5_cmd_status_to_err(&out.hdr);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
+#endif