author		Hoang-Nam Nguyen <hnguyen@de.ibm.com>	2008-01-25 15:18:27 -0500
committer	Roland Dreier <rolandd@cisco.com>	2008-02-04 23:20:42 -0500
commit		2b5e6b120e58d44cace68e6c7204b541a8b0b43f
tree		fa2cb052b146b22ec285928a000f133da3e7d73c /drivers/infiniband/hw
parent		528b03f73247c30750b740dcad16ad1914e56e89
IB/ehca: Add PMA support
This patch enables ehca to redirect any PMA queries to the actual PMA QP.

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Reviewed-by: Joachim Fenkes <fenkes@de.ibm.com>
Reviewed-by: Christoph Raisch <raisch@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_classes.h	 1
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_iverbs.h	 5
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_main.c		 2
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_sqp.c		91
4 files changed, 98 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index f281d16040f5..92cce8aacbb7 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -101,6 +101,7 @@ struct ehca_sport {
 	spinlock_t mod_sqp_lock;
 	enum ib_port_state port_state;
 	struct ehca_sma_attr saved_attr;
+	u32 pma_qp_nr;
 };
 
 #define HCA_CAP_MR_PGSIZE_4K 0x80000000
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index c469bfde2708..a8a2ea585d2f 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -187,6 +187,11 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context);
 
 int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+		     struct ib_wc *in_wc, struct ib_grh *in_grh,
+		     struct ib_mad *in_mad,
+		     struct ib_mad *out_mad);
+
 void ehca_poll_eqs(unsigned long data);
 
 int ehca_calc_ipd(struct ehca_shca *shca, int port,
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 84c9b7b8669b..a86ebcc79a95 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -472,7 +472,7 @@ int ehca_init_device(struct ehca_shca *shca)
 	shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
 	shca->ib_device.attach_mcast = ehca_attach_mcast;
 	shca->ib_device.detach_mcast = ehca_detach_mcast;
-	/* shca->ib_device.process_mad = ehca_process_mad; */
+	shca->ib_device.process_mad = ehca_process_mad;
 	shca->ib_device.mmap = ehca_mmap;
 
 	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 79e72b25b252..706d97ad5555 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -39,12 +39,18 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rdma/ib_mad.h>
 
 #include "ehca_classes.h"
 #include "ehca_tools.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
+#define IB_MAD_STATUS_REDIRECT		__constant_htons(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION	__constant_htons(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD	__constant_htons(0x0008)
+
+#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
 
 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
@@ -83,6 +89,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
 			 port, ret);
 		return ret;
 	}
+	shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
+	ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
+		 port, pma_qp_nr);
 		break;
 	default:
 		ehca_err(&shca->ib_device, "invalid qp_type=%x",
@@ -109,3 +118,85 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
 
 	return H_SUCCESS;
 }
+
+struct ib_perf {
+	struct ib_mad_hdr mad_hdr;
+	u8 reserved[40];
+	u8 data[192];
+} __attribute__ ((packed));
+
+
+static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+			     struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	struct ib_perf *in_perf = (struct ib_perf *)in_mad;
+	struct ib_perf *out_perf = (struct ib_perf *)out_mad;
+	struct ib_class_port_info *poi =
+		(struct ib_class_port_info *)out_perf->data;
+	struct ehca_shca *shca =
+		container_of(ibdev, struct ehca_shca, ib_device);
+	struct ehca_sport *sport = &shca->sport[port_num - 1];
+
+	ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
+
+	*out_mad = *in_mad;
+
+	if (in_perf->mad_hdr.class_version != 1) {
+		ehca_warn(ibdev, "Unsupported class_version=%x",
+			  in_perf->mad_hdr.class_version);
+		out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
+		goto perf_reply;
+	}
+
+	switch (in_perf->mad_hdr.method) {
+	case IB_MGMT_METHOD_GET:
+	case IB_MGMT_METHOD_SET:
+		/* set class port info for redirection */
+		out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
+		out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
+		memset(poi, 0, sizeof(*poi));
+		poi->base_version = 1;
+		poi->class_version = 1;
+		poi->resp_time_value = 18;
+		poi->redirect_lid = sport->saved_attr.lid;
+		poi->redirect_qp = sport->pma_qp_nr;
+		poi->redirect_qkey = IB_QP1_QKEY;
+		poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+		ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
+			 sport->saved_attr.lid, sport->pma_qp_nr);
+		break;
+
+	case IB_MGMT_METHOD_GET_RESP:
+		return IB_MAD_RESULT_FAILURE;
+
+	default:
+		out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
+		break;
+	}
+
+perf_reply:
+	out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+
+	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+		     struct ib_wc *in_wc, struct ib_grh *in_grh,
+		     struct ib_mad *in_mad,
+		     struct ib_mad *out_mad)
+{
+	int ret;
+
+	if (!port_num || port_num > ibdev->phys_port_cnt)
+		return IB_MAD_RESULT_FAILURE;
+
+	/* accept only pma request */
+	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+		return IB_MAD_RESULT_SUCCESS;
+
+	ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
+	ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+
+	return ret;
+}
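
Illustrative note (not part of the commit): the reply built in ehca_process_perf() tells the PMA consumer, via a ClassPortInfo attribute with the redirect status bit set, to resend its performance-management MADs to the real PMA QP. Below is a minimal user-space sketch of that consumer-side handling. The struct only approximates the on-wire ClassPortInfo layout (trap fields and byte-order conversion are omitted), and handle_pma_redirect() as well as the example values are hypothetical.

/*
 * Illustrative sketch only -- not part of this patch.  It shows how a
 * PMA consumer could act on the redirect reply that ehca_process_perf()
 * builds: further performance-management MADs have to be sent to the
 * redirect LID/QP with the advertised Q_Key.
 */
#include <stdint.h>
#include <stdio.h>

/*
 * Minimal copy of the on-wire ClassPortInfo layout (trap fields left out,
 * byte-order conversion ignored for brevity).
 */
struct class_port_info {
	uint8_t  base_version;
	uint8_t  class_version;
	uint16_t capability_mask;
	uint8_t  reserved[3];
	uint8_t  resp_time_value;
	uint8_t  redirect_gid[16];
	uint32_t redirect_tcslfl;
	uint16_t redirect_lid;
	uint16_t redirect_pkey;
	uint32_t redirect_qp;
	uint32_t redirect_qkey;
} __attribute__ ((packed));

/* Hypothetical helper: report where the next PMA request must go. */
static void handle_pma_redirect(const struct class_port_info *cpi)
{
	/* ehca fills these from sport->saved_attr.lid and pma_qp_nr. */
	printf("resend PerfMgt MADs to LID 0x%x, QP 0x%x, Q_Key 0x%x\n",
	       (unsigned)cpi->redirect_lid,
	       (unsigned)cpi->redirect_qp,
	       (unsigned)cpi->redirect_qkey);
}

int main(void)
{
	/* Made-up values standing in for a real GET(ClassPortInfo) reply. */
	struct class_port_info cpi = {
		.base_version  = 1,
		.class_version = 1,
		.redirect_lid  = 0x02,       /* sport->saved_attr.lid */
		.redirect_qp   = 0x29,       /* sport->pma_qp_nr */
		.redirect_qkey = 0x80010000, /* the well-known GSI Q_Key */
	};

	handle_pma_redirect(&cpi);
	return 0;
}

Per the InfiniBand specification, a management client that sees the redirect bit set in the MAD status is expected to reissue its request to the destination advertised in the ClassPortInfo, which is exactly what this patch advertises for the PMA class.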