author    Hal Rosenstock <halr@voltaire.com>    2006-03-28 19:40:04 -0500
committer Roland Dreier <rolandd@cisco.com>     2006-03-30 10:19:51 -0500
commit    618a3c03fcfdf1ac4543247c8ddfb0c9d775ff33 (patch)
tree      9b3c0baf53f0fcab01848b7816aace785afd8a94
parent    fa9656bbd9af5b95adc43eaa0a143992346378cb (diff)
IB/mad: RMPP support for additional classes
Add RMPP support for additional management classes that support it.
Also, validate RMPP is consistent with management class specified.

Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--   drivers/infiniband/core/mad.c        54
-rw-r--r--   drivers/infiniband/core/mad_rmpp.c   19
-rw-r--r--   drivers/infiniband/core/user_mad.c   30
-rw-r--r--   include/rdma/ib_mad.h                27
4 files changed, 85 insertions(+), 45 deletions(-)
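For illustration, a minimal registration sketch (not part of the patch): with the consistency check this patch adds to ib_register_mad_agent(), an RMPP-capable class such as Device Management must be registered with a nonzero rmpp_version, and a non-RMPP class must leave it zero. register_dm_agent(), my_send_handler, and my_recv_handler are hypothetical names; the ib_register_mad_agent() signature and struct ib_mad_reg_req fields are those of this kernel's include/rdma/ib_mad.h.

/* Hedged sketch: register for IB_MGMT_CLASS_DEVICE_MGMT, now an RMPP class.
 * Passing rmpp_version == 0 for this class fails the new check, as does a
 * nonzero rmpp_version for a class ib_is_mad_class_rmpp() rejects. */
static struct ib_mad_agent *register_dm_agent(struct ib_device *device,
                                              u8 port_num)
{
        struct ib_mad_reg_req reg_req = {
                .mgmt_class         = IB_MGMT_CLASS_DEVICE_MGMT,
                .mgmt_class_version = 1,
        };

        set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

        return ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
                                     IB_MGMT_RMPP_VERSION, /* consistent with class */
                                     my_send_handler, my_recv_handler, NULL);
}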
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index d4d07012a5ca..ba54c856b0e5 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -227,6 +227,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                         if (!is_vendor_oui(mad_reg_req->oui))
                                 goto error1;
                 }
+                /* Make sure class supplied is consistent with RMPP */
+                if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
+                        if (!rmpp_version)
+                                goto error1;
+                } else {
+                        if (rmpp_version)
+                                goto error1;
+                }
                 /* Make sure class supplied is consistent with QP type */
                 if (qp_type == IB_QPT_SMI) {
                         if ((mad_reg_req->mgmt_class !=
@@ -890,6 +898,35 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 }
 EXPORT_SYMBOL(ib_create_send_mad);
 
+int ib_get_mad_data_offset(u8 mgmt_class)
+{
+        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+                return IB_MGMT_SA_HDR;
+        else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+                 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+                 (mgmt_class == IB_MGMT_CLASS_BIS))
+                return IB_MGMT_DEVICE_HDR;
+        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+                return IB_MGMT_VENDOR_HDR;
+        else
+                return IB_MGMT_MAD_HDR;
+}
+EXPORT_SYMBOL(ib_get_mad_data_offset);
+
+int ib_is_mad_class_rmpp(u8 mgmt_class)
+{
+        if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
+            (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+            (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+            (mgmt_class == IB_MGMT_CLASS_BIS) ||
+            ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+             (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
+                return 1;
+        return 0;
+}
+EXPORT_SYMBOL(ib_is_mad_class_rmpp);
+
 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
 {
         struct ib_mad_send_wr_private *mad_send_wr;
@@ -1022,6 +1059,13 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                         goto error;
                 }
 
+                if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
+                        if (mad_agent_priv->agent.rmpp_version) {
+                                ret = -EINVAL;
+                                goto error;
+                        }
+                }
+
                 /*
                  * Save pointer to next work request to post in case the
                  * current one completes, and the user modifies the work
@@ -2454,11 +2498,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                         }
                 }
                 sg_list.addr = dma_map_single(qp_info->port_priv->
-                                                device->dma_device,
-                                        &mad_priv->grh,
-                                        sizeof *mad_priv -
-                                                sizeof mad_priv->header,
-                                        DMA_FROM_DEVICE);
+                                              device->dma_device,
+                                              &mad_priv->grh,
+                                              sizeof *mad_priv -
+                                              sizeof mad_priv->header,
+                                              DMA_FROM_DEVICE);
                 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
                 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                 mad_priv->header.mad_list.mad_queue = recv_queue;
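As a sanity check on the mapping the two new helpers encode (a sketch, not from the patch; check_mad_offsets() is a hypothetical name): each class group's header length plus its per-segment data must cover the fixed 256-byte MAD. The constants are those in ib_mad.h, including the IB_MGMT_DEVICE_* pair this patch introduces.

/* Hedged sketch: header + data spans for a 256-byte MAD, per class group. */
static void check_mad_offsets(void)
{
        /* SA: 56-byte header, 200 data bytes per segment */
        BUILD_BUG_ON(IB_MGMT_SA_HDR + IB_MGMT_SA_DATA != sizeof(struct ib_mad));
        /* DeviceMgmt/DeviceAdm/BIS: 64-byte header, 192 data bytes */
        BUILD_BUG_ON(IB_MGMT_DEVICE_HDR + IB_MGMT_DEVICE_DATA != sizeof(struct ib_mad));
        /* Vendor range 2: 40-byte header, 216 data bytes */
        BUILD_BUG_ON(IB_MGMT_VENDOR_HDR + IB_MGMT_VENDOR_DATA != sizeof(struct ib_mad));
}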
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index a6405079c285..dfd4e588ce03 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005 Intel Inc. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -100,17 +100,6 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
         }
 }
 
-static int data_offset(u8 mgmt_class)
-{
-        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-                return IB_MGMT_SA_HDR;
-        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-                return IB_MGMT_VENDOR_HDR;
-        else
-                return IB_MGMT_RMPP_HDR;
-}
-
 static void format_ack(struct ib_mad_send_buf *msg,
                        struct ib_rmpp_mad *data,
                        struct mad_rmpp_recv *rmpp_recv)
@@ -137,7 +126,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
         struct ib_mad_send_buf *msg;
         int ret, hdr_len;
 
-        hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
         msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
                                  recv_wc->wc->pkey_index, 1, hdr_len,
                                  0, GFP_KERNEL);
@@ -163,7 +152,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
         if (IS_ERR(ah))
                 return (void *) ah;
 
-        hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
         msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                  recv_wc->wc->pkey_index, 1,
                                  hdr_len, 0, GFP_KERNEL);
@@ -408,7 +397,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 
         rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
 
-        hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+        hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
         data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
         pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
         if (pad > IB_MGMT_RMPP_DATA || pad < 0)
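The get_mad_len() change above is where the per-class offset matters most: each RMPP segment carries sizeof(struct ib_rmpp_mad) minus the class header. A worked example (hypothetical helper, not in the patch): for IB_MGMT_CLASS_DEVICE_MGMT that is 256 - IB_MGMT_DEVICE_HDR(64) = 192 bytes, i.e. IB_MGMT_DEVICE_DATA, where the old data_offset() would have wrongly yielded 256 - IB_MGMT_RMPP_HDR(36) = 220.

/* Hedged sketch: per-segment RMPP payload implied by the new helper. */
static int rmpp_seg_payload(u8 mgmt_class)
{
        /* e.g. DEVICE_MGMT: 256 - 64 = 192 == IB_MGMT_DEVICE_DATA */
        return sizeof(struct ib_rmpp_mad) -
               ib_get_mad_data_offset(mgmt_class);
}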
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index fb6cd42601f9..afe70a549c2f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -177,17 +177,6 @@ static int queue_packet(struct ib_umad_file *file,
         return ret;
 }
 
-static int data_offset(u8 mgmt_class)
-{
-        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-                return IB_MGMT_SA_HDR;
-        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-                return IB_MGMT_VENDOR_HDR;
-        else
-                return IB_MGMT_RMPP_HDR;
-}
-
 static void send_handler(struct ib_mad_agent *agent,
                          struct ib_mad_send_wc *send_wc)
 {
@@ -283,7 +272,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
                  */
                 return -ENOSPC;
         }
-        offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
+        offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
         max_seg_payload = sizeof (struct ib_mad) - offset;
 
         for (left = packet->length - seg_payload, buf += seg_payload;
@@ -441,21 +430,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
         }
 
         rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-        if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
-                hdr_len = IB_MGMT_SA_HDR;
-                copy_offset = IB_MGMT_RMPP_HDR;
-                rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
-                              IB_MGMT_RMPP_FLAG_ACTIVE;
-        } else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
-                   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
-                hdr_len = IB_MGMT_VENDOR_HDR;
+        hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+        if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
+                copy_offset = IB_MGMT_MAD_HDR;
+                rmpp_active = 0;
+        } else {
                 copy_offset = IB_MGMT_RMPP_HDR;
                 rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
                               IB_MGMT_RMPP_FLAG_ACTIVE;
-        } else {
-                hdr_len = IB_MGMT_MAD_HDR;
-                copy_offset = IB_MGMT_MAD_HDR;
-                rmpp_active = 0;
         }
 
         data_len = count - sizeof (struct ib_user_mad) - hdr_len;
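One consequence of the rewritten classification worth spelling out (a sketch with a hypothetical helper name, not in the patch): classes the patch defines but does not mark RMPP-capable, such as IB_MGMT_CLASS_BOOT_MGMT and IB_MGMT_CLASS_CONG_MGMT, now take the plain-MAD path in ib_umad_write(), copied from the MAD header onward with RMPP forced off.

/* Hedged sketch: where user data is copied from, per class. */
static int umad_copy_offset(u8 mgmt_class)
{
        return ib_is_mad_class_rmpp(mgmt_class) ?
               IB_MGMT_RMPP_HDR : IB_MGMT_MAD_HDR;
}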
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 51ab8eddb295..5ff77558013b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
  * Copyright (c) 2004 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -55,6 +55,10 @@
 #define IB_MGMT_CLASS_DEVICE_MGMT               0x06
 #define IB_MGMT_CLASS_CM                        0x07
 #define IB_MGMT_CLASS_SNMP                      0x08
+#define IB_MGMT_CLASS_DEVICE_ADM                0x10
+#define IB_MGMT_CLASS_BOOT_MGMT                 0x11
+#define IB_MGMT_CLASS_BIS                       0x12
+#define IB_MGMT_CLASS_CONG_MGMT                 0x21
 #define IB_MGMT_CLASS_VENDOR_RANGE2_START       0x30
 #define IB_MGMT_CLASS_VENDOR_RANGE2_END         0x4F
 
@@ -117,6 +121,8 @@ enum {
         IB_MGMT_VENDOR_DATA = 216,
         IB_MGMT_SA_HDR = 56,
         IB_MGMT_SA_DATA = 200,
+        IB_MGMT_DEVICE_HDR = 64,
+        IB_MGMT_DEVICE_DATA = 192,
 };
 
 struct ib_mad_hdr {
@@ -603,6 +609,25 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
                                             gfp_t gfp_mask);
 
 /**
+ * ib_is_mad_class_rmpp - returns whether given management class
+ * supports RMPP.
+ * @mgmt_class: management class
+ *
+ * This routine returns whether the management class supports RMPP.
+ */
+int ib_is_mad_class_rmpp(u8 mgmt_class);
+
+/**
+ * ib_get_mad_data_offset - returns the data offset for a given
+ * management class.
+ * @mgmt_class: management class
+ *
+ * This routine returns the data offset in the MAD for the management
+ * class requested.
+ */
+int ib_get_mad_data_offset(u8 mgmt_class);
+
+/**
  * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
  * @send_buf: Previously allocated send data buffer.
  * @seg_num: number of segment to return
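To close, a usage sketch of the newly exported helper (make_reply is a hypothetical name; the call mirrors the ack_recv()/alloc_response_msg() callers updated above): size a reply buffer by the class of the MAD that actually arrived, rather than hard-coding one header length.

/* Hedged sketch: size a response by the received MAD's class. */
static struct ib_mad_send_buf *make_reply(struct ib_mad_agent *agent,
                                          struct ib_mad_recv_wc *recv_wc)
{
        int hdr_len;

        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        return ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                  recv_wc->wc->pkey_index, 1,
                                  hdr_len, 0, GFP_KERNEL);
}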