author     Hal Rosenstock <halr@voltaire.com>    2006-03-28 19:40:04 -0500
committer  Roland Dreier <rolandd@cisco.com>     2006-03-30 10:19:51 -0500
commit     618a3c03fcfdf1ac4543247c8ddfb0c9d775ff33
tree       9b3c0baf53f0fcab01848b7816aace785afd8a94 /drivers/infiniband
parent     fa9656bbd9af5b95adc43eaa0a143992346378cb
IB/mad: RMPP support for additional classes
Add RMPP support for the additional management classes that support it.
Also validate that the RMPP version supplied is consistent with the
specified management class.
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
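
The new check in ib_register_mad_agent() ties rmpp_version to the management
class: RMPP-capable classes must now register with a non-zero RMPP version,
and all other classes with zero. A minimal sketch of a conforming
registration, assuming a hypothetical "device" pointer, port 1, and
caller-supplied send/recv handlers (none of these names come from this patch):

        /* Sketch only: SA (IB_MGMT_CLASS_SUBN_ADM) is an RMPP class, so a
         * non-zero rmpp_version is required here; passing 0 for it, or a
         * non-zero version with a non-RMPP class such as
         * IB_MGMT_CLASS_PERF_MGMT, now makes registration fail.
         */
        struct ib_mad_reg_req req = {
                .mgmt_class         = IB_MGMT_CLASS_SUBN_ADM,
                .mgmt_class_version = 2,        /* SA class version */
        };
        struct ib_mad_agent *agent;

        agent = ib_register_mad_agent(device, 1 /* port */, IB_QPT_GSI, &req,
                                      IB_MGMT_RMPP_VERSION,
                                      my_send_handler, my_recv_handler, NULL);
        if (IS_ERR(agent))
                return PTR_ERR(agent);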
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/mad.c       | 54
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c  | 19
-rw-r--r--  drivers/infiniband/core/user_mad.c  | 30
3 files changed, 59 insertions(+), 44 deletions(-)
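
Background for the diffs below: the data offset of a management class is the
number of header bytes in front of that class's data area, so the payload that
fits in one 256-byte MAD segment is whatever remains. The patch centralizes
this in ib_get_mad_data_offset(); a sketch of the arithmetic (the wrapper
function here is illustrative, only the helper and the constants are from the
tree):

        /* Sketch: payload bytes carried per MAD segment for a class.
         * For IB_MGMT_CLASS_SUBN_ADM this should be sizeof(struct ib_mad) -
         * IB_MGMT_SA_HDR, i.e. the SA data area; copy_recv_mad() in
         * user_mad.c below performs the same subtraction inline.
         */
        static int max_seg_payload(u8 mgmt_class)
        {
                return sizeof(struct ib_mad) -
                       ib_get_mad_data_offset(mgmt_class);
        }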
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index d4d07012a5ca..ba54c856b0e5 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -227,6 +227,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                 if (!is_vendor_oui(mad_reg_req->oui))
                         goto error1;
         }
+        /* Make sure class supplied is consistent with RMPP */
+        if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
+                if (!rmpp_version)
+                        goto error1;
+        } else {
+                if (rmpp_version)
+                        goto error1;
+        }
         /* Make sure class supplied is consistent with QP type */
         if (qp_type == IB_QPT_SMI) {
                 if ((mad_reg_req->mgmt_class !=
@@ -890,6 +898,35 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 }
 EXPORT_SYMBOL(ib_create_send_mad);
 
+int ib_get_mad_data_offset(u8 mgmt_class)
+{
+        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+                return IB_MGMT_SA_HDR;
+        else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+                 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+                 (mgmt_class == IB_MGMT_CLASS_BIS))
+                return IB_MGMT_DEVICE_HDR;
+        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+                return IB_MGMT_VENDOR_HDR;
+        else
+                return IB_MGMT_MAD_HDR;
+}
+EXPORT_SYMBOL(ib_get_mad_data_offset);
+
+int ib_is_mad_class_rmpp(u8 mgmt_class)
+{
+        if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
+            (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+            (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+            (mgmt_class == IB_MGMT_CLASS_BIS) ||
+            ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+             (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
+                return 1;
+        return 0;
+}
+EXPORT_SYMBOL(ib_is_mad_class_rmpp);
+
 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
 {
         struct ib_mad_send_wr_private *mad_send_wr;
@@ -1022,6 +1059,13 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
                         goto error;
                 }
 
+                if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
+                        if (mad_agent_priv->agent.rmpp_version) {
+                                ret = -EINVAL;
+                                goto error;
+                        }
+                }
+
                 /*
                  * Save pointer to next work request to post in case the
                  * current one completes, and the user modifies the work
@@ -2454,11 +2498,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                         }
                 }
                 sg_list.addr = dma_map_single(qp_info->port_priv->
-                                                device->dma_device,
-                                                &mad_priv->grh,
-                                                sizeof *mad_priv -
-                                                sizeof mad_priv->header,
-                                                DMA_FROM_DEVICE);
+                                              device->dma_device,
+                                              &mad_priv->grh,
+                                              sizeof *mad_priv -
+                                              sizeof mad_priv->header,
+                                              DMA_FROM_DEVICE);
                 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
                 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                 mad_priv->header.mad_list.mad_queue = recv_queue;
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index a6405079c285..dfd4e588ce03 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005 Intel Inc. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -100,17 +100,6 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
         }
 }
 
-static int data_offset(u8 mgmt_class)
-{
-        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-                return IB_MGMT_SA_HDR;
-        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-                return IB_MGMT_VENDOR_HDR;
-        else
-                return IB_MGMT_RMPP_HDR;
-}
-
 static void format_ack(struct ib_mad_send_buf *msg,
                        struct ib_rmpp_mad *data,
                        struct mad_rmpp_recv *rmpp_recv)
@@ -137,7 +126,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
         struct ib_mad_send_buf *msg;
         int ret, hdr_len;
 
-        hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
         msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
                                  recv_wc->wc->pkey_index, 1, hdr_len,
                                  0, GFP_KERNEL);
@@ -163,7 +152,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
         if (IS_ERR(ah))
                 return (void *) ah;
 
-        hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
         msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                  recv_wc->wc->pkey_index, 1,
                                  hdr_len, 0, GFP_KERNEL);
@@ -408,7 +397,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 
         rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
 
-        hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+        hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
         data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
         pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
         if (pad > IB_MGMT_RMPP_DATA || pad < 0)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index fb6cd42601f9..afe70a549c2f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -177,17 +177,6 @@ static int queue_packet(struct ib_umad_file *file,
         return ret;
 }
 
-static int data_offset(u8 mgmt_class)
-{
-        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-                return IB_MGMT_SA_HDR;
-        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-                return IB_MGMT_VENDOR_HDR;
-        else
-                return IB_MGMT_RMPP_HDR;
-}
-
 static void send_handler(struct ib_mad_agent *agent,
                          struct ib_mad_send_wc *send_wc)
 {
@@ -283,7 +272,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
                  */
                 return -ENOSPC;
         }
-        offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
+        offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
         max_seg_payload = sizeof (struct ib_mad) - offset;
 
         for (left = packet->length - seg_payload, buf += seg_payload;
@@ -441,21 +430,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
         }
 
         rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-        if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
-                hdr_len = IB_MGMT_SA_HDR;
-                copy_offset = IB_MGMT_RMPP_HDR;
-                rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
-                              IB_MGMT_RMPP_FLAG_ACTIVE;
-        } else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
-                   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
-                hdr_len = IB_MGMT_VENDOR_HDR;
+        hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+        if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
+                copy_offset = IB_MGMT_MAD_HDR;
+                rmpp_active = 0;
+        } else {
                 copy_offset = IB_MGMT_RMPP_HDR;
                 rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
                               IB_MGMT_RMPP_FLAG_ACTIVE;
-        } else {
-                hdr_len = IB_MGMT_MAD_HDR;
-                copy_offset = IB_MGMT_MAD_HDR;
-                rmpp_active = 0;
         }
 
         data_len = count - sizeof (struct ib_user_mad) - hdr_len;