author		Sean Hefty <sean.hefty@intel.com>	2005-10-25 13:51:39 -0400
committer	Roland Dreier <rolandd@cisco.com>	2005-10-25 13:51:39 -0400
commit		34816ad98efe4d47ffd858a0345321f9d85d9420 (patch)
tree		8a5ed6a9b80e667c4c02d9993711ced06d158555 /drivers/infiniband/core/mad_rmpp.c
parent		ae7971a7706384ca373fb7e212fe195698e6c5a1 (diff)
[IB] Fix MAD layer DMA mappings to avoid touching data buffer once mapped

The MAD layer was violating the DMA API by touching data buffers used
for sends after the DMA mapping was done.  This causes problems on
non-cache-coherent architectures, because the device doing DMA won't
see updates to the payload buffers that exist only in the CPU cache.

Fix this by having all MAD consumers use ib_create_send_mad() to
allocate their send buffers, and moving the DMA mapping into the MAD
layer so it can be done just before calling send (and after any
modifications of the send buffer by the MAD layer).

Tested on a non-cache-coherent PowerPC 440SPe system.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
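[Editor's note: for context, a minimal sketch of the consumer-side send
path under the new API. The function and the agent/ah/remote_qpn/
pkey_index parameters are illustrative, not part of the patch; the
calls mirror the updated ones in the hunks below. The key point is
that the buffer is filled completely before posting, so the MAD layer
can DMA-map it only at post time.]

	/* Illustrative sketch only -- mirrors the calls in this patch. */
	static int example_send(struct ib_mad_agent *agent, struct ib_ah *ah,
				u32 remote_qpn, u16 pkey_index)
	{
		struct ib_mad_send_buf *msg;
		int ret;

		/* rmpp_active = 1, header/data split as in ack_recv() below */
		msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 1,
					 IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA,
					 GFP_KERNEL);
		if (IS_ERR(msg))
			return PTR_ERR(msg);

		msg->ah = ah;
		/* ... format msg->mad here, before the MAD layer maps it ... */

		ret = ib_post_send_mad(msg, NULL);	/* bad_send_wr is gone */
		if (ret)
			ib_free_send_mad(msg);
		return ret;
	}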
Diffstat (limited to 'drivers/infiniband/core/mad_rmpp.c')
-rw-r--r--	drivers/infiniband/core/mad_rmpp.c	87
1 file changed, 39 insertions(+), 48 deletions(-)
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index e23836d0e21b..ba112cd5f93c 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -103,12 +103,12 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 static int data_offset(u8 mgmt_class)
 {
 	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return offsetof(struct ib_sa_mad, data);
+		return IB_MGMT_SA_HDR;
 	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return offsetof(struct ib_vendor_mad, data);
+		return IB_MGMT_VENDOR_HDR;
 	else
-		return offsetof(struct ib_rmpp_mad, data);
+		return IB_MGMT_RMPP_HDR;
 }
 
 static void format_ack(struct ib_rmpp_mad *ack,
@@ -135,21 +135,18 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 		     struct ib_mad_recv_wc *recv_wc)
 {
 	struct ib_mad_send_buf *msg;
-	struct ib_send_wr *bad_send_wr;
-	int hdr_len, ret;
+	int ret;
 
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
-				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
-				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
-				 GFP_KERNEL);
+				 recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR,
+				 IB_MGMT_RMPP_DATA, GFP_KERNEL);
 	if (!msg)
 		return;
 
-	format_ack((struct ib_rmpp_mad *) msg->mad,
-		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
-	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
-			       &bad_send_wr);
+	format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad,
+		   rmpp_recv);
+	msg->ah = rmpp_recv->ah;
+	ret = ib_post_send_mad(msg, NULL);
 	if (ret)
 		ib_free_send_mad(msg);
 }
@@ -160,30 +157,31 @@ static int alloc_response_msg(struct ib_mad_agent *agent,
 {
 	struct ib_mad_send_buf *m;
 	struct ib_ah *ah;
-	int hdr_len;
 
 	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
 				  recv_wc->recv_buf.grh, agent->port_num);
 	if (IS_ERR(ah))
 		return PTR_ERR(ah);
 
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
 	m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
-			       recv_wc->wc->pkey_index, ah, 1, hdr_len,
-			       sizeof(struct ib_rmpp_mad) - hdr_len,
-			       GFP_KERNEL);
+			       recv_wc->wc->pkey_index, 1,
+			       IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA, GFP_KERNEL);
 	if (IS_ERR(m)) {
 		ib_destroy_ah(ah);
 		return PTR_ERR(m);
 	}
+	m->ah = ah;
 	*msg = m;
 	return 0;
 }
 
-static void free_msg(struct ib_mad_send_buf *msg)
+void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
 {
-	ib_destroy_ah(msg->send_wr.wr.ud.ah);
-	ib_free_send_mad(msg);
+	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
+		ib_destroy_ah(mad_send_wc->send_buf->ah);
+	ib_free_send_mad(mad_send_wc->send_buf);
 }
 
 static void nack_recv(struct ib_mad_agent_private *agent,
@@ -191,14 +189,13 @@ static void nack_recv(struct ib_mad_agent_private *agent,
 {
 	struct ib_mad_send_buf *msg;
 	struct ib_rmpp_mad *rmpp_mad;
-	struct ib_send_wr *bad_send_wr;
 	int ret;
 
 	ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
 	if (ret)
 		return;
 
-	rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+	rmpp_mad = msg->mad;
 	memcpy(rmpp_mad, recv_wc->recv_buf.mad,
 	       data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
 
@@ -210,9 +207,11 @@ static void nack_recv(struct ib_mad_agent_private *agent,
 	rmpp_mad->rmpp_hdr.seg_num = 0;
 	rmpp_mad->rmpp_hdr.paylen_newwin = 0;
 
-	ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
-	if (ret)
-		free_msg(msg);
+	ret = ib_post_send_mad(msg, NULL);
+	if (ret) {
+		ib_destroy_ah(msg->ah);
+		ib_free_send_mad(msg);
+	}
 }
 
 static void recv_timeout_handler(void *data)
@@ -585,7 +584,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	int timeout;
 	u32 paylen;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
 	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
 
@@ -612,7 +611,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	}
 
 	/* 2 seconds for an ACK until we can find the packet lifetime */
-	timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
+	timeout = mad_send_wr->send_buf.timeout_ms;
 	if (!timeout || timeout > 2000)
 		mad_send_wr->timeout = msecs_to_jiffies(2000);
 	mad_send_wr->seg_num++;
@@ -640,7 +639,7 @@ static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
 
 	wc.status = IB_WC_REM_ABORT_ERR;
 	wc.vendor_err = rmpp_status;
-	wc.wr_id = mad_send_wr->wr_id;
+	wc.send_buf = &mad_send_wr->send_buf;
 	ib_mad_complete_send_wr(mad_send_wr, &wc);
 	return;
 out:
@@ -694,12 +693,12 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 	if (seg_num > mad_send_wr->last_ack) {
 		mad_send_wr->last_ack = seg_num;
-		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
+		mad_send_wr->retries = mad_send_wr->send_buf.retries;
 	}
 	mad_send_wr->newwin = newwin;
 	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
 		/* If no response is expected, the ACK completes the send */
-		if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
+		if (!mad_send_wr->send_buf.timeout_ms) {
 			struct ib_mad_send_wc wc;
 
 			ib_mark_mad_done(mad_send_wr);
@@ -707,13 +706,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 			wc.status = IB_WC_SUCCESS;
 			wc.vendor_err = 0;
-			wc.wr_id = mad_send_wr->wr_id;
+			wc.send_buf = &mad_send_wr->send_buf;
 			ib_mad_complete_send_wr(mad_send_wr, &wc);
 			return;
 		}
 		if (mad_send_wr->refcount == 1)
-			ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
-					     send_wr.wr.ud.timeout_ms);
+			ib_reset_mad_timeout(mad_send_wr,
+				mad_send_wr->send_buf.timeout_ms);
 	} else if (mad_send_wr->refcount == 1 &&
 		   mad_send_wr->seg_num < mad_send_wr->newwin &&
 		   mad_send_wr->seg_num <= mad_send_wr->total_seg) {
@@ -842,7 +841,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	struct ib_rmpp_mad *rmpp_mad;
 	int i, total_len, ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED;
@@ -863,7 +862,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
 	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
 			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
-	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
+	mad_send_wr->pad = total_len - IB_MGMT_RMPP_HDR -
 			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 
 	/* We need to wait for the final ACK even if there isn't a response */
@@ -878,23 +877,15 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 			    struct ib_mad_send_wc *mad_send_wc)
 {
 	struct ib_rmpp_mad *rmpp_mad;
-	struct ib_mad_send_buf *msg;
 	int ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
 
-	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
-		msg = (struct ib_mad_send_buf *) (unsigned long)
-			mad_send_wc->wr_id;
-		if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
-			ib_free_send_mad(msg);
-		else
-			free_msg(msg);
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
 		return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
-	}
 
 	if (mad_send_wc->status != IB_WC_SUCCESS ||
 	    mad_send_wr->status != IB_WC_SUCCESS)
@@ -905,7 +896,7 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 
 	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
 		mad_send_wr->timeout =
-			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
+			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
 		return IB_RMPP_RESULT_PROCESSED; /* Send done */
 	}
 
@@ -926,7 +917,7 @@ int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
 	struct ib_rmpp_mad *rmpp_mad;
 	int ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */