author	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-16 13:32:31 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-16 13:32:31 -0500
commit	e69381b4175ba162229646f6753ff1d87c24d468 (patch)
tree	ac4c03f6a0a1a0426832aa4f5c3b7732080c51cc /drivers/infiniband/ulp
parent	238ccbb050a243e935bb3fc679c2e4bbff7004aa (diff)
parent	14f369d1d61e7ac6578c54ca9ce3caaf4072412c (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (45 commits)
  RDMA/cxgb3: Fix error paths in post_send and post_recv
  RDMA/nes: Fix stale ARP issue
  RDMA/nes: FIN during MPA startup causes timeout
  RDMA/nes: Free kmap() resources
  RDMA/nes: Check for zero STag
  RDMA/nes: Fix Xansation test crash on cm_node ref_count
  RDMA/nes: Abnormal listener exit causes loopback node crash
  RDMA/nes: Fix crash in nes_accept()
  RDMA/nes: Resource not freed for REJECTed connections
  RDMA/nes: MPA request/response error checking
  RDMA/nes: Fix query of ORD values
  RDMA/nes: Fix MAX_CM_BUFFER define
  RDMA/nes: Pass correct size to ioremap_nocache()
  RDMA/nes: Update copyright and branding string
  RDMA/nes: Add max_cqe check to nes_create_cq()
  RDMA/nes: Clean up struct nes_qp
  RDMA/nes: Implement IB_SIGNAL_ALL_WR as an iWARP extension
  RDMA/nes: Add additional SFP+ PHY uC status check and PHY reset
  RDMA/nes: Correct fast memory registration implementation
  IB/ehca: Fix error paths in post_send and post_recv
  ...
Diffstat (limited to 'drivers/infiniband/ulp')
 drivers/infiniband/ulp/ipoib/ipoib_main.c |   1 +
 drivers/infiniband/ulp/iser/iser_memory.c | 122 +++++++++++++++-------------
 2 files changed, 57 insertions(+), 66 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2bf5116deec4..df3eb8c9fd96 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -884,6 +884,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
 
 	neigh->neighbour = neighbour;
 	neigh->dev = dev;
+	memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
 	*to_ipoib_neigh(neighbour) = neigh;
 	skb_queue_head_init(&neigh->queue);
 	ipoib_cm_set(neigh, NULL);
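
The one-line ipoib fix above zeroes the new neighbour's destination GID, presumably so that later comparisons of neigh->dgid see a well-defined "unset" value instead of leftover heap contents. A minimal user-space sketch of that failure mode; fake_neigh and neigh_alloc are hypothetical stand-ins for the kernel's ipoib_neigh and ipoib_neigh_alloc():

/* Illustrative sketch only: shows why a freshly allocated neighbour's
 * GID must be zeroed before anyone memcmp()s against it. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

union ib_gid { unsigned char raw[16]; };

struct fake_neigh {
	union ib_gid dgid;	/* destination GID */
};

static struct fake_neigh *neigh_alloc(void)
{
	struct fake_neigh *n = malloc(sizeof(*n));
	if (!n)
		return NULL;
	/* Without this memset, dgid holds whatever the allocator left
	 * behind, and a later memcmp() may spuriously "match". */
	memset(&n->dgid.raw, 0, sizeof(union ib_gid));
	return n;
}

int main(void)
{
	static const union ib_gid zero_gid;	/* all zeroes */
	struct fake_neigh *n = neigh_alloc();

	if (!n)
		return 1;
	/* a zeroed dgid is the unambiguous "not resolved yet" state */
	printf("dgid is %s\n",
	       memcmp(&n->dgid, &zero_gid, sizeof(zero_gid)) ? "set" : "unset");
	free(n);
	return 0;
}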
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index b9453d068e9d..274c883ef3ea 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -209,6 +209,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 	mem_copy->copy_buf = NULL;
 }
 
+#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
+
 /**
  * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
  * and returns the length of resulting physical address array (may be less than
@@ -221,62 +223,52 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
  * where --few fragments of the same page-- are present in the SG as
  * consecutive elements. Also, it handles one entry SG.
  */
+
 static int iser_sg_to_page_vec(struct iser_data_buf *data,
 			       struct iser_page_vec *page_vec,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sgl = (struct scatterlist *)data->buf;
-	struct scatterlist *sg;
-	u64 first_addr, last_addr, page;
-	int end_aligned;
-	unsigned int cur_page = 0;
+	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
+	u64 start_addr, end_addr, page, chunk_start = 0;
 	unsigned long total_sz = 0;
-	int i;
+	unsigned int dma_len;
+	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
 
 	/* compute the offset of first element */
 	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
+	new_chunk = 1;
+	cur_page = 0;
 	for_each_sg(sgl, sg, data->dma_nents, i) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
+		start_addr = ib_sg_dma_address(ibdev, sg);
+		if (new_chunk)
+			chunk_start = start_addr;
+		dma_len = ib_sg_dma_len(ibdev, sg);
+		end_addr = start_addr + dma_len;
 		total_sz += dma_len;
 
-		first_addr = ib_sg_dma_address(ibdev, sg);
-		last_addr = first_addr + dma_len;
-
-		end_aligned = !(last_addr & ~MASK_4K);
-
-		/* continue to collect page fragments till aligned or SG ends */
-		while (!end_aligned && (i + 1 < data->dma_nents)) {
-			sg = sg_next(sg);
-			i++;
-			dma_len = ib_sg_dma_len(ibdev, sg);
-			total_sz += dma_len;
-			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
-			end_aligned = !(last_addr & ~MASK_4K);
+		/* collect page fragments until aligned or end of SG list */
+		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
+			new_chunk = 0;
+			continue;
 		}
-
-		/* handle the 1st page in the 1st DMA element */
-		if (cur_page == 0) {
-			page = first_addr & MASK_4K;
-			page_vec->pages[cur_page] = page;
-			cur_page++;
+		new_chunk = 1;
+
+		/* address of the first page in the contiguous chunk;
+		   masking relevant for the very first SG entry,
+		   which might be unaligned */
+		page = chunk_start & MASK_4K;
+		do {
+			page_vec->pages[cur_page++] = page;
 			page += SIZE_4K;
-		} else
-			page = first_addr;
-
-		for (; page < last_addr; page += SIZE_4K) {
-			page_vec->pages[cur_page] = page;
-			cur_page++;
-		}
-
+		} while (page < end_addr);
 	}
+
 	page_vec->data_size = total_sz;
 	iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
 	return cur_page;
 }
 
-#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
 
 /**
  * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
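
The rewritten iser_sg_to_page_vec() above collapses runs of SG entries whose DMA addresses abut into one contiguous chunk, and emits the chunk's 4K pages only once the chunk closes (its end is aligned, or the list is exhausted). A stand-alone sketch of that chunking walk, using a flat array in place of struct scatterlist and the ib_sg_dma_*() accessors; struct sg_ent, sg_to_page_vec() and the sample addresses are hypothetical:

/* Stand-alone sketch of the chunk-based page-vector walk. */
#include <stdio.h>
#include <stdint.h>

#define SIZE_4K	4096ULL
#define MASK_4K	(~(SIZE_4K - 1))
#define IS_4K_ALIGNED(a)	((((uint64_t)(a)) & ~MASK_4K) == 0)

struct sg_ent { uint64_t addr; unsigned int len; };	/* stand-in */

static int sg_to_page_vec(const struct sg_ent *sg, int nents,
			  uint64_t *pages, int max_pages)
{
	uint64_t page, chunk_start = 0, end_addr;
	int i, new_chunk = 1, cur_page = 0;

	for (i = 0; i < nents; i++) {
		if (new_chunk)
			chunk_start = sg[i].addr;
		end_addr = sg[i].addr + sg[i].len;

		/* keep accumulating while the chunk end is unaligned
		 * and more entries follow */
		if (!IS_4K_ALIGNED(end_addr) && i < nents - 1) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* emit every 4K page covered by the closed chunk */
		for (page = chunk_start & MASK_4K;
		     page < end_addr && cur_page < max_pages;
		     page += SIZE_4K)
			pages[cur_page++] = page;
	}
	return cur_page;
}

int main(void)
{
	/* two fragments of one page followed by a full page */
	struct sg_ent sg[] = { {0x1000, 2048}, {0x1800, 2048}, {0x2000, 4096} };
	uint64_t pages[8];
	int i, n = sg_to_page_vec(sg, 3, pages, 8);

	for (i = 0; i < n; i++)
		printf("page[%d] = 0x%llx\n", i, (unsigned long long)pages[i]);
	return 0;
}

With this layout the first two fragments close one chunk ending at 0x2000, so the walk emits exactly two pages (0x1000 and 0x2000) rather than one per SG entry.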
@@ -284,42 +276,40 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
  * the number of entries which are aligned correctly. Supports the case where
  * consecutive SG elements are actually fragments of the same physcial page.
  */
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
-					      struct ib_device *ibdev)
+static int iser_data_buf_aligned_len(struct iser_data_buf *data,
+				      struct ib_device *ibdev)
 {
-	struct scatterlist *sgl, *sg;
-	u64 end_addr, next_addr;
-	int i, cnt;
-	unsigned int ret_len = 0;
+	struct scatterlist *sgl, *sg, *next_sg = NULL;
+	u64 start_addr, end_addr;
+	int i, ret_len, start_check = 0;
+
+	if (data->dma_nents == 1)
+		return 1;
 
 	sgl = (struct scatterlist *)data->buf;
+	start_addr = ib_sg_dma_address(ibdev, sgl);
 
-	cnt = 0;
 	for_each_sg(sgl, sg, data->dma_nents, i) {
-		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
-		   "offset: %ld sz: %ld\n", i,
-		   (unsigned long)sg_phys(sg),
-		   (unsigned long)sg->offset,
-		   (unsigned long)sg->length); */
-		end_addr = ib_sg_dma_address(ibdev, sg) +
-			   ib_sg_dma_len(ibdev, sg);
-		/* iser_dbg("Checking sg iobuf end address "
-		   "0x%08lX\n", end_addr); */
-		if (i + 1 < data->dma_nents) {
-			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
-			/* are i, i+1 fragments of the same page? */
-			if (end_addr == next_addr) {
-				cnt++;
-				continue;
-			} else if (!IS_4K_ALIGNED(end_addr)) {
-				ret_len = cnt + 1;
-				break;
-			}
-		}
-		cnt++;
+		if (start_check && !IS_4K_ALIGNED(start_addr))
+			break;
+
+		next_sg = sg_next(sg);
+		if (!next_sg)
+			break;
+
+		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
+		start_addr = ib_sg_dma_address(ibdev, next_sg);
+
+		if (end_addr == start_addr) {
+			start_check = 0;
+			continue;
+		} else
+			start_check = 1;
+
+		if (!IS_4K_ALIGNED(end_addr))
+			break;
 	}
-	if (i == data->dma_nents)
-		ret_len = cnt; /* loop ended */
+	ret_len = (next_sg) ? i : i+1;
 	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
 		 ret_len, data->dma_nents, data);
 	return ret_len;
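
The simplified iser_data_buf_aligned_len() above counts how many leading SG entries could back one contiguous registration: consecutive fragments of the same page are tolerated, but any other boundary must fall on a 4K edge. A user-space sketch of the same decision logic, reusing the hypothetical struct sg_ent stand-in from the previous sketch (aligned_len() and the sample layout are illustrative, not the kernel API; hit_end plays the role of the NULL next_sg check):

/* Stand-alone sketch of the aligned-prefix scan. */
#include <stdio.h>
#include <stdint.h>

#define SIZE_4K	4096ULL
#define MASK_4K	(~(SIZE_4K - 1))
#define IS_4K_ALIGNED(a)	((((uint64_t)(a)) & ~MASK_4K) == 0)

struct sg_ent { uint64_t addr; unsigned int len; };	/* stand-in */

static int aligned_len(const struct sg_ent *sg, int nents)
{
	uint64_t start_addr, end_addr;
	int i, start_check = 0, hit_end = 0;

	if (nents == 1)
		return 1;

	start_addr = sg[0].addr;
	for (i = 0; i < nents; i++) {
		/* a chunk that begins off a page edge ends the prefix */
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;
		if (i == nents - 1) {
			hit_end = 1;	/* no next entry to compare with */
			break;
		}

		end_addr = start_addr + sg[i].len;
		start_addr = sg[i + 1].addr;

		if (end_addr == start_addr) {	/* same-page fragments */
			start_check = 0;
			continue;
		}
		start_check = 1;
		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	/* reached the last entry: all of them qualify; else only i do */
	return hit_end ? i + 1 : i;
}

int main(void)
{
	/* third entry starts mid-page (0x3800): only the first two count */
	struct sg_ent sg[] = { {0x1000, 4096}, {0x2000, 4096}, {0x3800, 2048} };

	printf("aligned prefix: %d of 3\n", aligned_len(sg, 3));
	return 0;
}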