author     Erez Zilber <erezz@voltaire.com>     2006-09-11 05:22:30 -0400
committer  Roland Dreier <rolandd@cisco.com>    2006-09-22 18:22:51 -0400
commit     8dfa0876d3dde5f9c1818a4c35caaabc3ddba78b
tree       ca68e1f128305185c8cb807e15ab67b36b2be2a5
parent     8072ec2f8f6790df91e85d833e672c9c30a7ab3c
IB/iser: make FMR "page size" be 4K and not PAGE_SIZE
Since iSER can use at most one RDMA operation to execute a SCSI command, and registration of the sg associated with the command has its restrictions, the code checks whether an sg is "aligned for RDMA". Alignment for RDMA is measured in "FMR page" units, whose possible resolutions differ between HCAs and can be smaller than, equal to, or bigger than the system page size. When the system page size is bigger than 4KB (e.g. the default on ia64 kernels), there is a better chance that an sg will be aligned for RDMA if the FMR page size is 4KB.

Change the code to create FMRs whose pages are 4KB in size and to take that into account when processing the sg.

Signed-off-by: Erez Zilber <erezz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
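To make the motivation concrete, here is a small illustration (not part of the patch; the DMA address and the 16K page size are hypothetical) of why measuring alignment in 4K FMR pages helps on a large-page kernel:

/* Illustration only: constants mirror the SHIFT_4K/SIZE_4K/MASK_4K
 * macros added by this patch.
 */
#include <stdio.h>

#define SHIFT_4K  12
#define SIZE_4K   (1UL << SHIFT_4K)
#define MASK_4K   (~(SIZE_4K - 1))

int main(void)
{
	unsigned long addr = 0x105000;        /* hypothetical DMA address */
	unsigned long page_16k = 1UL << 14;   /* 16K system page (e.g. ia64) */

	/* not aligned to a 16K system page ... */
	printf("16K offset: 0x%lx\n", addr & (page_16k - 1));  /* 0x1000 */
	/* ... but aligned to a 4K FMR page, so this sg can still be
	 * registered with a single FMR, i.e. "aligned for rdma" */
	printf("4K  offset: 0x%lx\n", addr & ~MASK_4K);        /* 0x0 */
	return 0;
}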
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h   |  6
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c  | 31
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c   |  4
3 files changed, 27 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 3350ba690cfe..0ba02abb0414 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -82,8 +82,12 @@
 		       __func__ , ## arg);		\
 	} while (0)
 
+#define SHIFT_4K	12
+#define SIZE_4K	(1UL << SHIFT_4K)
+#define MASK_4K	(~(SIZE_4K-1))
+
 				/* support upto 512KB in one RDMA */
-#define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> PAGE_SHIFT)
+#define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
 #define ISCSI_ISER_MAX_LUN		256
 #define ISCSI_ISER_MAX_CMD_LEN		16
 
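Note that MASK_4K as defined here keeps the high-order (page-frame) bits, the opposite of the low-bit mask that the old local macro in iser_memory.c used. A worked example of how the three constants behave, with an arbitrary 64-bit address chosen for illustration:

/*
 * Sketch, values for illustration only:
 *
 *   SIZE_4K = 0x1000
 *   MASK_4K = 0xfffffffffffff000
 *
 *   0x12345678 &  MASK_4K = 0x12345000   start of the enclosing 4K page
 *   0x12345678 & ~MASK_4K = 0x00000678   offset within that 4K page
 */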
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 53af9567632e..bcef0d31f756 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -42,6 +42,7 @@
 #include "iscsi_iser.h"
 
 #define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
+
 /**
  * Decrements the reference count for the
  * registered buffer & releases it
@@ -239,7 +240,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	int i;
 
 	/* compute the offset of first element */
-	page_vec->offset = (u64) sg[0].offset;
+	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
 
 	for (i = 0; i < data->dma_nents; i++) {
 		total_sz += sg_dma_len(&sg[i]);
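Because MASK_4K keeps the page-frame bits, "& ~MASK_4K" extracts the offset within a 4K page. A worked example with a hypothetical value that can only occur on a large-page kernel:

/*
 * On a 16K-page kernel the first SG element could start at
 * sg[0].offset = 0x1678.  Measured in 4K FMR pages the registered
 * region starts at
 *
 *   0x1678 & ~MASK_4K = 0x678
 *
 * bytes into its 4K page, which is the offset the FMR mapping needs.
 */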
@@ -247,21 +248,30 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 		first_addr = sg_dma_address(&sg[i]);
 		last_addr  = first_addr + sg_dma_len(&sg[i]);
 
-		start_aligned = !(first_addr & ~PAGE_MASK);
-		end_aligned   = !(last_addr  & ~PAGE_MASK);
+		start_aligned = !(first_addr & ~MASK_4K);
+		end_aligned   = !(last_addr  & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
 			i++;
 			total_sz += sg_dma_len(&sg[i]);
 			last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
-			end_aligned = !(last_addr  & ~PAGE_MASK);
+			end_aligned = !(last_addr  & ~MASK_4K);
 		}
 
-		first_addr = first_addr & PAGE_MASK;
-
-		for (page = first_addr; page < last_addr; page += PAGE_SIZE)
-			page_vec->pages[cur_page++] = page;
+		/* handle the 1st page in the 1st DMA element */
+		if (cur_page == 0) {
+			page = first_addr & MASK_4K;
+			page_vec->pages[cur_page] = page;
+			cur_page++;
+			page += SIZE_4K;
+		} else
+			page = first_addr;
+
+		for (; page < last_addr; page += SIZE_4K) {
+			page_vec->pages[cur_page] = page;
+			cur_page++;
+		}
 
 	}
 	page_vec->data_size = total_sz;
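A hypothetical walk-through of the reworked loop (addresses invented for illustration), showing how fragments are merged until a 4K-aligned end is reached and how only the very first page is rounded down:

/*
 * Suppose the SG maps two contiguous DMA fragments,
 * [0x10001200, 0x10003200) followed by [0x10003200, 0x10005000).
 * The first fragment does not end on a 4K boundary, so the inner
 * while loop merges it with the next one; the merged range
 * [0x10001200, 0x10005000) is then emitted in SIZE_4K steps:
 *
 *   cur_page == 0:  page = 0x10001200 & MASK_4K = 0x10001000
 *   page_vec->pages[] = { 0x10001000, 0x10002000,
 *                         0x10003000, 0x10004000 }
 *
 * Together with page_vec->offset and data_size this describes the
 * registered region exactly.
 */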
@@ -269,8 +279,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	return cur_page;
 }
 
-#define MASK_4K			((1UL << 12) - 1)	/* 0xFFF */
-#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & MASK_4K) == 0)
+#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
 
 /**
  * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
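With MASK_4K now defined globally as the complement of the old local mask, the alignment test flips from "& MASK_4K" to "& ~MASK_4K"; both forms accept the same addresses:

/*
 * Old local macro:  MASK_4K = 0x0000000000000fff
 *                   aligned iff (addr &  MASK_4K) == 0
 * New global macro: MASK_4K = 0xfffffffffffff000
 *                   aligned iff (addr & ~MASK_4K) == 0
 *
 * e.g. both accept 0x10004000 and both reject 0x10004200.
 */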
@@ -352,7 +361,7 @@ static void iser_page_vec_build(struct iser_data_buf *data,
 
 	page_vec->length = page_vec_len;
 
-	if (page_vec_len * PAGE_SIZE < page_vec->data_size) {
+	if (page_vec_len * SIZE_4K < page_vec->data_size) {
 		iser_err("page_vec too short to hold this SG\n");
 		iser_data_buf_dump(data);
 		iser_dump_page_vec(page_vec);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 72febf1f8ff8..9b27a7c26aa8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -150,7 +150,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	}
 	ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
 
-	params.page_shift        = PAGE_SHIFT;
+	params.page_shift        = SHIFT_4K;
 	/* when the first/last SG element are not start/end *
 	 * page aligned, the map whould be of N+1 pages */
 	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
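With a fixed 4K page shift, the maximum FMR mapping size works out as follows; this just spells out the arithmetic of the constants defined in iscsi_iser.h:

/*
 * SHIFT_4K = 12, so:
 *
 *   ISCSI_ISER_SG_TABLESIZE  = 0x80000 >> 12 = 128    (512KB / 4KB)
 *   params.max_pages_per_fmr = 128 + 1       = 129
 *
 * The extra page covers a 512KB transfer whose first and last bytes do
 * not fall on 4K boundaries and which therefore spans 129 distinct 4K
 * pages.
 */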
@@ -604,7 +604,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
 
 	mem_reg->lkey  = mem->fmr->lkey;
 	mem_reg->rkey  = mem->fmr->rkey;
-	mem_reg->len   = page_vec->length * PAGE_SIZE;
+	mem_reg->len   = page_vec->length * SIZE_4K;
 	mem_reg->va    = io_addr;
 	mem_reg->mem_h = (void *)mem;
 