| author | Jens Axboe <jens.axboe@oracle.com> | 2007-10-16 06:29:34 -0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2007-10-16 06:29:34 -0400 |
| commit | 3eed13fd933dbb81db12f7cdec6de9268c4443b5 | |
| tree | c16913706acffd4a0b29ec12cd68906b708c9a8a | /drivers/infiniband/ulp |
| parent | a39d113936370ba524fa9e34d6954c3625c8aa64 | |
| parent | 2c941a204070ab32d92d40318a3196a7fb994c00 | |
Merge branch 'sglist-arch' into for-linus
Diffstat (limited to 'drivers/infiniband/ulp')

| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | drivers/infiniband/ulp/iser/iser_memory.c | 75 |

1 file changed, 41 insertions(+), 34 deletions(-)
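The pattern throughout this diff is the same: with the sglist-arch work merged, a scatterlist may be chained across several separately allocated chunks, so walking it by array index (`sg[i]`) is no longer safe. The driver is converted to the `for_each_sg()` iterator, which advances with `sg_next()` and therefore follows chain links. A minimal illustrative sketch of that pattern follows; the helper name `sg_total_length()` is invented for this example and is not part of the commit:

```c
#include <linux/scatterlist.h>

/* Hypothetical helper showing the chained-safe iteration pattern:
 * for_each_sg() advances with sg_next(), which follows chain links,
 * whereas sg[i] pointer arithmetic assumes one contiguous array and
 * would step into a chain entry instead of the real next element.
 */
static size_t sg_total_length(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	size_t total = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		total += sg->length;	/* sg may live in another chunk */

	return total;
}
```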
```diff
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index e05690e359..f3529b6f0a 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -124,17 +124,19 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_OUT) {
 		/* copy the unaligned sg the buffer which is used for RDMA */
-		struct scatterlist *sg = (struct scatterlist *)data->buf;
+		struct scatterlist *sgl = (struct scatterlist *)data->buf;
+		struct scatterlist *sg;
 		int i;
 		char *p, *from;
 
-		for (p = mem, i = 0; i < data->size; i++) {
-			from = kmap_atomic(sg[i].page, KM_USER0);
+		p = mem;
+		for_each_sg(sgl, sg, data->size, i) {
+			from = kmap_atomic(sg->page, KM_USER0);
 			memcpy(p,
-			       from + sg[i].offset,
-			       sg[i].length);
+			       from + sg->offset,
+			       sg->length);
 			kunmap_atomic(from, KM_USER0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
@@ -176,7 +178,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	if (cmd_dir == ISER_DIR_IN) {
 		char *mem;
-		struct scatterlist *sg;
+		struct scatterlist *sgl, *sg;
 		unsigned char *p, *to;
 		unsigned int sg_size;
 		int i;
@@ -184,16 +186,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 		/* copy back read RDMA to unaligned sg */
 		mem = mem_copy->copy_buf;
 
-		sg = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
+		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
 		sg_size = iser_ctask->data[ISER_DIR_IN].size;
 
-		for (p = mem, i = 0; i < sg_size; i++){
-			to = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
-			memcpy(to + sg[i].offset,
+		p = mem;
+		for_each_sg(sgl, sg, sg_size, i) {
+			to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+			memcpy(to + sg->offset,
 			       p,
-			       sg[i].length);
+			       sg->length);
 			kunmap_atomic(to, KM_SOFTIRQ0);
-			p += sg[i].length;
+			p += sg->length;
 		}
 	}
 
@@ -224,7 +227,8 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 			       struct iser_page_vec *page_vec,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	u64 first_addr, last_addr, page;
 	int end_aligned;
 	unsigned int cur_page = 0;
@@ -232,24 +236,25 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	int i;
 
 	/* compute the offset of first element */
-	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
+	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
-	for (i = 0; i < data->dma_nents; i++) {
-		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+	for_each_sg(sgl, sg, data->dma_nents, i) {
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
 
 		total_sz += dma_len;
 
-		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+		first_addr = ib_sg_dma_address(ibdev, sg);
 		last_addr = first_addr + dma_len;
 
 		end_aligned = !(last_addr & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
+			sg = sg_next(sg);
 			i++;
-			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+			dma_len = ib_sg_dma_len(ibdev, sg);
 			total_sz += dma_len;
-			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
+			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
 			end_aligned = !(last_addr & ~MASK_4K);
 		}
 
@@ -284,25 +289,26 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 					      struct ib_device *ibdev)
 {
-	struct scatterlist *sg;
+	struct scatterlist *sgl, *sg;
 	u64 end_addr, next_addr;
 	int i, cnt;
 	unsigned int ret_len = 0;
 
-	sg = (struct scatterlist *)data->buf;
+	sgl = (struct scatterlist *)data->buf;
 
-	for (cnt = 0, i = 0; i < data->dma_nents; i++, cnt++) {
+	cnt = 0;
+	for_each_sg(sgl, sg, data->dma_nents, i) {
 		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
 		   "offset: %ld sz: %ld\n", i,
-		   (unsigned long)page_to_phys(sg[i].page),
-		   (unsigned long)sg[i].offset,
-		   (unsigned long)sg[i].length); */
-		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
-			   ib_sg_dma_len(ibdev, &sg[i]);
+		   (unsigned long)page_to_phys(sg->page),
+		   (unsigned long)sg->offset,
+		   (unsigned long)sg->length); */
+		end_addr = ib_sg_dma_address(ibdev, sg) +
+			   ib_sg_dma_len(ibdev, sg);
 		/* iser_dbg("Checking sg iobuf end address "
 		   "0x%08lX\n", end_addr); */
 		if (i + 1 < data->dma_nents) {
-			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
+			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
 			/* are i, i+1 fragments of the same page? */
 			if (end_addr == next_addr)
 				continue;
@@ -322,15 +328,16 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 static void iser_data_buf_dump(struct iser_data_buf *data,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg = (struct scatterlist *)data->buf;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
 	int i;
 
-	for (i = 0; i < data->dma_nents; i++)
+	for_each_sg(sgl, sg, data->dma_nents, i)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
-			 sg[i].page, sg[i].offset,
-			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
+			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
+			 sg->page, sg->offset,
+			 sg->length, ib_sg_dma_len(ibdev, sg));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
```
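Two details of the converted code are worth noting. First, `page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;` still indexes the list head directly; that remains valid because element 0 is always at the start of the first chunk, and only advancing past an element requires `sg_next()`. Second, the traversal the loops now rely on works roughly as below. This is a simplified sketch assuming the `sg_is_chain()`/`sg_chain_ptr()` helpers from <linux/scatterlist.h>; the in-tree `sg_next()` differs in detail, and `sg_next_sketch` is a made-up name:

```c
#include <linux/scatterlist.h>

/* Simplified sketch of sg_next() semantics, not the real implementation:
 * move to the next slot in the current chunk, and if that slot is a
 * chain entry, jump to the chunk it points at.
 */
static inline struct scatterlist *sg_next_sketch(struct scatterlist *sg)
{
	sg++;				/* next slot in this chunk */
	if (unlikely(sg_is_chain(sg)))	/* chain entry, not real data? */
		sg = sg_chain_ptr(sg);	/* follow link to the next chunk */
	return sg;
}
```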
