 drivers/infiniband/ulp/iser/iscsi_iser.h  |   2 +-
 drivers/infiniband/ulp/iser/iser_memory.c | 122 ++++++++++++++---------------
 2 files changed, 61 insertions(+), 63 deletions(-)
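This patch converts the iSER initiator from calling the generic DMA mapping
routines directly on ib_device->dma_device to the ib_dma_* wrappers, which let
an IB device interpose its own DMA mapping operations (needed by devices such
as ipath, whose mapped addresses are not ordinary bus addresses). As a rough
sketch of the wrapper pattern being adopted, modeled on the
include/rdma/ib_verbs.h inlines of this period (the dma_ops field and the
map_single callback named here are recalled, not quoted, from that header):

static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr,
                                    size_t size,
                                    enum dma_data_direction direction)
{
        /* device-specific DMA mapping, if the driver registered one */
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        /* otherwise fall back to the generic DMA API, as iSER used before */
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

With dma_ops unset the wrapper reduces to exactly the calls being removed
below, so the conversion is behavior-neutral for ordinary HCAs.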
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 234e5b061a75..cae8c96a55f8 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -182,7 +182,7 @@ struct iser_regd_buf {
         struct iser_mem_reg     reg;        /* memory registration info        */
         void                    *virt_addr;
         struct iser_device      *device;    /* device->device for dma_unmap    */
-        dma_addr_t              dma_addr;   /* if non zero, addr for dma_unmap */
+        u64                     dma_addr;   /* if non zero, addr for dma_unmap */
         enum dma_data_direction direction;  /* direction for dma_unmap         */
         unsigned int            data_size;
         atomic_t                ref_count;  /* refcount, freed when dec to 0   */
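The dma_addr_t to u64 change above matches the wrapper API: ib_dma_map_single()
returns u64 rather than dma_addr_t, since a device supplying its own DMA ops
may hand back tokens wider than the platform's dma_addr_t (which can be 32 bits
on 32-bit builds). A minimal usage sketch of the map-and-check idiom, with
illustrative variable names; the driver itself BUG()s on a mapping failure in
iser_reg_single() below rather than unwinding:

        u64 dma_addr;

        dma_addr = ib_dma_map_single(device->ib_device, regd_buf->virt_addr,
                                     regd_buf->data_size, direction);
        if (ib_dma_mapping_error(device->ib_device, dma_addr))
                return -ENOMEM;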
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 86b6915cb9c7..fc9f1fd0ae54 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -52,7 +52,7 @@
  */
 int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
 {
-        struct device *dma_device;
+        struct ib_device *dev;
 
         if ((atomic_read(&regd_buf->ref_count) == 0) ||
             atomic_dec_and_test(&regd_buf->ref_count)) {
@@ -61,8 +61,8 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
                 iser_unreg_mem(&regd_buf->reg);
 
                 if (regd_buf->dma_addr) {
-                        dma_device = regd_buf->device->ib_device->dma_device;
-                        dma_unmap_single(dma_device,
+                        dev = regd_buf->device->ib_device;
+                        ib_dma_unmap_single(dev,
                                          regd_buf->dma_addr,
                                          regd_buf->data_size,
                                          regd_buf->direction);
@@ -84,12 +84,12 @@ void iser_reg_single(struct iser_device *device,
                      struct iser_regd_buf *regd_buf,
                      enum dma_data_direction direction)
 {
-        dma_addr_t dma_addr;
+        u64 dma_addr;
 
-        dma_addr = dma_map_single(device->ib_device->dma_device,
+        dma_addr = ib_dma_map_single(device->ib_device,
                                   regd_buf->virt_addr,
                                   regd_buf->data_size, direction);
-        BUG_ON(dma_mapping_error(dma_addr));
+        BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
 
         regd_buf->reg.lkey = device->mr->lkey;
         regd_buf->reg.len  = regd_buf->data_size;
@@ -107,7 +107,7 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                                  enum iser_data_dir cmd_dir)
 {
         int dma_nents;
-        struct device *dma_device;
+        struct ib_device *dev;
         char *mem = NULL;
         struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
         unsigned long cmd_data_len = data->data_len;
@@ -147,17 +147,12 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
         iser_ctask->data_copy[cmd_dir].copy_buf = mem;
 
-        dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-        if (cmd_dir == ISER_DIR_OUT)
-                dma_nents = dma_map_sg(dma_device,
-                                       &iser_ctask->data_copy[cmd_dir].sg_single,
-                                       1, DMA_TO_DEVICE);
-        else
-                dma_nents = dma_map_sg(dma_device,
-                                       &iser_ctask->data_copy[cmd_dir].sg_single,
-                                       1, DMA_FROM_DEVICE);
-
+        dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+        dma_nents = ib_dma_map_sg(dev,
+                                  &iser_ctask->data_copy[cmd_dir].sg_single,
+                                  1,
+                                  (cmd_dir == ISER_DIR_OUT) ?
+                                  DMA_TO_DEVICE : DMA_FROM_DEVICE);
         BUG_ON(dma_nents == 0);
 
         iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
@@ -170,19 +165,16 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
                                      enum iser_data_dir cmd_dir)
 {
-        struct device *dma_device;
+        struct ib_device *dev;
         struct iser_data_buf *mem_copy;
         unsigned long cmd_data_len;
 
-        dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+        dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
         mem_copy = &iser_ctask->data_copy[cmd_dir];
 
-        if (cmd_dir == ISER_DIR_OUT)
-                dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
-                             DMA_TO_DEVICE);
-        else
-                dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
-                             DMA_FROM_DEVICE);
+        ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+                        (cmd_dir == ISER_DIR_OUT) ?
+                        DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
         if (cmd_dir == ISER_DIR_IN) {
                 char *mem;
@@ -231,10 +223,11 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
  * consecutive elements. Also, it handles one entry SG.
  */
 static int iser_sg_to_page_vec(struct iser_data_buf *data,
-                               struct iser_page_vec *page_vec)
+                               struct iser_page_vec *page_vec,
+                               struct ib_device *ibdev)
 {
         struct scatterlist *sg = (struct scatterlist *)data->buf;
-        dma_addr_t first_addr, last_addr, page;
+        u64 first_addr, last_addr, page;
         int end_aligned;
         unsigned int cur_page = 0;
         unsigned long total_sz = 0;
@@ -244,18 +237,21 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
         page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
 
         for (i = 0; i < data->dma_nents; i++) {
-                total_sz += sg_dma_len(&sg[i]);
+                unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+
+                total_sz += dma_len;
 
-                first_addr = sg_dma_address(&sg[i]);
-                last_addr  = first_addr + sg_dma_len(&sg[i]);
+                first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+                last_addr  = first_addr + dma_len;
 
                 end_aligned = !(last_addr & ~MASK_4K);
 
                 /* continue to collect page fragments till aligned or SG ends */
                 while (!end_aligned && (i + 1 < data->dma_nents)) {
                         i++;
-                        total_sz += sg_dma_len(&sg[i]);
-                        last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
+                        dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+                        total_sz += dma_len;
+                        last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
                         end_aligned = !(last_addr & ~MASK_4K);
                 }
 
@@ -287,10 +283,11 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
  * the number of entries which are aligned correctly. Supports the case where
  * consecutive SG elements are actually fragments of the same physcial page.
  */
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
+                                              struct ib_device *ibdev)
 {
         struct scatterlist *sg;
-        dma_addr_t end_addr, next_addr;
+        u64 end_addr, next_addr;
         int i, cnt;
         unsigned int ret_len = 0;
 
@@ -302,12 +299,12 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
                   (unsigned long)page_to_phys(sg[i].page),
                   (unsigned long)sg[i].offset,
                   (unsigned long)sg[i].length); */
-                end_addr = sg_dma_address(&sg[i]) +
-                           sg_dma_len(&sg[i]);
+                end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
+                           ib_sg_dma_len(ibdev, &sg[i]);
                 /* iser_dbg("Checking sg iobuf end address "
                    "0x%08lX\n", end_addr); */
                 if (i + 1 < data->dma_nents) {
-                        next_addr = sg_dma_address(&sg[i+1]);
+                        next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
                         /* are i, i+1 fragments of the same page? */
                         if (end_addr == next_addr)
                                 continue;
@@ -324,7 +321,8 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
         return ret_len;
 }
 
-static void iser_data_buf_dump(struct iser_data_buf *data)
+static void iser_data_buf_dump(struct iser_data_buf *data,
+                               struct ib_device *ibdev)
 {
         struct scatterlist *sg = (struct scatterlist *)data->buf;
         int i;
@@ -332,9 +330,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
         for (i = 0; i < data->dma_nents; i++)
                 iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
                          "off:0x%x sz:0x%x dma_len:0x%x\n",
-                         i, (unsigned long)sg_dma_address(&sg[i]),
+                         i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
                          sg[i].page, sg[i].offset,
-                         sg[i].length,sg_dma_len(&sg[i]));
+                         sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -348,7 +346,8 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
 }
 
 static void iser_page_vec_build(struct iser_data_buf *data,
-                                struct iser_page_vec *page_vec)
+                                struct iser_page_vec *page_vec,
+                                struct ib_device *ibdev)
 {
         int page_vec_len = 0;
 
@@ -356,14 +355,14 @@ static void iser_page_vec_build(struct iser_data_buf *data,
         page_vec->offset = 0;
 
         iser_dbg("Translating sg sz: %d\n", data->dma_nents);
-        page_vec_len = iser_sg_to_page_vec(data,page_vec);
+        page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
         iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
 
         page_vec->length = page_vec_len;
 
         if (page_vec_len * SIZE_4K < page_vec->data_size) {
                 iser_err("page_vec too short to hold this SG\n");
-                iser_data_buf_dump(data);
+                iser_data_buf_dump(data, ibdev);
                 iser_dump_page_vec(page_vec);
                 BUG();
         }
@@ -374,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
                            enum iser_data_dir iser_dir,
                            enum dma_data_direction dma_dir)
 {
-        struct device *dma_device;
+        struct ib_device *dev;
 
         iser_ctask->dir[iser_dir] = 1;
-        dma_device =
-                iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+        dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
 
-        data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+        data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
         if (data->dma_nents == 0) {
                 iser_err("dma_map_sg failed!!!\n");
                 return -EINVAL;
@@ -390,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
 
 void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
 {
-        struct device *dma_device;
+        struct ib_device *dev;
         struct iser_data_buf *data;
 
-        dma_device =
-                iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+        dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
 
         if (iser_ctask->dir[ISER_DIR_IN]) {
                 data = &iser_ctask->data[ISER_DIR_IN];
-                dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+                ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
         }
 
         if (iser_ctask->dir[ISER_DIR_OUT]) {
                 data = &iser_ctask->data[ISER_DIR_OUT];
-                dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+                ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
         }
 }
 
@@ -418,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 {
         struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
         struct iser_device   *device = ib_conn->device;
+        struct ib_device     *ibdev = device->ib_device;
         struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
         struct iser_regd_buf *regd_buf;
         int aligned_len;
@@ -427,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 
         regd_buf = &iser_ctask->rdma_regd[cmd_dir];
 
-        aligned_len = iser_data_buf_aligned_len(mem);
+        aligned_len = iser_data_buf_aligned_len(mem, ibdev);
         if (aligned_len != mem->dma_nents) {
                 iser_err("rdma alignment violation %d/%d aligned\n",
                          aligned_len, mem->size);
-                iser_data_buf_dump(mem);
+                iser_data_buf_dump(mem, ibdev);
 
                 /* unmap the command data before accessing it */
                 iser_dma_unmap_task_data(iser_ctask);
@@ -449,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 
                 regd_buf->reg.lkey = device->mr->lkey;
                 regd_buf->reg.rkey = device->mr->rkey;
-                regd_buf->reg.len  = sg_dma_len(&sg[0]);
-                regd_buf->reg.va   = sg_dma_address(&sg[0]);
+                regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
+                regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
                 regd_buf->reg.is_fmr = 0;
 
                 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
@@ -460,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
                          (unsigned long)regd_buf->reg.va,
                          (unsigned long)regd_buf->reg.len);
         } else { /* use FMR for multiple dma entries */
-                iser_page_vec_build(mem, ib_conn->page_vec);
+                iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
                 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
                 if (err) {
-                        iser_data_buf_dump(mem);
+                        iser_data_buf_dump(mem, ibdev);
                         iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
                                  ntoh24(iser_ctask->desc.iscsi_header.dlength));
                         iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
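A recurring pattern in the hunks above: every sg_dma_address()/sg_dma_len()
access on a mapped scatterlist becomes ib_sg_dma_address()/ib_sg_dma_len(),
taking the ib_device so a device-specific implementation can report per-element
addresses and lengths from wherever it stores them. A condensed sketch of the
resulting loop shape in iser_sg_to_page_vec(), simplified here by omitting the
4K-alignment coalescing:

        for (i = 0; i < data->dma_nents; i++) {
                u64 addr = ib_sg_dma_address(ibdev, &sg[i]);
                unsigned int len = ib_sg_dma_len(ibdev, &sg[i]);

                total_sz += len;
                /* ... emit 4K page entries covering [addr, addr + len) ... */
        }

The same reasoning drives the if/else collapses in
iser_start_rdma_unaligned_sg() and iser_finalize_rdma_unaligned_sg(): with a
single ib_dma_map_sg()/ib_dma_unmap_sg() entry point, the DMA direction can be
chosen with a conditional expression instead of duplicating the call.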
