Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvscsi.c')

 -rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  251
 1 files changed, 128 insertions, 123 deletions
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index eaefeddb2b4a..0a8ad37ae899 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -168,7 +168,7 @@ static void release_event_pool(struct event_pool *pool,
 			++in_use;
 		if (pool->events[i].ext_list) {
 			dma_free_coherent(hostdata->dev,
-				  SG_ALL * sizeof(struct memory_descriptor),
+				  SG_ALL * sizeof(struct srp_direct_buf),
 				  pool->events[i].ext_list,
 				  pool->events[i].ext_list_token);
 		}
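For reference, the driver-private descriptor types being removed here map onto the generic ones in include/scsi/srp.h. A rough sketch of those definitions — field layout quoted from memory, so treat it as an assumption and check the header rather than this note:

	struct srp_direct_buf {
		__be64	va;	/* bus address of the buffer */
		__be32	key;	/* memory handle; this driver always uses 0 */
		__be32	len;	/* buffer length in bytes */
	};

	struct srp_indirect_buf {
		struct srp_direct_buf	table_desc;	/* describes the descriptor table itself */
		__be32			len;		/* total transfer length */
		struct srp_direct_buf	desc_list[0];	/* the table, possibly inline in the IU */
	};

That makes the renames throughout this patch mechanical: virtual_address/length/memory_handle become va/len/key, head becomes table_desc, list becomes desc_list, total_length becomes len, and additional_data becomes add_data.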
@@ -284,40 +284,37 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
 			      struct srp_cmd *srp_cmd,
 			      int numbuf)
 {
+	u8 fmt;
+
 	if (numbuf == 0)
 		return;
 
-	if (numbuf == 1) {
+	if (numbuf == 1)
+		fmt = SRP_DATA_DESC_DIRECT;
+	else {
+		fmt = SRP_DATA_DESC_INDIRECT;
+		numbuf = min(numbuf, MAX_INDIRECT_BUFS);
+
 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
-			srp_cmd->data_out_format = SRP_DIRECT_BUFFER;
+			srp_cmd->data_out_desc_cnt = numbuf;
 		else
-			srp_cmd->data_in_format = SRP_DIRECT_BUFFER;
-	} else {
-		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-			srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_out_count =
-				numbuf < MAX_INDIRECT_BUFS ?
-					numbuf: MAX_INDIRECT_BUFS;
-		} else {
-			srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_in_count =
-				numbuf < MAX_INDIRECT_BUFS ?
-					numbuf: MAX_INDIRECT_BUFS;
-		}
+			srp_cmd->data_in_desc_cnt = numbuf;
 	}
+
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		srp_cmd->buf_fmt = fmt << 4;
+	else
+		srp_cmd->buf_fmt = fmt;
 }
 
 static void unmap_sg_list(int num_entries,
 			  struct device *dev,
-			  struct memory_descriptor *md)
+			  struct srp_direct_buf *md)
 {
 	int i;
 
-	for (i = 0; i < num_entries; ++i) {
-		dma_unmap_single(dev,
-				 md[i].virtual_address,
-				 md[i].length, DMA_BIDIRECTIONAL);
-	}
+	for (i = 0; i < num_entries; ++i)
+		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
 }
 
 /**
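The two separate format fields of the old srp_cmd collapse into the generic header's single buf_fmt byte: data-out format in the high nibble, data-in format in the low nibble, with SRP_NO_DATA_DESC, SRP_DATA_DESC_DIRECT and SRP_DATA_DESC_INDIRECT as the per-nibble values. A minimal sketch of the convention the rewritten set_srp_direction() relies on:

	/* pack: only one direction is ever set; the other nibble stays
	 * 0 (SRP_NO_DATA_DESC) because the IU was zeroed beforehand */
	srp_cmd->buf_fmt = (cmd->sc_data_direction == DMA_TO_DEVICE)
		? (fmt << 4) : fmt;

	/* unpack, as unmap_cmd_data() does below */
	out_fmt = srp_cmd->buf_fmt >> 4;
	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);

The descriptor counts similarly move to data_out_desc_cnt/data_in_desc_cnt, with the open-coded ternary clamp replaced by min(numbuf, MAX_INDIRECT_BUFS).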
@@ -330,23 +327,26 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
 			   struct srp_event_struct *evt_struct,
 			   struct device *dev)
 {
-	if ((cmd->data_out_format == SRP_NO_BUFFER) &&
-	    (cmd->data_in_format == SRP_NO_BUFFER))
+	u8 out_fmt, in_fmt;
+
+	out_fmt = cmd->buf_fmt >> 4;
+	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
+
+	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
 		return;
-	else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) ||
-		 (cmd->data_in_format == SRP_DIRECT_BUFFER)) {
-		struct memory_descriptor *data =
-			(struct memory_descriptor *)cmd->additional_data;
-		dma_unmap_single(dev, data->virtual_address, data->length,
-				 DMA_BIDIRECTIONAL);
+	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
+		 in_fmt == SRP_DATA_DESC_DIRECT) {
+		struct srp_direct_buf *data =
+			(struct srp_direct_buf *) cmd->add_data;
+		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
 	} else {
-		struct indirect_descriptor *indirect =
-			(struct indirect_descriptor *)cmd->additional_data;
-		int num_mapped = indirect->head.length /
-			sizeof(indirect->list[0]);
+		struct srp_indirect_buf *indirect =
+			(struct srp_indirect_buf *) cmd->add_data;
+		int num_mapped = indirect->table_desc.len /
+			sizeof(struct srp_direct_buf);
 
 		if (num_mapped <= MAX_INDIRECT_BUFS) {
-			unmap_sg_list(num_mapped, dev, &indirect->list[0]);
+			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
 			return;
 		}
 
@@ -356,17 +356,17 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
 
 static int map_sg_list(int num_entries,
 		       struct scatterlist *sg,
-		       struct memory_descriptor *md)
+		       struct srp_direct_buf *md)
 {
 	int i;
 	u64 total_length = 0;
 
 	for (i = 0; i < num_entries; ++i) {
-		struct memory_descriptor *descr = md + i;
+		struct srp_direct_buf *descr = md + i;
 		struct scatterlist *sg_entry = &sg[i];
-		descr->virtual_address = sg_dma_address(sg_entry);
-		descr->length = sg_dma_len(sg_entry);
-		descr->memory_handle = 0;
+		descr->va = sg_dma_address(sg_entry);
+		descr->len = sg_dma_len(sg_entry);
+		descr->key = 0;
 		total_length += sg_dma_len(sg_entry);
 	}
 	return total_length;
@@ -389,10 +389,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 	int sg_mapped;
 	u64 total_length = 0;
 	struct scatterlist *sg = cmd->request_buffer;
-	struct memory_descriptor *data =
-		(struct memory_descriptor *)srp_cmd->additional_data;
-	struct indirect_descriptor *indirect =
-		(struct indirect_descriptor *)data;
+	struct srp_direct_buf *data =
+		(struct srp_direct_buf *) srp_cmd->add_data;
+	struct srp_indirect_buf *indirect =
+		(struct srp_indirect_buf *) data;
 
 	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
 
@@ -403,9 +403,9 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	/* special case; we can use a single direct descriptor */
 	if (sg_mapped == 1) {
-		data->virtual_address = sg_dma_address(&sg[0]);
-		data->length = sg_dma_len(&sg[0]);
-		data->memory_handle = 0;
+		data->va = sg_dma_address(&sg[0]);
+		data->len = sg_dma_len(&sg[0]);
+		data->key = 0;
 		return 1;
 	}
 
@@ -416,25 +416,26 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 		return 0;
 	}
 
-	indirect->head.virtual_address = 0;
-	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
-	indirect->head.memory_handle = 0;
+	indirect->table_desc.va = 0;
+	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+	indirect->table_desc.key = 0;
 
 	if (sg_mapped <= MAX_INDIRECT_BUFS) {
-		total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
-		indirect->total_length = total_length;
+		total_length = map_sg_list(sg_mapped, sg,
+					   &indirect->desc_list[0]);
+		indirect->len = total_length;
 		return 1;
 	}
 
 	/* get indirect table */
 	if (!evt_struct->ext_list) {
-		evt_struct->ext_list =(struct memory_descriptor*)
+		evt_struct->ext_list = (struct srp_direct_buf *)
 			dma_alloc_coherent(dev,
-				SG_ALL * sizeof(struct memory_descriptor),
+				SG_ALL * sizeof(struct srp_direct_buf),
 				&evt_struct->ext_list_token, 0);
 		if (!evt_struct->ext_list) {
 			printk(KERN_ERR
 			"ibmvscsi: Can't allocate memory for indirect table\n");
 			return 0;
 
 		}
@@ -442,11 +443,11 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
 
-	indirect->total_length = total_length;
-	indirect->head.virtual_address = evt_struct->ext_list_token;
-	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
-	memcpy(indirect->list, evt_struct->ext_list,
-	       MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
+	indirect->len = total_length;
+	indirect->table_desc.va = evt_struct->ext_list_token;
+	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+	memcpy(indirect->desc_list, evt_struct->ext_list,
+	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
 
 	return 1;
 }
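map_sg_data() keeps its three-way layout choice; only the names change. Summarized as a sketch (the comments, not the control flow, are the point):

	if (sg_mapped == 1) {
		/* direct: one srp_direct_buf in srp_cmd->add_data */
	} else if (sg_mapped <= MAX_INDIRECT_BUFS) {
		/* indirect, inline: desc_list[] lives in the IU itself;
		 * table_desc.va stays 0 and is fixed up at queue time */
	} else {
		/* indirect, external: the table is the dma_alloc_coherent'd
		 * ext_list, so table_desc.va = ext_list_token is already a
		 * bus address */
	}

Note the external case still mirrors the first MAX_INDIRECT_BUFS entries into the inline desc_list via the memcpy(), matching the old driver's behavior.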
@@ -463,20 +464,20 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 static int map_single_data(struct scsi_cmnd *cmd,
 			   struct srp_cmd *srp_cmd, struct device *dev)
 {
-	struct memory_descriptor *data =
-		(struct memory_descriptor *)srp_cmd->additional_data;
+	struct srp_direct_buf *data =
+		(struct srp_direct_buf *) srp_cmd->add_data;
 
-	data->virtual_address =
+	data->va =
 		dma_map_single(dev, cmd->request_buffer,
 			       cmd->request_bufflen,
 			       DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(data->virtual_address)) {
+	if (dma_mapping_error(data->va)) {
 		printk(KERN_ERR
 		       "ibmvscsi: Unable to map request_buffer for command!\n");
 		return 0;
 	}
-	data->length = cmd->request_bufflen;
-	data->memory_handle = 0;
+	data->len = cmd->request_bufflen;
+	data->key = 0;
 
 	set_srp_direction(cmd, srp_cmd, 1);
 
@@ -548,7 +549,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 
 	/* Copy the IU into the transfer area */
 	*evt_struct->xfer_iu = evt_struct->iu;
-	evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct;
+	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
 
 	/* Add this to the sent list. We need to do this
 	 * before we actually send
@@ -586,27 +587,27 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
 	struct scsi_cmnd *cmnd = evt_struct->cmnd;
 
-	if (unlikely(rsp->type != SRP_RSP_TYPE)) {
+	if (unlikely(rsp->opcode != SRP_RSP)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: bad SRP RSP type %d\n",
-			       rsp->type);
+			       rsp->opcode);
 	}
 
 	if (cmnd) {
 		cmnd->result = rsp->status;
 		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
 			memcpy(cmnd->sense_buffer,
-			       rsp->sense_and_response_data,
-			       rsp->sense_data_list_length);
+			       rsp->data,
+			       rsp->sense_data_len);
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
 			       evt_struct,
 			       evt_struct->hostdata->dev);
 
-		if (rsp->doover)
-			cmnd->resid = rsp->data_out_residual_count;
-		else if (rsp->diover)
-			cmnd->resid = rsp->data_in_residual_count;
+		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
+			cmnd->resid = rsp->data_out_res_cnt;
+		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
+			cmnd->resid = rsp->data_in_res_cnt;
 	}
 
 	if (evt_struct->cmnd_done)
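The old C bitfields (rspvalid, doover, diover) become explicit tests against rsp->flags, dropping any reliance on compiler bitfield layout. As best I recall, the flag bits are defined in srp.h roughly as below — an assumption to verify against the header:

	enum {
		SRP_RSP_FLAG_RSPVALID = 1 << 0,
		SRP_RSP_FLAG_SNSVALID = 1 << 1,
		SRP_RSP_FLAG_DOOVER   = 1 << 2,
		SRP_RSP_FLAG_DOUNDER  = 1 << 3,
		SRP_RSP_FLAG_DIOVER   = 1 << 4,
		SRP_RSP_FLAG_DIUNDER  = 1 << 5,
	};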
@@ -633,10 +634,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 {
 	struct srp_cmd *srp_cmd;
 	struct srp_event_struct *evt_struct;
-	struct indirect_descriptor *indirect;
+	struct srp_indirect_buf *indirect;
 	struct ibmvscsi_host_data *hostdata =
 		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
 	u16 lun = lun_from_dev(cmnd->device);
+	u8 out_fmt, in_fmt;
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct)
@@ -644,8 +646,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 
 	/* Set up the actual SRP IU */
 	srp_cmd = &evt_struct->iu.srp.cmd;
-	memset(srp_cmd, 0x00, sizeof(*srp_cmd));
-	srp_cmd->type = SRP_CMD_TYPE;
+	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
+	srp_cmd->opcode = SRP_CMD;
 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
 	srp_cmd->lun = ((u64) lun) << 48;
 
@@ -664,13 +666,15 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	evt_struct->cmnd_done = done;
 
 	/* Fix up dma address of the buffer itself */
-	indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
-	if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
-	    (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
-	    (indirect->head.virtual_address == 0)) {
-		indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
-			offsetof(struct srp_cmd, additional_data) +
-			offsetof(struct indirect_descriptor, list);
+	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
+	out_fmt = srp_cmd->buf_fmt >> 4;
+	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
+	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
+	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
+	    indirect->table_desc.va == 0) {
+		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+			offsetof(struct srp_cmd, add_data) +
+			offsetof(struct srp_indirect_buf, desc_list);
 	}
 
 	return ibmvscsi_send_srp_event(evt_struct, hostdata);
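table_desc.va == 0 is the sentinel map_sg_data() leaves for the inline-indirect case, so this fixup fires only when the descriptor table is embedded in the IU; the external ext_list case already carries a real bus address. Spelled out, the arithmetic locates desc_list[] inside the transmitted IU:

	/* bus address of the inline descriptor table */
	indirect->table_desc.va = evt_struct->crq.IU_data_ptr	/* IU base */
		+ offsetof(struct srp_cmd, add_data)		/* variable part of the IU */
		+ offsetof(struct srp_indirect_buf, desc_list);	/* table within it */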
@@ -780,10 +784,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 static void login_rsp(struct srp_event_struct *evt_struct)
 {
 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-	switch (evt_struct->xfer_iu->srp.generic.type) {
-	case SRP_LOGIN_RSP_TYPE:	/* it worked! */
+	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
+	case SRP_LOGIN_RSP:	/* it worked! */
 		break;
-	case SRP_LOGIN_REJ_TYPE:	/* refused! */
+	case SRP_LOGIN_REJ:	/* refused! */
 		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
 		       evt_struct->xfer_iu->srp.login_rej.reason);
 		/* Login failed. */
@@ -792,7 +796,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	default:
 		printk(KERN_ERR
 		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
-		       evt_struct->xfer_iu->srp.generic.type);
+		       evt_struct->xfer_iu->srp.login_rsp.opcode);
 		/* Login failed. */
 		atomic_set(&hostdata->request_limit, -1);
 		return;
@@ -800,17 +804,17 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 
 	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
 
-	if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta >
+	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
 	    (max_requests - 2))
-		evt_struct->xfer_iu->srp.login_rsp.request_limit_delta =
+		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
 		    max_requests - 2;
 
 	/* Now we know what the real request-limit is */
 	atomic_set(&hostdata->request_limit,
-		   evt_struct->xfer_iu->srp.login_rsp.request_limit_delta);
+		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
 
 	hostdata->host->can_queue =
-	    evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2;
+	    evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
 
 	if (hostdata->host->can_queue < 1) {
 		printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
@@ -849,18 +853,19 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 
 	login = &evt_struct->iu.srp.login_req;
 	memset(login, 0x00, sizeof(struct srp_login_req));
-	login->type = SRP_LOGIN_REQ_TYPE;
-	login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
-	login->required_buffer_formats = 0x0006;
+	login->opcode = SRP_LOGIN_REQ;
+	login->req_it_iu_len = sizeof(union srp_iu);
+	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
 
+	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	/* Start out with a request limit of 1, since this is negotiated in
 	 * the login request we are just sending
 	 */
 	atomic_set(&hostdata->request_limit, 1);
 
-	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+	printk("ibmvscsic: sent SRP login\n");
 	return rc;
 };
 
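The magic required_buffer_formats value is unchanged on the wire: with SRP_BUF_FORMAT_DIRECT defined as 1 << 1 and SRP_BUF_FORMAT_INDIRECT as 1 << 2 in srp.h,

	(1 << 1) | (1 << 2) == 0x0006

which is exactly the old constant. This hunk also carries two behavioral changes: the request-limit reset now happens under the host lock, and an unleveled printk announces the login send.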
@@ -928,13 +933,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	/* Set up an abort SRP command */
 	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-	tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
+	tsk_mgmt->opcode = SRP_TSK_MGMT;
 	tsk_mgmt->lun = ((u64) lun) << 48;
-	tsk_mgmt->task_mgmt_flags = 0x01;	/* ABORT TASK */
-	tsk_mgmt->managed_task_tag = (u64) found_evt;
+	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
+	tsk_mgmt->task_tag = (u64) found_evt;
 
 	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
-	       tsk_mgmt->lun, tsk_mgmt->managed_task_tag);
+	       tsk_mgmt->lun, tsk_mgmt->task_tag);
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
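The task-management conversion is likewise byte-for-byte: the srp.h function codes match the old magic numbers, SRP_TSK_ABORT_TASK being 0x01 and SRP_TSK_LUN_RESET 0x08 (used in the reset handler below). From memory, the full set is roughly:

	enum {
		SRP_TSK_ABORT_TASK	= 0x01,
		SRP_TSK_ABORT_TASK_SET	= 0x02,
		SRP_TSK_CLEAR_TASK_SET	= 0x04,
		SRP_TSK_LUN_RESET	= 0x08,
		SRP_TSK_CLEAR_ACA	= 0x40,
	};

treat the entries other than the two this patch uses as unverified.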
@@ -948,25 +953,25 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	wait_for_completion(&evt->comp);
 
 	/* make sure we got a good response */
-	if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: abort bad SRP RSP type %d\n",
-			       srp_rsp.srp.generic.type);
+			       srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
-	if (srp_rsp.srp.rsp.rspvalid)
-		rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
+		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
 	else
 		rsp_rc = srp_rsp.srp.rsp.status;
 
 	if (rsp_rc) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
 			       rsp_rc,
-			       tsk_mgmt->managed_task_tag);
+			       tsk_mgmt->task_tag);
 		return FAILED;
 	}
 
@@ -987,13 +992,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		printk(KERN_INFO
 		       "ibmvscsi: aborted task tag 0x%lx completed\n",
-		       tsk_mgmt->managed_task_tag);
+		       tsk_mgmt->task_tag);
 		return SUCCESS;
 	}
 
 	printk(KERN_INFO
 	       "ibmvscsi: successfully aborted task tag 0x%lx\n",
-	       tsk_mgmt->managed_task_tag);
+	       tsk_mgmt->task_tag);
 
 	cmd->result = (DID_ABORT << 16);
 	list_del(&found_evt->list);
@@ -1040,9 +1045,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
 	/* Set up a lun reset SRP command */
 	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-	tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
+	tsk_mgmt->opcode = SRP_TSK_MGMT;
 	tsk_mgmt->lun = ((u64) lun) << 48;
-	tsk_mgmt->task_mgmt_flags = 0x08;	/* LUN RESET */
+	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
 	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
 	       tsk_mgmt->lun);
@@ -1059,16 +1064,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	wait_for_completion(&evt->comp);
 
 	/* make sure we got a good response */
-	if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: reset bad SRP RSP type %d\n",
-			       srp_rsp.srp.generic.type);
+			       srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
-	if (srp_rsp.srp.rsp.rspvalid)
-		rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
+		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
 	else
 		rsp_rc = srp_rsp.srp.rsp.status;
 
@@ -1076,8 +1081,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
-			       rsp_rc,
-			       tsk_mgmt->managed_task_tag);
+			       rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
 
@@ -1179,6 +1183,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 		/* We need to re-setup the interpartition connection */
 		printk(KERN_INFO
 		       "ibmvscsi: Re-enabling adapter!\n");
+		atomic_set(&hostdata->request_limit, -1);
 		purge_requests(hostdata, DID_REQUEUE);
 		if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
 						hostdata) == 0)
@@ -1226,7 +1231,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	}
 
 	if (crq->format == VIOSRP_SRP_FORMAT)
-		atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
+		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
 			   &hostdata->request_limit);
 
 	if (evt_struct->done)
