author     FUJITA Tomonori <tomof@acm.org>                  2006-03-25 13:57:14 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-04-13 11:13:17 -0400
commit     ef265673434680f2307ceafae4a2badc657e94cc (patch)
tree       2cb37e46c89673b65dde2e20685517a3106f84bf /drivers
parent     9b833e428ad24dc7cec2c0c7e6898caa91449c95 (diff)
[SCSI] ibmvscsi: convert the ibmvscsi driver to use include/scsi/srp.h
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/scsi/ibmvscsi/ibmvscsi.c    247
-rw-r--r--   drivers/scsi/ibmvscsi/ibmvscsi.h      2
-rw-r--r--   drivers/scsi/ibmvscsi/rpa_vscsi.c     1
-rw-r--r--   drivers/scsi/ibmvscsi/viosrp.h       17
4 files changed, 142 insertions, 125 deletions
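
Note on the conversion: the driver-private memory_descriptor and indirect_descriptor types are replaced by the generic srp_direct_buf and srp_indirect_buf from include/scsi/srp.h, and the separate data_out_format/data_in_format fields are folded into the single srp_cmd->buf_fmt byte, with the data-out format code in the high nibble and the data-in code in the low nibble. A minimal standalone sketch of that packing, using the SRP_DATA_DESC_* codes as srp.h defines them (the helper names here are illustrative, not from the patch):

#include <stdio.h>
#include <stdint.h>

/* Descriptor-format codes as defined in include/scsi/srp.h */
enum {
        SRP_NO_DATA_DESC       = 0,
        SRP_DATA_DESC_DIRECT   = 1,
        SRP_DATA_DESC_INDIRECT = 2,
};

/* Pack a format code into buf_fmt: data-out lives in the high
 * nibble, data-in in the low nibble, mirroring set_srp_direction(). */
static uint8_t pack_buf_fmt(uint8_t fmt, int to_device)
{
        return to_device ? (uint8_t)(fmt << 4) : fmt;
}

/* Decode both nibbles, mirroring unmap_cmd_data() in the patch. */
static void unpack_buf_fmt(uint8_t buf_fmt, uint8_t *out_fmt, uint8_t *in_fmt)
{
        *out_fmt = buf_fmt >> 4;
        *in_fmt = buf_fmt & ((1U << 4) - 1);
}

int main(void)
{
        uint8_t out_fmt, in_fmt;
        uint8_t buf_fmt = pack_buf_fmt(SRP_DATA_DESC_INDIRECT, 1);

        unpack_buf_fmt(buf_fmt, &out_fmt, &in_fmt);
        printf("buf_fmt=0x%02x out=%u in=%u\n", buf_fmt, out_fmt, in_fmt);
        return 0;
}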
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index a279d0a8dff5..0a8ad37ae899 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -168,7 +168,7 @@ static void release_event_pool(struct event_pool *pool,
 			++in_use;
 		if (pool->events[i].ext_list) {
 			dma_free_coherent(hostdata->dev,
-				  SG_ALL * sizeof(struct memory_descriptor),
+				  SG_ALL * sizeof(struct srp_direct_buf),
 				  pool->events[i].ext_list,
 				  pool->events[i].ext_list_token);
 		}
@@ -284,40 +284,37 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
 			      struct srp_cmd *srp_cmd,
 			      int numbuf)
 {
+	u8 fmt;
+
 	if (numbuf == 0)
 		return;
 
-	if (numbuf == 1) {
+	if (numbuf == 1)
+		fmt = SRP_DATA_DESC_DIRECT;
+	else {
+		fmt = SRP_DATA_DESC_INDIRECT;
+		numbuf = min(numbuf, MAX_INDIRECT_BUFS);
+
 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
-			srp_cmd->data_out_format = SRP_DIRECT_BUFFER;
+			srp_cmd->data_out_desc_cnt = numbuf;
 		else
-			srp_cmd->data_in_format = SRP_DIRECT_BUFFER;
-	} else {
-		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-			srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_out_count =
-				numbuf < MAX_INDIRECT_BUFS ?
-				numbuf: MAX_INDIRECT_BUFS;
-		} else {
-			srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_in_count =
-				numbuf < MAX_INDIRECT_BUFS ?
-				numbuf: MAX_INDIRECT_BUFS;
-		}
+			srp_cmd->data_in_desc_cnt = numbuf;
 	}
+
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		srp_cmd->buf_fmt = fmt << 4;
+	else
+		srp_cmd->buf_fmt = fmt;
 }
 
 static void unmap_sg_list(int num_entries,
 			  struct device *dev,
-			  struct memory_descriptor *md)
+			  struct srp_direct_buf *md)
 {
 	int i;
 
-	for (i = 0; i < num_entries; ++i) {
-		dma_unmap_single(dev,
-				 md[i].virtual_address,
-				 md[i].length, DMA_BIDIRECTIONAL);
-	}
+	for (i = 0; i < num_entries; ++i)
+		dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
 }
 
 /**
@@ -330,23 +327,26 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
 			   struct srp_event_struct *evt_struct,
 			   struct device *dev)
 {
-	if ((cmd->data_out_format == SRP_NO_BUFFER) &&
-	    (cmd->data_in_format == SRP_NO_BUFFER))
+	u8 out_fmt, in_fmt;
+
+	out_fmt = cmd->buf_fmt >> 4;
+	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);
+
+	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
 		return;
-	else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) ||
-		 (cmd->data_in_format == SRP_DIRECT_BUFFER)) {
-		struct memory_descriptor *data =
-			(struct memory_descriptor *)cmd->additional_data;
-		dma_unmap_single(dev, data->virtual_address, data->length,
-				 DMA_BIDIRECTIONAL);
+	else if (out_fmt == SRP_DATA_DESC_DIRECT ||
+		 in_fmt == SRP_DATA_DESC_DIRECT) {
+		struct srp_direct_buf *data =
+			(struct srp_direct_buf *) cmd->add_data;
+		dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
 	} else {
-		struct indirect_descriptor *indirect =
-			(struct indirect_descriptor *)cmd->additional_data;
-		int num_mapped = indirect->head.length /
-			sizeof(indirect->list[0]);
+		struct srp_indirect_buf *indirect =
+			(struct srp_indirect_buf *) cmd->add_data;
+		int num_mapped = indirect->table_desc.len /
+			sizeof(struct srp_direct_buf);
 
 		if (num_mapped <= MAX_INDIRECT_BUFS) {
-			unmap_sg_list(num_mapped, dev, &indirect->list[0]);
+			unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
 			return;
 		}
 
@@ -356,17 +356,17 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
 
 static int map_sg_list(int num_entries,
 		       struct scatterlist *sg,
-		       struct memory_descriptor *md)
+		       struct srp_direct_buf *md)
 {
 	int i;
 	u64 total_length = 0;
 
 	for (i = 0; i < num_entries; ++i) {
-		struct memory_descriptor *descr = md + i;
+		struct srp_direct_buf *descr = md + i;
 		struct scatterlist *sg_entry = &sg[i];
-		descr->virtual_address = sg_dma_address(sg_entry);
-		descr->length = sg_dma_len(sg_entry);
-		descr->memory_handle = 0;
+		descr->va = sg_dma_address(sg_entry);
+		descr->len = sg_dma_len(sg_entry);
+		descr->key = 0;
 		total_length += sg_dma_len(sg_entry);
 	}
 	return total_length;
@@ -389,10 +389,10 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 	int sg_mapped;
 	u64 total_length = 0;
 	struct scatterlist *sg = cmd->request_buffer;
-	struct memory_descriptor *data =
-		(struct memory_descriptor *)srp_cmd->additional_data;
-	struct indirect_descriptor *indirect =
-		(struct indirect_descriptor *)data;
+	struct srp_direct_buf *data =
+		(struct srp_direct_buf *) srp_cmd->add_data;
+	struct srp_indirect_buf *indirect =
+		(struct srp_indirect_buf *) data;
 
 	sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
 
@@ -403,9 +403,9 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	/* special case; we can use a single direct descriptor */
 	if (sg_mapped == 1) {
-		data->virtual_address = sg_dma_address(&sg[0]);
-		data->length = sg_dma_len(&sg[0]);
-		data->memory_handle = 0;
+		data->va = sg_dma_address(&sg[0]);
+		data->len = sg_dma_len(&sg[0]);
+		data->key = 0;
 		return 1;
 	}
 
@@ -416,25 +416,26 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 		return 0;
 	}
 
-	indirect->head.virtual_address = 0;
-	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
-	indirect->head.memory_handle = 0;
+	indirect->table_desc.va = 0;
+	indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
+	indirect->table_desc.key = 0;
 
 	if (sg_mapped <= MAX_INDIRECT_BUFS) {
-		total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
-		indirect->total_length = total_length;
+		total_length = map_sg_list(sg_mapped, sg,
+					   &indirect->desc_list[0]);
+		indirect->len = total_length;
 		return 1;
 	}
 
 	/* get indirect table */
 	if (!evt_struct->ext_list) {
-		evt_struct->ext_list =(struct memory_descriptor*)
+		evt_struct->ext_list = (struct srp_direct_buf *)
 			dma_alloc_coherent(dev,
-				  SG_ALL * sizeof(struct memory_descriptor),
+				  SG_ALL * sizeof(struct srp_direct_buf),
 				  &evt_struct->ext_list_token, 0);
 		if (!evt_struct->ext_list) {
 			printk(KERN_ERR
 			"ibmvscsi: Can't allocate memory for indirect table\n");
 			return 0;
 
 		}
@@ -442,11 +443,11 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
 	total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
 
-	indirect->total_length = total_length;
-	indirect->head.virtual_address = evt_struct->ext_list_token;
-	indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
-	memcpy(indirect->list, evt_struct->ext_list,
-	       MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
+	indirect->len = total_length;
+	indirect->table_desc.va = evt_struct->ext_list_token;
+	indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
+	memcpy(indirect->desc_list, evt_struct->ext_list,
+	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
 
 	return 1;
 }
@@ -463,20 +464,20 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 static int map_single_data(struct scsi_cmnd *cmd,
 			   struct srp_cmd *srp_cmd, struct device *dev)
 {
-	struct memory_descriptor *data =
-		(struct memory_descriptor *)srp_cmd->additional_data;
+	struct srp_direct_buf *data =
+		(struct srp_direct_buf *) srp_cmd->add_data;
 
-	data->virtual_address =
+	data->va =
 		dma_map_single(dev, cmd->request_buffer,
 			       cmd->request_bufflen,
 			       DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(data->virtual_address)) {
+	if (dma_mapping_error(data->va)) {
 		printk(KERN_ERR
 		       "ibmvscsi: Unable to map request_buffer for command!\n");
 		return 0;
 	}
-	data->length = cmd->request_bufflen;
-	data->memory_handle = 0;
+	data->len = cmd->request_bufflen;
+	data->key = 0;
 
 	set_srp_direction(cmd, srp_cmd, 1);
 
@@ -548,7 +549,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 
 	/* Copy the IU into the transfer area */
 	*evt_struct->xfer_iu = evt_struct->iu;
-	evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct;
+	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
 
 	/* Add this to the sent list. We need to do this
 	 * before we actually send
@@ -586,27 +587,27 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
 	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
 	struct scsi_cmnd *cmnd = evt_struct->cmnd;
 
-	if (unlikely(rsp->type != SRP_RSP_TYPE)) {
+	if (unlikely(rsp->opcode != SRP_RSP)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: bad SRP RSP type %d\n",
-			       rsp->type);
+			       rsp->opcode);
 	}
 
 	if (cmnd) {
 		cmnd->result = rsp->status;
 		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
 			memcpy(cmnd->sense_buffer,
-			       rsp->sense_and_response_data,
-			       rsp->sense_data_list_length);
+			       rsp->data,
+			       rsp->sense_data_len);
 		unmap_cmd_data(&evt_struct->iu.srp.cmd,
 			       evt_struct,
 			       evt_struct->hostdata->dev);
 
-		if (rsp->doover)
-			cmnd->resid = rsp->data_out_residual_count;
-		else if (rsp->diover)
-			cmnd->resid = rsp->data_in_residual_count;
+		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
+			cmnd->resid = rsp->data_out_res_cnt;
+		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
+			cmnd->resid = rsp->data_in_res_cnt;
 	}
 
 	if (evt_struct->cmnd_done)
@@ -633,10 +634,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 {
 	struct srp_cmd *srp_cmd;
 	struct srp_event_struct *evt_struct;
-	struct indirect_descriptor *indirect;
+	struct srp_indirect_buf *indirect;
 	struct ibmvscsi_host_data *hostdata =
 		(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
 	u16 lun = lun_from_dev(cmnd->device);
+	u8 out_fmt, in_fmt;
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct)
@@ -644,8 +646,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 
 	/* Set up the actual SRP IU */
 	srp_cmd = &evt_struct->iu.srp.cmd;
-	memset(srp_cmd, 0x00, sizeof(*srp_cmd));
-	srp_cmd->type = SRP_CMD_TYPE;
+	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
+	srp_cmd->opcode = SRP_CMD;
 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
 	srp_cmd->lun = ((u64) lun) << 48;
 
@@ -664,13 +666,15 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	evt_struct->cmnd_done = done;
 
 	/* Fix up dma address of the buffer itself */
-	indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
-	if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
-	     (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
-	    (indirect->head.virtual_address == 0)) {
-		indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
-			offsetof(struct srp_cmd, additional_data) +
-			offsetof(struct indirect_descriptor, list);
+	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
+	out_fmt = srp_cmd->buf_fmt >> 4;
+	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
+	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
+	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
+	    indirect->table_desc.va == 0) {
+		indirect->table_desc.va = evt_struct->crq.IU_data_ptr +
+			offsetof(struct srp_cmd, add_data) +
+			offsetof(struct srp_indirect_buf, desc_list);
 	}
 
 	return ibmvscsi_send_srp_event(evt_struct, hostdata);
@@ -780,10 +784,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 static void login_rsp(struct srp_event_struct *evt_struct)
 {
 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-	switch (evt_struct->xfer_iu->srp.generic.type) {
-	case SRP_LOGIN_RSP_TYPE:	/* it worked! */
+	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
+	case SRP_LOGIN_RSP:	/* it worked! */
 		break;
-	case SRP_LOGIN_REJ_TYPE:	/* refused! */
+	case SRP_LOGIN_REJ:	/* refused! */
 		printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
 		       evt_struct->xfer_iu->srp.login_rej.reason);
 		/* Login failed.  */
@@ -792,7 +796,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 	default:
 		printk(KERN_ERR
 		       "ibmvscsi: Invalid login response typecode 0x%02x!\n",
-		       evt_struct->xfer_iu->srp.generic.type);
+		       evt_struct->xfer_iu->srp.login_rsp.opcode);
 		/* Login failed.  */
 		atomic_set(&hostdata->request_limit, -1);
 		return;
@@ -800,17 +804,17 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 
 	printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
 
-	if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta >
+	if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta >
 	    (max_requests - 2))
-		evt_struct->xfer_iu->srp.login_rsp.request_limit_delta =
+		evt_struct->xfer_iu->srp.login_rsp.req_lim_delta =
 		    max_requests - 2;
 
 	/* Now we know what the real request-limit is */
 	atomic_set(&hostdata->request_limit,
-		   evt_struct->xfer_iu->srp.login_rsp.request_limit_delta);
+		   evt_struct->xfer_iu->srp.login_rsp.req_lim_delta);
 
 	hostdata->host->can_queue =
-	    evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2;
+	    evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2;
 
 	if (hostdata->host->can_queue < 1) {
 		printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n");
@@ -849,9 +853,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
 
 	login = &evt_struct->iu.srp.login_req;
 	memset(login, 0x00, sizeof(struct srp_login_req));
-	login->type = SRP_LOGIN_REQ_TYPE;
-	login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
-	login->required_buffer_formats = 0x0006;
+	login->opcode = SRP_LOGIN_REQ;
+	login->req_it_iu_len = sizeof(union srp_iu);
+	login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
 	/* Start out with a request limit of 1, since this is negotiated in
@@ -929,13 +933,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 
 	/* Set up an abort SRP command */
 	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-	tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
+	tsk_mgmt->opcode = SRP_TSK_MGMT;
 	tsk_mgmt->lun = ((u64) lun) << 48;
-	tsk_mgmt->task_mgmt_flags = 0x01;	/* ABORT TASK */
-	tsk_mgmt->managed_task_tag = (u64) found_evt;
+	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
+	tsk_mgmt->task_tag = (u64) found_evt;
 
 	printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
-	       tsk_mgmt->lun, tsk_mgmt->managed_task_tag);
+	       tsk_mgmt->lun, tsk_mgmt->task_tag);
 
 	evt->sync_srp = &srp_rsp;
 	init_completion(&evt->comp);
@@ -949,25 +953,25 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	wait_for_completion(&evt->comp);
 
 	/* make sure we got a good response */
-	if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: abort bad SRP RSP type %d\n",
-			       srp_rsp.srp.generic.type);
+			       srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
-	if (srp_rsp.srp.rsp.rspvalid)
-		rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
+		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
 	else
 		rsp_rc = srp_rsp.srp.rsp.status;
 
 	if (rsp_rc) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: abort code %d for task tag 0x%lx\n",
 			       rsp_rc,
-			       tsk_mgmt->managed_task_tag);
+			       tsk_mgmt->task_tag);
 		return FAILED;
 	}
 
@@ -988,13 +992,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 		printk(KERN_INFO
 		       "ibmvscsi: aborted task tag 0x%lx completed\n",
-		       tsk_mgmt->managed_task_tag);
+		       tsk_mgmt->task_tag);
 		return SUCCESS;
 	}
 
 	printk(KERN_INFO
 	       "ibmvscsi: successfully aborted task tag 0x%lx\n",
-	       tsk_mgmt->managed_task_tag);
+	       tsk_mgmt->task_tag);
 
 	cmd->result = (DID_ABORT << 16);
 	list_del(&found_evt->list);
@@ -1041,9 +1045,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
 	/* Set up a lun reset SRP command */
 	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-	tsk_mgmt->type = SRP_TSK_MGMT_TYPE;
+	tsk_mgmt->opcode = SRP_TSK_MGMT;
 	tsk_mgmt->lun = ((u64) lun) << 48;
-	tsk_mgmt->task_mgmt_flags = 0x08;	/* LUN RESET */
+	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
 	printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
 	       tsk_mgmt->lun);
@@ -1060,16 +1064,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	wait_for_completion(&evt->comp);
 
 	/* make sure we got a good response */
-	if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: reset bad SRP RSP type %d\n",
-			       srp_rsp.srp.generic.type);
+			       srp_rsp.srp.rsp.opcode);
 		return FAILED;
 	}
 
-	if (srp_rsp.srp.rsp.rspvalid)
-		rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
+		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
 	else
 		rsp_rc = srp_rsp.srp.rsp.status;
 
@@ -1077,8 +1081,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 		if (printk_ratelimit())
 			printk(KERN_WARNING
 			       "ibmvscsi: reset code %d for task tag 0x%lx\n",
-			       rsp_rc,
-			       tsk_mgmt->managed_task_tag);
+			       rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
 
@@ -1228,7 +1231,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	}
 
 	if (crq->format == VIOSRP_SRP_FORMAT)
-		atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
+		atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
 			   &hostdata->request_limit);
 
 	if (evt_struct->done)
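
For indirect descriptors that fit inside the IU, the queuecommand fix-up above derives the bus address of the embedded descriptor list by adding two offsetof() terms to the DMA address of the IU itself. A standalone sketch of that arithmetic; the struct layouts below are abbreviated stand-ins for the srp.h definitions, and the IU address is a made-up example value:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Abbreviated stand-ins for the srp.h types; only the members
 * involved in the address computation are shown. */
struct srp_direct_buf {
        uint64_t va;
        uint32_t key;
        uint32_t len;
};

struct srp_indirect_buf {
        struct srp_direct_buf table_desc;
        uint32_t len;
        struct srp_direct_buf desc_list[];
};

struct srp_cmd {
        uint8_t opcode;
        uint8_t buf_fmt;        /* many fields omitted for brevity */
        uint8_t add_data[];     /* data descriptors start here */
};

int main(void)
{
        /* Hypothetical DMA address of the transmitted IU (crq.IU_data_ptr). */
        uint64_t iu_data_ptr = 0x10000000ULL;

        /* Bus address of desc_list[0] inside the IU, composed the same
         * way ibmvscsi_queuecommand() does after this patch. */
        uint64_t table_va = iu_data_ptr +
                offsetof(struct srp_cmd, add_data) +
                offsetof(struct srp_indirect_buf, desc_list);

        printf("table_desc.va = 0x%llx\n", (unsigned long long)table_va);
        return 0;
}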
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 4550d71e4744..5c6d93582929 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -68,7 +68,7 @@ struct srp_event_struct {
 	void (*cmnd_done) (struct scsi_cmnd *);
 	struct completion comp;
 	union viosrp_iu *sync_srp;
-	struct memory_descriptor *ext_list;
+	struct srp_direct_buf *ext_list;
 	dma_addr_t ext_list_token;
 };
 
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 892e8ed63091..1a9992bdfef8 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -34,7 +34,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include "ibmvscsi.h"
-#include "srp.h"
 
 static char partition_name[97] = "UNKNOWN";
 static unsigned int partition_number = -1;
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 6a6bba8a2f34..90f1a61283ad 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -33,7 +33,22 @@
 /*****************************************************************************/
 #ifndef VIOSRP_H
 #define VIOSRP_H
-#include "srp.h"
+#include <scsi/srp.h>
+
+#define SRP_VERSION "16.a"
+#define SRP_MAX_IU_LEN 256
+
+union srp_iu {
+	struct srp_login_req login_req;
+	struct srp_login_rsp login_rsp;
+	struct srp_login_rej login_rej;
+	struct srp_i_logout i_logout;
+	struct srp_t_logout t_logout;
+	struct srp_tsk_mgmt tsk_mgmt;
+	struct srp_cmd cmd;
+	struct srp_rsp rsp;
+	u8 reserved[SRP_MAX_IU_LEN];
+};
 
 enum viosrp_crq_formats {
 	VIOSRP_SRP_FORMAT = 0x01,
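
The reserved[SRP_MAX_IU_LEN] member added to union srp_iu pads every IU buffer to the full 256 bytes, which is what makes the memset(srp_cmd, 0x00, SRP_MAX_IU_LEN) in ibmvscsi_queuecommand() above safe. A small sketch of that invariant with a simplified union (the real one aggregates the srp.h request and response types); the typedef trick is a C89-style compile-time assertion:

#include <stdio.h>

#define SRP_MAX_IU_LEN 256

/* Simplified stand-in: the reserved member alone pads the union
 * to the full IU length, whatever the other members' sizes are. */
union srp_iu {
        unsigned char opcode;   /* every real IU starts with an opcode byte */
        unsigned char reserved[SRP_MAX_IU_LEN];
};

/* Fails to compile if the union is ever smaller than SRP_MAX_IU_LEN,
 * i.e. if zeroing SRP_MAX_IU_LEN bytes could overrun the IU buffer. */
typedef char srp_iu_size_check[sizeof(union srp_iu) >= SRP_MAX_IU_LEN ? 1 : -1];

int main(void)
{
        printf("sizeof(union srp_iu) = %zu\n", sizeof(union srp_iu));
        return 0;
}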