Diffstat (limited to 'drivers/scsi')

 drivers/scsi/aacraid/commsup.c |   8
 drivers/scsi/aacraid/linit.c   |   7
 drivers/scsi/atari_scsi.c      |  10
 drivers/scsi/dtc.c             |   5
 drivers/scsi/g_NCR5380.c       |   5
 drivers/scsi/iscsi_tcp.c       | 139
 drivers/scsi/iscsi_tcp.h       |  34
 drivers/scsi/libiscsi.c        |   3
 drivers/scsi/mac_scsi.c        |   4
 drivers/scsi/pas16.c           |   5
 drivers/scsi/qla1280.c         | 387
 drivers/scsi/sun3_scsi.c       |   4
 drivers/scsi/sun3_scsi_vme.c   |   4
 drivers/scsi/t128.c            |   5
 14 files changed, 283 insertions(+), 337 deletions(-)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 240a0bb8986f..abce48ccc85b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1339,10 +1339,10 @@ int aac_check_health(struct aac_dev * aac)
 	aif = (struct aac_aifcmd *)hw_fib->data;
 	aif->command = cpu_to_le32(AifCmdEventNotify);
 	aif->seqnum = cpu_to_le32(0xFFFFFFFF);
-	aif->data[0] = cpu_to_le32(AifEnExpEvent);
-	aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
-	aif->data[2] = cpu_to_le32(AifHighPriority);
-	aif->data[3] = cpu_to_le32(BlinkLED);
+	aif->data[0] = AifEnExpEvent;
+	aif->data[1] = AifExeFirmwarePanic;
+	aif->data[2] = AifHighPriority;
+	aif->data[3] = BlinkLED;
 
 	/*
 	 * Put the FIB onto the
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 038980be763d..9dd331bc29b0 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -636,7 +636,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
 static int aac_cfg_ioctl(struct inode *inode, struct file *file,
 		unsigned int cmd, unsigned long arg)
 {
-	if (!capable(CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 	return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
 }
@@ -691,7 +691,7 @@ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 
 static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
-	if (!capable(CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 	return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
 }
@@ -950,7 +950,8 @@ static struct scsi_host_template aac_driver_template = {
 
 static void __aac_shutdown(struct aac_dev * aac)
 {
-	kthread_stop(aac->thread);
+	if (aac->aif_thread)
+		kthread_stop(aac->thread);
 	aac_send_shutdown(aac);
 	aac_adapter_disable_int(aac);
 	free_irq(aac->pdev->irq, aac);
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 6f8403b82ba1..f5732d8f67fe 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -393,7 +393,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
 
 #endif /* REAL_DMA */
 
-	NCR5380_intr(0, 0);
+	NCR5380_intr(irq, dummy);
 
 #if 0
 	/* To be sure the int is not masked */
@@ -458,7 +458,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
 
 #endif /* REAL_DMA */
 
-	NCR5380_intr(0, 0);
+	NCR5380_intr(irq, dummy);
 	return IRQ_HANDLED;
 }
 
@@ -684,7 +684,7 @@ int atari_scsi_detect(struct scsi_host_template *host)
 	 * interrupt after having cleared the pending flag for the DMA
 	 * interrupt. */
 	if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW,
-			 "SCSI NCR5380", scsi_tt_intr)) {
+			 "SCSI NCR5380", instance)) {
 		printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI);
 		scsi_unregister(atari_scsi_host);
 		atari_stram_free(atari_dma_buffer);
@@ -701,7 +701,7 @@ int atari_scsi_detect(struct scsi_host_template *host)
 				IRQ_TYPE_PRIO, "Hades DMA emulator",
 				hades_dma_emulator)) {
 		printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting (MACH_IS_HADES)",IRQ_AUTO_2);
-		free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr);
+		free_irq(IRQ_TT_MFP_SCSI, instance);
 		scsi_unregister(atari_scsi_host);
 		atari_stram_free(atari_dma_buffer);
 		atari_dma_buffer = 0;
@@ -761,7 +761,7 @@ int atari_scsi_detect(struct scsi_host_template *host)
 int atari_scsi_release(struct Scsi_Host *sh)
 {
 	if (IS_A_TT())
-		free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr);
+		free_irq(IRQ_TT_MFP_SCSI, sh);
 	if (atari_dma_buffer)
 		atari_stram_free(atari_dma_buffer);
 	return 1;
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index 2596165096d3..c2677ba29c74 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -277,7 +277,8 @@ found:
 	/* With interrupts enabled, it will sometimes hang when doing heavy
	 * reads. So better not enable them until I finger it out. */
	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, "dtc", instance)) {
+		if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED,
+				"dtc", instance)) {
			printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
			instance->irq = SCSI_IRQ_NONE;
		}
@@ -459,7 +460,7 @@ static int dtc_release(struct Scsi_Host *shost)
	NCR5380_local_declare();
	NCR5380_setup(shost);
	if (shost->irq)
-		free_irq(shost->irq, NULL);
+		free_irq(shost->irq, shost);
	NCR5380_exit(shost);
	if (shost->io_port && shost->n_io_port)
		release_region(shost->io_port, shost->n_io_port);
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 607336f56d55..75585a52c88b 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -460,7 +460,8 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
		instance->irq = NCR5380_probe_irq(instance, 0xffff);
 
	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, generic_NCR5380_intr, IRQF_DISABLED, "NCR5380", instance)) {
+		if (request_irq(instance->irq, generic_NCR5380_intr,
+				IRQF_DISABLED, "NCR5380", instance)) {
			printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
			instance->irq = SCSI_IRQ_NONE;
		}
@@ -513,7 +514,7 @@ int generic_NCR5380_release_resources(struct Scsi_Host *instance)
	NCR5380_setup(instance);
 
	if (instance->irq != SCSI_IRQ_NONE)
-		free_irq(instance->irq, NULL);
+		free_irq(instance->irq, instance);
	NCR5380_exit(instance);
 
 #ifndef CONFIG_SCSI_G_NCR5380_MEM
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4bcf916c21a7..57ce2251abc8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -197,7 +197,7 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
	if (unlikely(!sc))
		return;
 
-	tcp_ctask->xmstate = XMSTATE_IDLE;
+	tcp_ctask->xmstate = XMSTATE_VALUE_IDLE;
	tcp_ctask->r2t = NULL;
 }
 
@@ -409,7 +409,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 
	tcp_ctask->exp_datasn = r2tsn + 1;
	__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
-	tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT;
+	set_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate);
	list_move_tail(&ctask->running, &conn->xmitqueue);
 
	scsi_queue_work(session->host, &conn->xmitwork);
@@ -1254,7 +1254,7 @@ static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
 
	tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
	debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
-	tcp_ctask->xmstate |= XMSTATE_W_PAD;
+	set_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
 }
 
 /**
@@ -1269,7 +1269,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 
	BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
-	tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT;
+	tcp_ctask->xmstate = 1 << XMSTATE_BIT_CMD_HDR_INIT;
 }
 
 /**
@@ -1283,10 +1283,10 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
  *	xmit.
  *
  *	Management xmit state machine consists of these states:
- *		XMSTATE_IMM_HDR_INIT	- calculate digest of PDU Header
- *		XMSTATE_IMM_HDR		- PDU Header xmit in progress
- *		XMSTATE_IMM_DATA	- PDU Data xmit in progress
- *		XMSTATE_IDLE		- management PDU is done
+ *		XMSTATE_BIT_IMM_HDR_INIT - calculate digest of PDU Header
+ *		XMSTATE_BIT_IMM_HDR      - PDU Header xmit in progress
+ *		XMSTATE_BIT_IMM_DATA     - PDU Data xmit in progress
+ *		XMSTATE_VALUE_IDLE       - management PDU is done
  **/
 static int
 iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
@@ -1297,12 +1297,12 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
	debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
		conn->id, tcp_mtask->xmstate, mtask->itt);
 
-	if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) {
+	if (test_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate)) {
		iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
				   sizeof(struct iscsi_hdr));
 
		if (mtask->data_count) {
-			tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
+			set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
			iscsi_buf_init_iov(&tcp_mtask->sendbuf,
					   (char*)mtask->data,
					   mtask->data_count);
@@ -1315,21 +1315,20 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
					 (u8*)tcp_mtask->hdrext);
 
		tcp_mtask->sent = 0;
-		tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT;
-		tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
+		clear_bit(XMSTATE_BIT_IMM_HDR_INIT, &tcp_mtask->xmstate);
+		set_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
	}
 
-	if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
+	if (test_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate)) {
		rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
				   mtask->data_count);
		if (rc)
			return rc;
-		tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
+		clear_bit(XMSTATE_BIT_IMM_HDR, &tcp_mtask->xmstate);
	}
 
-	if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
+	if (test_and_clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate)) {
		BUG_ON(!mtask->data_count);
-		tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
		/* FIXME: implement.
		 * Virtual buffer could be spreaded across multiple pages...
		 */
@@ -1339,13 +1338,13 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
			rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
					    &mtask->data_count, &tcp_mtask->sent);
			if (rc) {
-				tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
+				set_bit(XMSTATE_BIT_IMM_DATA, &tcp_mtask->xmstate);
				return rc;
			}
		} while (mtask->data_count);
	}
 
-	BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
+	BUG_ON(tcp_mtask->xmstate != XMSTATE_VALUE_IDLE);
	if (mtask->hdr->itt == RESERVED_ITT) {
		struct iscsi_session *session = conn->session;
 
@@ -1365,7 +1364,7 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc = 0;
 
-	if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) {
+	if (test_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate)) {
		tcp_ctask->sent = 0;
		tcp_ctask->sg_count = 0;
		tcp_ctask->exp_datasn = 0;
@@ -1390,21 +1389,21 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
					 (u8*)tcp_ctask->hdrext);
-		tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT;
-		tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT;
+		clear_bit(XMSTATE_BIT_CMD_HDR_INIT, &tcp_ctask->xmstate);
+		set_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate);
	}
 
-	if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) {
+	if (test_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate)) {
		rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
		if (rc)
			return rc;
-		tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT;
+		clear_bit(XMSTATE_BIT_CMD_HDR_XMIT, &tcp_ctask->xmstate);
 
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			return 0;
 
		if (ctask->imm_count) {
-			tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
+			set_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate);
			iscsi_set_padding(tcp_ctask, ctask->imm_count);
 
			if (ctask->conn->datadgst_en) {
@@ -1414,9 +1413,10 @@ iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
			}
		}
 
-		if (ctask->unsol_count)
-			tcp_ctask->xmstate |=
-					XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
+		if (ctask->unsol_count) {
+			set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate);
+			set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
+		}
	}
	return rc;
 }
@@ -1428,25 +1428,25 @@ iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int sent = 0, rc;
 
-	if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
+	if (test_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate)) {
		iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
				   tcp_ctask->pad_count);
		if (conn->datadgst_en)
			crypto_hash_update(&tcp_conn->tx_hash,
					   &tcp_ctask->sendbuf.sg,
					   tcp_ctask->sendbuf.sg.length);
-	} else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
+	} else if (!test_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate))
		return 0;
 
-	tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
-	tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
+	clear_bit(XMSTATE_BIT_W_PAD, &tcp_ctask->xmstate);
+	clear_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate);
	debug_scsi("sending %d pad bytes for itt 0x%x\n",
		   tcp_ctask->pad_count, ctask->itt);
	rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
			   &sent);
	if (rc) {
		debug_scsi("padding send failed %d\n", rc);
-		tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
+		set_bit(XMSTATE_BIT_W_RESEND_PAD, &tcp_ctask->xmstate);
	}
	return rc;
 }
@@ -1465,11 +1465,11 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
	tcp_ctask = ctask->dd_data;
	tcp_conn = conn->dd_data;
 
-	if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
+	if (!test_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate)) {
		crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
		iscsi_buf_init_iov(buf, (char*)digest, 4);
	}
-	tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
+	clear_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate);
 
	rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
	if (!rc)
@@ -1478,7 +1478,7 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
	else {
		debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
			   *digest, ctask->itt);
-		tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
+		set_bit(XMSTATE_BIT_W_RESEND_DATA_DIGEST, &tcp_ctask->xmstate);
	}
	return rc;
 }
@@ -1526,8 +1526,8 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
	struct iscsi_data_task *dtask;
	int rc;
 
-	tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
-	if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
+	set_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
+	if (test_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate)) {
		dtask = &tcp_ctask->unsol_dtask;
 
		iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
@@ -1537,14 +1537,14 @@ iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
					(u8*)dtask->hdrext);
 
-		tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
+		clear_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
		iscsi_set_padding(tcp_ctask, ctask->data_count);
	}
 
	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
	if (rc) {
-		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
-		tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
+		clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
+		set_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate);
		return rc;
	}
 
@@ -1565,16 +1565,15 @@ iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc;
 
-	if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
+	if (test_and_clear_bit(XMSTATE_BIT_UNS_HDR, &tcp_ctask->xmstate)) {
		BUG_ON(!ctask->unsol_count);
-		tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
 send_hdr:
		rc = iscsi_send_unsol_hdr(conn, ctask);
		if (rc)
			return rc;
	}
 
-	if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
+	if (test_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate)) {
		struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
		int start = tcp_ctask->sent;
 
@@ -1584,14 +1583,14 @@ send_hdr:
		ctask->unsol_count -= tcp_ctask->sent - start;
		if (rc)
			return rc;
-		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
+		clear_bit(XMSTATE_BIT_UNS_DATA, &tcp_ctask->xmstate);
		/*
		 * Done with the Data-Out. Next, check if we need
		 * to send another unsolicited Data-Out.
		 */
		if (ctask->unsol_count) {
			debug_scsi("sending more uns\n");
-			tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
+			set_bit(XMSTATE_BIT_UNS_INIT, &tcp_ctask->xmstate);
			goto send_hdr;
		}
	}
@@ -1607,7 +1606,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
	struct iscsi_data_task *dtask;
	int left, rc;
 
-	if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) {
+	if (test_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate)) {
		if (!tcp_ctask->r2t) {
			spin_lock_bh(&session->lock);
			__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
@@ -1621,19 +1620,19 @@ send_hdr:
		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &r2t->headbuf,
					(u8*)dtask->hdrext);
-		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT;
-		tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
+		clear_bit(XMSTATE_BIT_SOL_HDR_INIT, &tcp_ctask->xmstate);
+		set_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate);
	}
 
-	if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
+	if (test_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate)) {
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;
 
		rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
		if (rc)
			return rc;
-		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
-		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
+		clear_bit(XMSTATE_BIT_SOL_HDR, &tcp_ctask->xmstate);
+		set_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate);
 
		if (conn->datadgst_en) {
			iscsi_data_digest_init(conn->dd_data, tcp_ctask);
@@ -1646,7 +1645,7 @@ send_hdr:
			   r2t->sent);
	}
 
-	if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
+	if (test_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate)) {
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;
 
@@ -1655,7 +1654,7 @@ send_hdr:
					&dtask->digestbuf, &dtask->digest);
			if (rc)
				return rc;
-			tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
+			clear_bit(XMSTATE_BIT_SOL_DATA, &tcp_ctask->xmstate);
 
			/*
			 * Done with this Data-Out. Next, check if we have
@@ -1700,32 +1699,32 @@ send_hdr:
  * xmit stages.
  *
  *iscsi_send_cmd_hdr()
- *      XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers Calculate
+ *      XMSTATE_BIT_CMD_HDR_INIT - prepare Header and Data buffers Calculate
  *                           Header Digest
- *      XMSTATE_CMD_HDR_XMIT - Transmit header in progress
+ *      XMSTATE_BIT_CMD_HDR_XMIT - Transmit header in progress
  *
  *iscsi_send_padding
- *      XMSTATE_W_PAD        - Prepare and send pading
- *      XMSTATE_W_RESEND_PAD - retry send pading
+ *      XMSTATE_BIT_W_PAD        - Prepare and send pading
+ *      XMSTATE_BIT_W_RESEND_PAD - retry send pading
  *
  *iscsi_send_digest
- *      XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
- *      XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest
+ *      XMSTATE_BIT_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
+ *      XMSTATE_BIT_W_RESEND_DATA_DIGEST - retry sending digest
  *
  *iscsi_send_unsol_hdr
- *      XMSTATE_UNS_INIT     - prepare un-solicit data header and digest
- *      XMSTATE_UNS_HDR      - send un-solicit header
+ *      XMSTATE_BIT_UNS_INIT     - prepare un-solicit data header and digest
+ *      XMSTATE_BIT_UNS_HDR      - send un-solicit header
  *
  *iscsi_send_unsol_pdu
- *      XMSTATE_UNS_DATA     - send un-solicit data in progress
+ *      XMSTATE_BIT_UNS_DATA     - send un-solicit data in progress
  *
  *iscsi_send_sol_pdu
- *      XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize
- *      XMSTATE_SOL_HDR      - send solicit header
- *      XMSTATE_SOL_DATA     - send solicit data
+ *      XMSTATE_BIT_SOL_HDR_INIT - solicit data header and digest initialize
+ *      XMSTATE_BIT_SOL_HDR      - send solicit header
+ *      XMSTATE_BIT_SOL_DATA     - send solicit data
  *
  *iscsi_tcp_ctask_xmit
- *      XMSTATE_IMM_DATA     - xmit managment data (??)
+ *      XMSTATE_BIT_IMM_DATA     - xmit managment data (??)
  **/
 static int
 iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
@@ -1742,13 +1741,13 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
	if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
		return 0;
 
-	if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
+	if (test_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate)) {
		rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
				     &tcp_ctask->sent, &ctask->imm_count,
				     &tcp_ctask->immbuf, &tcp_ctask->immdigest);
		if (rc)
			return rc;
-		tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
+		clear_bit(XMSTATE_BIT_IMM_DATA, &tcp_ctask->xmstate);
	}
 
	rc = iscsi_send_unsol_pdu(conn, ctask);
@@ -1981,7 +1980,7 @@ static void
 iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
 {
	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
-	tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT;
+	tcp_mtask->xmstate = 1 << XMSTATE_BIT_IMM_HDR_INIT;
 }
 
 static int
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 7eba44df0a7f..68c36cc8997e 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -32,21 +32,21 @@
 #define IN_PROGRESS_PAD_RECV		0x4
 
 /* xmit state machine */
-#define XMSTATE_IDLE			0x0
-#define XMSTATE_CMD_HDR_INIT		0x1
-#define XMSTATE_CMD_HDR_XMIT		0x2
-#define XMSTATE_IMM_HDR			0x4
-#define XMSTATE_IMM_DATA		0x8
-#define XMSTATE_UNS_INIT		0x10
-#define XMSTATE_UNS_HDR			0x20
-#define XMSTATE_UNS_DATA		0x40
-#define XMSTATE_SOL_HDR			0x80
-#define XMSTATE_SOL_DATA		0x100
-#define XMSTATE_W_PAD			0x200
-#define XMSTATE_W_RESEND_PAD		0x400
-#define XMSTATE_W_RESEND_DATA_DIGEST	0x800
-#define XMSTATE_IMM_HDR_INIT		0x1000
-#define XMSTATE_SOL_HDR_INIT		0x2000
+#define XMSTATE_VALUE_IDLE		0
+#define XMSTATE_BIT_CMD_HDR_INIT	0
+#define XMSTATE_BIT_CMD_HDR_XMIT	1
+#define XMSTATE_BIT_IMM_HDR		2
+#define XMSTATE_BIT_IMM_DATA		3
+#define XMSTATE_BIT_UNS_INIT		4
+#define XMSTATE_BIT_UNS_HDR		5
+#define XMSTATE_BIT_UNS_DATA		6
+#define XMSTATE_BIT_SOL_HDR		7
+#define XMSTATE_BIT_SOL_DATA		8
+#define XMSTATE_BIT_W_PAD		9
+#define XMSTATE_BIT_W_RESEND_PAD	10
+#define XMSTATE_BIT_W_RESEND_DATA_DIGEST 11
+#define XMSTATE_BIT_IMM_HDR_INIT	12
+#define XMSTATE_BIT_SOL_HDR_INIT	13
 
 #define ISCSI_PAD_LEN			4
 #define ISCSI_SG_TABLESIZE		SG_ALL
@@ -122,7 +122,7 @@ struct iscsi_data_task {
 struct iscsi_tcp_mgmt_task {
	struct iscsi_hdr	hdr;
	char			hdrext[sizeof(__u32)]; /* Header-Digest */
-	int			xmstate;	/* mgmt xmit progress */
+	unsigned long		xmstate;	/* mgmt xmit progress */
	struct iscsi_buf	headbuf;	/* header buffer */
	struct iscsi_buf	sendbuf;	/* in progress buffer */
	int			sent;
@@ -150,7 +150,7 @@ struct iscsi_tcp_cmd_task {
	int			pad_count;	/* padded bytes */
	struct iscsi_buf	headbuf;	/* header buf (xmit) */
	struct iscsi_buf	sendbuf;	/* in progress buffer*/
-	int			xmstate;	/* xmit xtate machine */
+	unsigned long		xmstate;	/* xmit xtate machine */
	int			sent;
	struct scatterlist	*sg;		/* per-cmd SG list */
	struct scatterlist	*bad_sg;	/* assert statement */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index efceed451b46..8b57af5baaec 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -291,9 +291,6 @@ invalid_datalen:
		       min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
	}
 
-	if (sc->sc_data_direction == DMA_TO_DEVICE)
-		goto out;
-
	if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
		int res_count = be32_to_cpu(rhdr->residual_count);
 
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index abe2bda6ac37..3b09ab21d701 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -303,7 +303,7 @@ int macscsi_detect(struct scsi_host_template * tpnt)
 
	if (instance->irq != SCSI_IRQ_NONE)
		if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW,
-		    "ncr5380", instance)) {
+				"ncr5380", instance)) {
			printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
			       instance->host_no, instance->irq);
			instance->irq = SCSI_IRQ_NONE;
@@ -326,7 +326,7 @@ int macscsi_detect(struct scsi_host_template * tpnt)
 int macscsi_release (struct Scsi_Host *shpnt)
 {
	if (shpnt->irq != SCSI_IRQ_NONE)
-		free_irq (shpnt->irq, NCR5380_intr);
+		free_irq(shpnt->irq, shpnt);
	NCR5380_exit(shpnt);
 
	return 0;
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index ee5965659971..f2018b46f494 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -453,7 +453,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt)
	instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
 
	if (instance->irq != SCSI_IRQ_NONE)
-		if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, "pas16", instance)) {
+		if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED,
+				"pas16", instance)) {
			printk("scsi%d : IRQ%d not free, interrupts disabled\n",
			       instance->host_no, instance->irq);
			instance->irq = SCSI_IRQ_NONE;
@@ -604,7 +605,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
 static int pas16_release(struct Scsi_Host *shost)
 {
	if (shost->irq)
-		free_irq(shost->irq, NULL);
+		free_irq(shost->irq, shost);
	NCR5380_exit(shost);
	if (shost->dma_channel != 0xff)
		free_dma(shost->dma_channel);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3aeb68bcb7ac..146d540f6281 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1310,14 +1310,7 @@ qla1280_done(struct scsi_qla_host *ha)
	}
 
	/* Release memory used for this I/O */
-	if (cmd->use_sg) {
-		pci_unmap_sg(ha->pdev, cmd->request_buffer,
-			     cmd->use_sg, cmd->sc_data_direction);
-	} else if (cmd->request_bufflen) {
-		pci_unmap_single(ha->pdev, sp->saved_dma_handle,
-				 cmd->request_bufflen,
-				 cmd->sc_data_direction);
-	}
+	scsi_dma_unmap(cmd);
 
	/* Call the mid-level driver interrupt handler */
	CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
@@ -1406,14 +1399,14 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
		break;
 
	case CS_DATA_UNDERRUN:
-		if ((cp->request_bufflen - residual_length) <
+		if ((scsi_bufflen(cp) - residual_length) <
		    cp->underflow) {
			printk(KERN_WARNING
			       "scsi: Underflow detected - retrying "
			       "command.\n");
			host_status = DID_ERROR;
		} else {
-			cp->resid = residual_length;
+			scsi_set_resid(cp, residual_length);
			host_status = DID_OK;
		}
		break;
@@ -2775,33 +2768,28 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
	struct device_reg __iomem *reg = ha->iobase;
	struct scsi_cmnd *cmd = sp->cmd;
	cmd_a64_entry_t *pkt;
-	struct scatterlist *sg = NULL, *s;
	__le32 *dword_ptr;
	dma_addr_t dma_handle;
	int status = 0;
	int cnt;
	int req_cnt;
-	u16 seg_cnt;
+	int seg_cnt;
	u8 dir;
 
	ENTER("qla1280_64bit_start_scsi:");
 
	/* Calculate number of entries and segments required. */
	req_cnt = 1;
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-				     cmd->sc_data_direction);
-
+	seg_cnt = scsi_dma_map(cmd);
+	if (seg_cnt > 0) {
		if (seg_cnt > 2) {
			req_cnt += (seg_cnt - 2) / 5;
			if ((seg_cnt - 2) % 5)
				req_cnt++;
		}
-	} else if (cmd->request_bufflen) {	/* If data transfer. */
-		seg_cnt = 1;
-	} else {
-		seg_cnt = 0;
+	} else if (seg_cnt < 0) {
+		status = 1;
+		goto out;
	}
 
	if ((req_cnt + 2) >= ha->req_q_cnt) {
@@ -2889,124 +2877,104 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
	 * Load data segments.
	 */
	if (seg_cnt) {	/* If data transfer. */
+		struct scatterlist *sg, *s;
		int remseg = seg_cnt;
+
+		sg = scsi_sglist(cmd);
+
		/* Setup packet address segment pointer. */
		dword_ptr = (u32 *)&pkt->dseg_0_address;
 
-		if (cmd->use_sg) {	/* If scatter gather */
-			/* Load command entry data segments. */
-			for_each_sg(sg, s, seg_cnt, cnt) {
-				if (cnt == 2)
+		/* Load command entry data segments. */
+		for_each_sg(sg, s, seg_cnt, cnt) {
+			if (cnt == 2)
+				break;
+
+			dma_handle = sg_dma_address(s);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
+			if (ha->flags.use_pci_vchannel)
+				sn_pci_set_vchan(ha->pdev,
+						 (unsigned long *)&dma_handle,
+						 SCSI_BUS_32(cmd));
+#endif
+			*dword_ptr++ =
+				cpu_to_le32(pci_dma_lo32(dma_handle));
+			*dword_ptr++ =
+				cpu_to_le32(pci_dma_hi32(dma_handle));
+			*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
+			dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
+				cpu_to_le32(pci_dma_hi32(dma_handle)),
+				cpu_to_le32(pci_dma_lo32(dma_handle)),
+				cpu_to_le32(sg_dma_len(sg_next(s))));
+			remseg--;
+		}
+		dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
+			"command packet data - b %i, t %i, l %i \n",
+			SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
+			SCSI_LUN_32(cmd));
+		qla1280_dump_buffer(5, (char *)pkt,
+				    REQUEST_ENTRY_SIZE);
+
+		/*
+		 * Build continuation packets.
+		 */
+		dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
+			"remains\n", seg_cnt);
+
+		while (remseg > 0) {
+			/* Update sg start */
+			sg = s;
+			/* Adjust ring index. */
+			ha->req_ring_index++;
+			if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
+				ha->req_ring_index = 0;
+				ha->request_ring_ptr =
+					ha->request_ring;
+			} else
+				ha->request_ring_ptr++;
+
+			pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
+
+			/* Zero out packet. */
+			memset(pkt, 0, REQUEST_ENTRY_SIZE);
+
+			/* Load packet defaults. */
+			((struct cont_a64_entry *) pkt)->entry_type =
+				CONTINUE_A64_TYPE;
+			((struct cont_a64_entry *) pkt)->entry_count = 1;
+			((struct cont_a64_entry *) pkt)->sys_define =
+				(uint8_t)ha->req_ring_index;
+			/* Setup packet address segment pointer. */
+			dword_ptr =
+				(u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
+
+			/* Load continuation entry data segments. */
+			for_each_sg(sg, s, remseg, cnt) {
+				if (cnt == 5)
					break;
				dma_handle = sg_dma_address(s);
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
				if (ha->flags.use_pci_vchannel)
					sn_pci_set_vchan(ha->pdev,
							 (unsigned long *)&dma_handle,
							 SCSI_BUS_32(cmd));
 #endif
				*dword_ptr++ =
					cpu_to_le32(pci_dma_lo32(dma_handle));
				*dword_ptr++ =
					cpu_to_le32(pci_dma_hi32(dma_handle));
-				*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
-				dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
+				*dword_ptr++ =
+					cpu_to_le32(sg_dma_len(s));
+				dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
					cpu_to_le32(pci_dma_hi32(dma_handle)),
					cpu_to_le32(pci_dma_lo32(dma_handle)),
-					cpu_to_le32(sg_dma_len(sg_next(s))));
-				remseg--;
+					cpu_to_le32(sg_dma_len(s)));
			}
-			dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
-				"command packet data - b %i, t %i, l %i \n",
-				SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
-				SCSI_LUN_32(cmd));
-			qla1280_dump_buffer(5, (char *)pkt,
-					    REQUEST_ENTRY_SIZE);
-
-			/*
-			 * Build continuation packets.
-			 */
-			dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
-				"remains\n", seg_cnt);
-
-			while (remseg > 0) {
-				/* Update sg start */
-				sg = s;
-				/* Adjust ring index. */
-				ha->req_ring_index++;
-				if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
-					ha->req_ring_index = 0;
-					ha->request_ring_ptr =
-						ha->request_ring;
-				} else
-					ha->request_ring_ptr++;
-
-				pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
-
-				/* Zero out packet. */
-				memset(pkt, 0, REQUEST_ENTRY_SIZE);
-
-				/* Load packet defaults. */
-				((struct cont_a64_entry *) pkt)->entry_type =
-					CONTINUE_A64_TYPE;
-				((struct cont_a64_entry *) pkt)->entry_count = 1;
-				((struct cont_a64_entry *) pkt)->sys_define =
-					(uint8_t)ha->req_ring_index;
-				/* Setup packet address segment pointer. */
-				dword_ptr =
-					(u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
-
-				/* Load continuation entry data segments. */
-				for_each_sg(sg, s, remseg, cnt) {
-					if (cnt == 5)
-						break;
-					dma_handle = sg_dma_address(s);
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-					if (ha->flags.use_pci_vchannel)
-						sn_pci_set_vchan(ha->pdev,
-							(unsigned long *)&dma_handle,
-							SCSI_BUS_32(cmd));
-#endif
-					*dword_ptr++ =
-						cpu_to_le32(pci_dma_lo32(dma_handle));
-					*dword_ptr++ =
-						cpu_to_le32(pci_dma_hi32(dma_handle));
-					*dword_ptr++ =
-						cpu_to_le32(sg_dma_len(s));
-					dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
-						cpu_to_le32(pci_dma_hi32(dma_handle)),
-						cpu_to_le32(pci_dma_lo32(dma_handle)),
-						cpu_to_le32(sg_dma_len(s)));
-				}
-				remseg -= cnt;
-				dprintk(5, "qla1280_64bit_start_scsi: "
-					"continuation packet data - b %i, t "
-					"%i, l %i \n", SCSI_BUS_32(cmd),
-					SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
-				qla1280_dump_buffer(5, (char *)pkt,
-						REQUEST_ENTRY_SIZE);
-			}
-		} else {	/* No scatter gather data transfer */
-			dma_handle = pci_map_single(ha->pdev,
-					cmd->request_buffer,
-					cmd->request_bufflen,
-					cmd->sc_data_direction);
-
-			sp->saved_dma_handle = dma_handle;
-#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
-			if (ha->flags.use_pci_vchannel)
-				sn_pci_set_vchan(ha->pdev,
-						 (unsigned long *)&dma_handle,
-						 SCSI_BUS_32(cmd));
-#endif
-			*dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle));
-			*dword_ptr++ = cpu_to_le32(pci_dma_hi32(dma_handle));
-			*dword_ptr = cpu_to_le32(cmd->request_bufflen);
-
-			dprintk(5, "qla1280_64bit_start_scsi: No scatter/"
-				"gather command packet data - b %i, t %i, "
-				"l %i \n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
-				SCSI_LUN_32(cmd));
+		remseg -= cnt;
+		dprintk(5, "qla1280_64bit_start_scsi: "
+			"continuation packet data - b %i, t "
+			"%i, l %i \n", SCSI_BUS_32(cmd),
+			SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
		qla1280_dump_buffer(5, (char *)pkt,
				    REQUEST_ENTRY_SIZE);
	}
@@ -3068,12 +3036,11 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
	struct device_reg __iomem *reg = ha->iobase;
	struct scsi_cmnd *cmd = sp->cmd;
	struct cmd_entry *pkt;
-	struct scatterlist *sg = NULL, *s;
	__le32 *dword_ptr;
	int status = 0;
	int cnt;
	int req_cnt;
-	uint16_t seg_cnt;
+	int seg_cnt;
	dma_addr_t dma_handle;
	u8 dir;
 
@@ -3083,18 +3050,8 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
		cmd->cmnd[0]);
 
	/* Calculate number of entries and segments required. */
-	req_cnt = 1;
-	if (cmd->use_sg) {
-		/*
-		 * We must build an SG list in adapter format, as the kernel's
-		 * SG list cannot be used directly because of data field size
-		 * (__alpha__) differences and the kernel SG list uses virtual
-		 * addresses where we need physical addresses.
-		 */
-		sg = (struct scatterlist *) cmd->request_buffer;
-		seg_cnt = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-				     cmd->sc_data_direction);
-
+	seg_cnt = scsi_dma_map(cmd);
+	if (seg_cnt) {
		/*
		 * if greater than four sg entries then we need to allocate
		 * continuation entries
@@ -3106,14 +3063,9 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
		}
		dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
			cmd, seg_cnt, req_cnt);
-	} else if (cmd->request_bufflen) {	/* If data transfer. */
-		dprintk(3, "No S/G transfer t=%x cmd=%p len=%x CDB=%x\n",
-			SCSI_TCN_32(cmd), cmd, cmd->request_bufflen,
-			cmd->cmnd[0]);
-		seg_cnt = 1;
-	} else {
-		/* dprintk(1, "No data transfer \n"); */
-		seg_cnt = 0;
+	} else if (seg_cnt < 0) {
+		status = 1;
+		goto out;
	}
 
	if ((req_cnt + 2) >= ha->req_q_cnt) {
@@ -3194,91 +3146,84 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
	 * Load data segments.
	 */
	if (seg_cnt) {
+		struct scatterlist *sg, *s;
		int remseg = seg_cnt;
+
+		sg = scsi_sglist(cmd);
+
		/* Setup packet address segment pointer. */
		dword_ptr = &pkt->dseg_0_address;
 
-		if (cmd->use_sg) {	/* If scatter gather */
-			dprintk(3, "Building S/G data segments..\n");
-			qla1280_dump_buffer(1, (char *)sg, 4 * 16);
+		dprintk(3, "Building S/G data segments..\n");
+		qla1280_dump_buffer(1, (char *)sg, 4 * 16);
+
+		/* Load command entry data segments. */
+		for_each_sg(sg, s, seg_cnt, cnt) {
+			if (cnt == 4)
+				break;
+			*dword_ptr++ =
+				cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
+			*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
+			dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
+				(pci_dma_lo32(sg_dma_address(s))),
+				(sg_dma_len(s)));
+			remseg--;
+		}
+		/*
+		 * Build continuation packets.
+		 */
+		dprintk(3, "S/G Building Continuation"
+			"...seg_cnt=0x%x remains\n", seg_cnt);
+		while (remseg > 0) {
+			/* Continue from end point */
+			sg = s;
+			/* Adjust ring index. */
+			ha->req_ring_index++;
+			if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
+				ha->req_ring_index = 0;
+				ha->request_ring_ptr =
+					ha->request_ring;
+			} else
+				ha->request_ring_ptr++;
+
+			pkt = (struct cmd_entry *)ha->request_ring_ptr;
+
+			/* Zero out packet. */
+			memset(pkt, 0, REQUEST_ENTRY_SIZE);
+
+			/* Load packet defaults. */
+			((struct cont_entry *) pkt)->
+				entry_type = CONTINUE_TYPE;
+			((struct cont_entry *) pkt)->entry_count = 1;
 
-			/* Load command entry data segments. */
-			for_each_sg(sg, s, seg_cnt, cnt) {
-				if (cnt == 4)
+			((struct cont_entry *) pkt)->sys_define =
+				(uint8_t) ha->req_ring_index;
+
+			/* Setup packet address segment pointer. */
+			dword_ptr =
+				&((struct cont_entry *) pkt)->dseg_0_address;
+
+			/* Load continuation entry data segments. */
+			for_each_sg(sg, s, remseg, cnt) {
+				if (cnt == 7)
					break;
				*dword_ptr++ =
					cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
-				*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
-				dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
-					(pci_dma_lo32(sg_dma_address(s))),
-					(sg_dma_len(s)));
-				remseg--;
-				}
-			/*
+				*dword_ptr++ =
+					cpu_to_le32(sg_dma_len(s));
+				dprintk(1,
+					"S/G Segment Cont. phys_addr=0x%x, "
+					"len=0x%x\n",
+					cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
+					cpu_to_le32(sg_dma_len(s)));
-			 * Build continuation packets.
-			 */
-			dprintk(3, "S/G Building Continuation"
-				"...seg_cnt=0x%x remains\n", seg_cnt);
-			while (remseg > 0) {
-				/* Continue from end point */
-				sg = s;
-				/* Adjust ring index. */
-				ha->req_ring_index++;
-				if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
-					ha->req_ring_index = 0;
-					ha->request_ring_ptr =
-						ha->request_ring;
-				} else
-					ha->request_ring_ptr++;
-
-				pkt = (struct cmd_entry *)ha->request_ring_ptr;
-
-				/* Zero out packet. */
-				memset(pkt, 0, REQUEST_ENTRY_SIZE);
-
-				/* Load packet defaults. */
-				((struct cont_entry *) pkt)->
-				    entry_type = CONTINUE_TYPE;
-				((struct cont_entry *) pkt)->entry_count = 1;
3243 | |||
3244 | ((struct cont_entry *) pkt)->sys_define = | ||
3245 | (uint8_t) ha->req_ring_index; | ||
3246 | |||
3247 | /* Setup packet address segment pointer. */ | ||
3248 | dword_ptr = | ||
3249 | &((struct cont_entry *) pkt)->dseg_0_address; | ||
3250 | |||
3251 | /* Load continuation entry data segments. */ | ||
3252 | for_each_sg(sg, s, remseg, cnt) { | ||
3253 | if (cnt == 7) | ||
3254 | break; | ||
3255 | *dword_ptr++ = | ||
3256 | cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); | ||
3257 | *dword_ptr++ = | ||
3258 | cpu_to_le32(sg_dma_len(s)); | ||
3259 | dprintk(1, | ||
3260 | "S/G Segment Cont. phys_addr=0x%x, " | ||
3261 | "len=0x%x\n", | ||
3262 | cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), | ||
3263 | cpu_to_le32(sg_dma_len(s))); | ||
3264 | } | ||
3265 | remseg -= cnt; | ||
3266 | dprintk(5, "qla1280_32bit_start_scsi: " | ||
3267 | "continuation packet data - " | ||
3268 | "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), | ||
3269 | SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); | ||
3270 | qla1280_dump_buffer(5, (char *)pkt, | ||
3271 | REQUEST_ENTRY_SIZE); | ||
3272 | } | 3219 | } |
3273 | } else { /* No S/G data transfer */ | 3220 | remseg -= cnt; |
3274 | dma_handle = pci_map_single(ha->pdev, | 3221 | dprintk(5, "qla1280_32bit_start_scsi: " |
3275 | cmd->request_buffer, | 3222 | "continuation packet data - " |
3276 | cmd->request_bufflen, | 3223 | "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), |
3277 | cmd->sc_data_direction); | 3224 | SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); |
3278 | sp->saved_dma_handle = dma_handle; | 3225 | qla1280_dump_buffer(5, (char *)pkt, |
3279 | 3226 | REQUEST_ENTRY_SIZE); | |
3280 | *dword_ptr++ = cpu_to_le32(pci_dma_lo32(dma_handle)); | ||
3281 | *dword_ptr = cpu_to_le32(cmd->request_bufflen); | ||
3282 | } | 3227 | } |
3283 | } else { /* No data transfer at all */ | 3228 | } else { /* No data transfer at all */ |
3284 | dprintk(5, "qla1280_32bit_start_scsi: No data, command " | 3229 | dprintk(5, "qla1280_32bit_start_scsi: No data, command " |
@@ -4086,9 +4031,9 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd) | |||
4086 | for (i = 0; i < cmd->cmd_len; i++) { | 4031 | for (i = 0; i < cmd->cmd_len; i++) { |
4087 | printk("0x%02x ", cmd->cmnd[i]); | 4032 | printk("0x%02x ", cmd->cmnd[i]); |
4088 | } | 4033 | } |
4089 | printk(" seg_cnt =%d\n", cmd->use_sg); | 4034 | printk(" seg_cnt =%d\n", scsi_sg_count(cmd)); |
4090 | printk(" request buffer=0x%p, request buffer len=0x%x\n", | 4035 | printk(" request buffer=0x%p, request buffer len=0x%x\n", |
4091 | cmd->request_buffer, cmd->request_bufflen); | 4036 | scsi_sglist(cmd), scsi_bufflen(cmd)); |
4092 | /* if (cmd->use_sg) | 4037 | /* if (cmd->use_sg) |
4093 | { | 4038 | { |
4094 | sg = (struct scatterlist *) cmd->request_buffer; | 4039 | sg = (struct scatterlist *) cmd->request_buffer; |
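The qla1280 hunks above replace the old cmd->use_sg/request_buffer bookkeeping with the scsi data accessors: scsi_dma_map() maps the command's scatterlist (returning 0 for no data, a positive segment count, or a negative value on failure), scsi_sglist() plus for_each_sg() walk the mapped segments, and scsi_sg_count()/scsi_bufflen() replace the removed fields in the debug output. A minimal sketch of that pattern, assuming a driver-side helper load_segment() as a hypothetical placeholder for programming one adapter data segment:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* hypothetical helper: a real driver would write addr/len into its IOCB */
static inline void load_segment(dma_addr_t addr, unsigned int len)
{
}

static int example_map_and_load(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg, *s;
	int seg_cnt, i;

	seg_cnt = scsi_dma_map(cmd);	/* 0 = no data, >0 = segments, <0 = error */
	if (seg_cnt < 0)
		return -EIO;		/* mapping failed, let the midlayer retry */

	if (seg_cnt) {
		sg = scsi_sglist(cmd);
		for_each_sg(sg, s, seg_cnt, i)
			load_segment(sg_dma_address(s), sg_dma_len(s));
	}

	/* ... issue the command; call scsi_dma_unmap(cmd) on completion ... */
	return 0;
}

The accessor form also removes the separate pci_map_single() path for single-buffer commands, since the midlayer presents all data transfers as a (possibly one-entry) scatterlist.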
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 5e46d842c6f9..e606cf0a2eb7 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c | |||
@@ -268,7 +268,7 @@ int sun3scsi_detect(struct scsi_host_template * tpnt) | |||
268 | ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; | 268 | ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; |
269 | 269 | ||
270 | if (request_irq(instance->irq, scsi_sun3_intr, | 270 | if (request_irq(instance->irq, scsi_sun3_intr, |
271 | 0, "Sun3SCSI-5380", NULL)) { | 271 | 0, "Sun3SCSI-5380", instance)) { |
272 | #ifndef REAL_DMA | 272 | #ifndef REAL_DMA |
273 | printk("scsi%d: IRQ%d not free, interrupts disabled\n", | 273 | printk("scsi%d: IRQ%d not free, interrupts disabled\n", |
274 | instance->host_no, instance->irq); | 274 | instance->host_no, instance->irq); |
@@ -310,7 +310,7 @@ int sun3scsi_detect(struct scsi_host_template * tpnt) | |||
310 | int sun3scsi_release (struct Scsi_Host *shpnt) | 310 | int sun3scsi_release (struct Scsi_Host *shpnt) |
311 | { | 311 | { |
312 | if (shpnt->irq != SCSI_IRQ_NONE) | 312 | if (shpnt->irq != SCSI_IRQ_NONE) |
313 | free_irq (shpnt->irq, NULL); | 313 | free_irq(shpnt->irq, shpnt); |
314 | 314 | ||
315 | iounmap((void *)sun3_scsi_regp); | 315 | iounmap((void *)sun3_scsi_regp); |
316 | 316 | ||
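The sun3_scsi hunks above, like the sun3_scsi_vme and t128 hunks that follow, pass the Scsi_Host instance as the dev_id cookie instead of NULL. The general rule is that the cookie given to request_irq() must be the same one later passed to free_irq(), and a non-NULL cookie also lets the handler identify its host. A hedged sketch of that pairing, with scsi_example_intr and the "example-5380" name as hypothetical placeholders:

#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

/* hypothetical handler; a real driver supplies its own */
static irqreturn_t scsi_example_intr(int irq, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;	/* cookie from request_irq() */

	/* ... service the controller behind 'instance' ... */
	return IRQ_HANDLED;
}

static int example_setup_irq(struct Scsi_Host *instance)
{
	if (request_irq(instance->irq, scsi_example_intr, 0,
			"example-5380", instance))
		return -EBUSY;
	return 0;
}

static void example_release_irq(struct Scsi_Host *instance)
{
	free_irq(instance->irq, instance);	/* matches the request_irq() cookie */
}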
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index 7cb4a31453e6..02d9727f017a 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c | |||
@@ -230,7 +230,7 @@ static int sun3scsi_detect(struct scsi_host_template * tpnt) | |||
230 | ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; | 230 | ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; |
231 | 231 | ||
232 | if (request_irq(instance->irq, scsi_sun3_intr, | 232 | if (request_irq(instance->irq, scsi_sun3_intr, |
233 | 0, "Sun3SCSI-5380VME", NULL)) { | 233 | 0, "Sun3SCSI-5380VME", instance)) { |
234 | #ifndef REAL_DMA | 234 | #ifndef REAL_DMA |
235 | printk("scsi%d: IRQ%d not free, interrupts disabled\n", | 235 | printk("scsi%d: IRQ%d not free, interrupts disabled\n", |
236 | instance->host_no, instance->irq); | 236 | instance->host_no, instance->irq); |
@@ -279,7 +279,7 @@ static int sun3scsi_detect(struct scsi_host_template * tpnt) | |||
279 | int sun3scsi_release (struct Scsi_Host *shpnt) | 279 | int sun3scsi_release (struct Scsi_Host *shpnt) |
280 | { | 280 | { |
281 | if (shpnt->irq != SCSI_IRQ_NONE) | 281 | if (shpnt->irq != SCSI_IRQ_NONE) |
282 | free_irq (shpnt->irq, NULL); | 282 | free_irq(shpnt->irq, shpnt); |
283 | 283 | ||
284 | iounmap((void *)sun3_scsi_regp); | 284 | iounmap((void *)sun3_scsi_regp); |
285 | 285 | ||
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index 248d60b8d899..041eaaace2c3 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c | |||
@@ -259,7 +259,8 @@ found: | |||
259 | instance->irq = NCR5380_probe_irq(instance, T128_IRQS); | 259 | instance->irq = NCR5380_probe_irq(instance, T128_IRQS); |
260 | 260 | ||
261 | if (instance->irq != SCSI_IRQ_NONE) | 261 | if (instance->irq != SCSI_IRQ_NONE) |
262 | if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", instance)) { | 262 | if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", |
263 | instance)) { | ||
263 | printk("scsi%d : IRQ%d not free, interrupts disabled\n", | 264 | printk("scsi%d : IRQ%d not free, interrupts disabled\n", |
264 | instance->host_no, instance->irq); | 265 | instance->host_no, instance->irq); |
265 | instance->irq = SCSI_IRQ_NONE; | 266 | instance->irq = SCSI_IRQ_NONE; |
@@ -295,7 +296,7 @@ static int t128_release(struct Scsi_Host *shost) | |||
295 | NCR5380_local_declare(); | 296 | NCR5380_local_declare(); |
296 | NCR5380_setup(shost); | 297 | NCR5380_setup(shost); |
297 | if (shost->irq) | 298 | if (shost->irq) |
298 | free_irq(shost->irq, NULL); | 299 | free_irq(shost->irq, shost); |
299 | NCR5380_exit(shost); | 300 | NCR5380_exit(shost); |
300 | if (shost->io_port && shost->n_io_port) | 301 | if (shost->io_port && shost->n_io_port) |
301 | release_region(shost->io_port, shost->n_io_port); | 302 | release_region(shost->io_port, shost->n_io_port); |