Diffstat (limited to 'drivers/scsi'): 39 files changed, 661 insertions(+), 327 deletions(-)
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index fde6e4c634e7..a7cf550b9cca 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/scatterlist.h>
+#include <linux/skbuff.h>
 #include <scsi/libiscsi_tcp.h>
 
 /* from cxgb3 LLD */
@@ -113,6 +114,26 @@ struct cxgb3i_endpoint {
 	struct cxgb3i_conn *cconn;
 };
 
+/**
+ * struct cxgb3i_task_data - private iscsi task data
+ *
+ * @nr_frags: # of coalesced page frags (from scsi sgl)
+ * @frags: coalesced page frags (from scsi sgl)
+ * @skb: tx pdu skb
+ * @offset: data offset for the next pdu
+ * @count: max. possible pdu payload
+ * @sgoffset: offset to the first sg entry for a given offset
+ */
+#define MAX_PDU_FRAGS	((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
+struct cxgb3i_task_data {
+	unsigned short nr_frags;
+	skb_frag_t frags[MAX_PDU_FRAGS];
+	struct sk_buff *skb;
+	unsigned int offset;
+	unsigned int count;
+	unsigned int sgoffset;
+};
+
 int cxgb3i_iscsi_init(void);
 void cxgb3i_iscsi_cleanup(void);
 
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 08f3a09d9233..a83d36e4926f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -639,10 +639,11 @@ static int ddp_init(struct t3cdev *tdev)
 	write_unlock(&cxgb3i_ddp_rwlock);
 
 	ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
-			"pkt %u,%u.\n",
+			"pkt %u/%u, %u/%u.\n",
 			ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
 			ddp->idx_mask, ddp->rsvd_tag_mask,
-			ddp->max_txsz, ddp->max_rxsz);
+			ddp->max_txsz, uinfo.max_txsz,
+			ddp->max_rxsz, uinfo.max_rxsz);
 	return 0;
 
 free_ddp_map:
@@ -654,8 +655,8 @@ free_ddp_map:
  * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
  * @tdev: t3cdev adapter
  * @tformat: tag format
- * @txsz: max tx pkt size, filled in by this func.
- * @rxsz: max rx pkt size, filled in by this func.
+ * @txsz: max tx pdu payload size, filled in by this func.
+ * @rxsz: max rx pdu payload size, filled in by this func.
  * initialize the ddp pagepod manager for a given adapter if needed and
  * setup the tag format for a given iscsi entity
  */
@@ -685,10 +686,12 @@ int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
 		tformat->sw_bits, tformat->rsvd_bits,
 		tformat->rsvd_shift, tformat->rsvd_mask);
 
-	*txsz = ddp->max_txsz;
-	*rxsz = ddp->max_rxsz;
-	ddp_log_info("ddp max pkt size: %u, %u.\n",
-		     ddp->max_txsz, ddp->max_rxsz);
+	*txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+		      ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
+	*rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+		      ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
+	ddp_log_info("max payload size: %u/%u, %u/%u.\n",
+		     *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 5c7c4d95c493..3faae7831c83 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -13,6 +13,8 @@
 #ifndef __CXGB3I_ULP2_DDP_H__
 #define __CXGB3I_ULP2_DDP_H__
 
+#include <linux/vmalloc.h>
+
 /**
  * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
  *
@@ -85,8 +87,9 @@ struct cxgb3i_ddp_info {
 	struct sk_buff **gl_skb;
 };
 
+#define ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8) */
 #define ULP2_MAX_PKT_SIZE	16224
-#define ULP2_MAX_PDU_PAYLOAD	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX)
+#define ULP2_MAX_PDU_PAYLOAD	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
 #define PPOD_PAGES_MAX		4
 #define PPOD_PAGES_SHIFT	2	/* 4 pages per pod */
 
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index 091ecb4d9f3d..1ce9f244e46c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -12,8 +12,8 @@
 #include "cxgb3i.h"
 
 #define DRV_MODULE_NAME		"cxgb3i"
-#define DRV_MODULE_VERSION	"1.0.0"
-#define DRV_MODULE_RELDATE	"Jun. 1, 2008"
+#define DRV_MODULE_VERSION	"1.0.1"
+#define DRV_MODULE_RELDATE	"Jan. 2009"
 
 static char version[] =
 	"Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index d83464b9b3f9..fa2a44f37b36 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -364,7 +364,8 @@ cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
 
 	cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
 					  cmds_max,
-					  sizeof(struct iscsi_tcp_task),
+					  sizeof(struct iscsi_tcp_task) +
+					  sizeof(struct cxgb3i_task_data),
 					  initial_cmdsn, ISCSI_MAX_TARGET);
 	if (!cls_session)
 		return NULL;
@@ -402,17 +403,15 @@ static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
-	unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
-				 cconn->hba->snic->tx_max_size -
-				 ISCSI_PDU_NONPAYLOAD_MAX);
+	unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM);
 
+	max = min(cconn->hba->snic->tx_max_size, max);
 	if (conn->max_xmit_dlength)
-		conn->max_xmit_dlength = min_t(unsigned int,
-					       conn->max_xmit_dlength, max);
+		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
 	else
 		conn->max_xmit_dlength = max;
 	align_pdu_size(conn->max_xmit_dlength);
-	cxgb3i_log_info("conn 0x%p, max xmit %u.\n",
+	cxgb3i_api_debug("conn 0x%p, max xmit %u.\n",
 			conn, conn->max_xmit_dlength);
 	return 0;
 }
@@ -427,9 +426,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
-	unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
-				 cconn->hba->snic->rx_max_size -
-				 ISCSI_PDU_NONPAYLOAD_MAX);
+	unsigned int max = cconn->hba->snic->rx_max_size;
 
 	align_pdu_size(max);
 	if (conn->max_recv_dlength) {
@@ -439,8 +436,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
 				      conn->max_recv_dlength, max);
 			return -EINVAL;
 		}
-		conn->max_recv_dlength = min_t(unsigned int,
-					       conn->max_recv_dlength, max);
+		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
 		align_pdu_size(conn->max_recv_dlength);
 	} else
 		conn->max_recv_dlength = max;
@@ -844,7 +840,7 @@ static struct scsi_host_template cxgb3i_host_template = {
 	.proc_name		= "cxgb3i",
 	.queuecommand		= iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
-	.can_queue		= 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1),
+	.can_queue		= CXGB3I_SCSI_QDEPTH_DFLT - 1,
 	.sg_tablesize		= SG_ALL,
 	.max_sectors		= 0xFFFF,
 	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index a865f1fefe8b..de3b3b614cca 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -23,19 +23,19 @@
 #include "cxgb3i_ddp.h"
 
 #ifdef __DEBUG_C3CN_CONN__
-#define c3cn_conn_debug		cxgb3i_log_info
+#define c3cn_conn_debug		cxgb3i_log_debug
 #else
 #define c3cn_conn_debug(fmt...)
 #endif
 
 #ifdef __DEBUG_C3CN_TX__
 #define c3cn_tx_debug		cxgb3i_log_debug
 #else
 #define c3cn_tx_debug(fmt...)
 #endif
 
 #ifdef __DEBUG_C3CN_RX__
 #define c3cn_rx_debug		cxgb3i_log_debug
 #else
 #define c3cn_rx_debug(fmt...)
 #endif
@@ -47,9 +47,9 @@ static int cxgb3_rcv_win = 256 * 1024;
 module_param(cxgb3_rcv_win, int, 0644);
 MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
 
-static int cxgb3_snd_win = 64 * 1024;
+static int cxgb3_snd_win = 128 * 1024;
 module_param(cxgb3_snd_win, int, 0644);
-MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)");
+MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)");
 
 static int cxgb3_rx_credit_thres = 10 * 1024;
 module_param(cxgb3_rx_credit_thres, int, 0644);
@@ -301,8 +301,8 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
 		       int flags)
 {
-	CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
-	CXGB3_SKB_CB(skb)->flags = flags;
+	skb_tcp_seq(skb) = c3cn->write_seq;
+	skb_flags(skb) = flags;
 	__skb_queue_tail(&c3cn->write_queue, skb);
 }
 
@@ -457,12 +457,9 @@ static unsigned int wrlen __read_mostly;
  * The number of WRs needed for an skb depends on the number of fragments
  * in the skb and whether it has any payload in its main body. This maps the
  * length of the gather list represented by an skb into the # of necessary WRs.
- *
- * The max. length of an skb is controlled by the max pdu size which is ~16K.
- * Also, assume the min. fragment length is the sector size (512), then add
- * extra fragment counts for iscsi bhs and payload padding.
+ * The extra two fragments are for iscsi bhs and payload padding.
  */
-#define SKB_WR_LIST_SIZE	(16384/512 + 3)
+#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)
 static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
 
 static void s3_init_wr_tab(unsigned int wr_len)
@@ -485,7 +482,7 @@ static void s3_init_wr_tab(unsigned int wr_len)
 
 static inline void reset_wr_list(struct s3_conn *c3cn)
 {
-	c3cn->wr_pending_head = NULL;
+	c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL;
 }
 
 /*
@@ -496,7 +493,7 @@ static inline void reset_wr_list(struct s3_conn *c3cn)
 static inline void enqueue_wr(struct s3_conn *c3cn,
 			      struct sk_buff *skb)
 {
-	skb_wr_data(skb) = NULL;
+	skb_tx_wr_next(skb) = NULL;
 
 	/*
 	 * We want to take an extra reference since both us and the driver
@@ -509,10 +506,22 @@ static inline void enqueue_wr(struct s3_conn *c3cn,
 	if (!c3cn->wr_pending_head)
 		c3cn->wr_pending_head = skb;
 	else
-		skb_wr_data(skb) = skb;
+		skb_tx_wr_next(c3cn->wr_pending_tail) = skb;
 	c3cn->wr_pending_tail = skb;
 }
 
+static int count_pending_wrs(struct s3_conn *c3cn)
+{
+	int n = 0;
+	const struct sk_buff *skb = c3cn->wr_pending_head;
+
+	while (skb) {
+		n += skb->csum;
+		skb = skb_tx_wr_next(skb);
+	}
+	return n;
+}
+
 static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
 {
 	return c3cn->wr_pending_head;
@@ -529,8 +538,8 @@ static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
 
 	if (likely(skb)) {
 		/* Don't bother clearing the tail */
-		c3cn->wr_pending_head = skb_wr_data(skb);
-		skb_wr_data(skb) = NULL;
+		c3cn->wr_pending_head = skb_tx_wr_next(skb);
+		skb_tx_wr_next(skb) = NULL;
 	}
 	return skb;
 }
@@ -543,13 +552,14 @@ static void purge_wr_queue(struct s3_conn *c3cn)
 }
 
 static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
-				   int len)
+				   int len, int req_completion)
 {
 	struct tx_data_wr *req;
 
 	skb_reset_transport_header(skb);
 	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
-	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
+			(req_completion ? F_WR_COMPL : 0));
 	req->wr_lo = htonl(V_WR_TID(c3cn->tid));
 	req->sndseq = htonl(c3cn->snd_nxt);
 	/* len includes the length of any HW ULP additions */
@@ -592,7 +602,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
 
 	if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
 		     c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
-		     c3cn->state == C3CN_STATE_ABORTING)) {
+		     c3cn->state >= C3CN_STATE_ABORTING)) {
 		c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
 			      c3cn, c3cn->state);
 		return 0;
@@ -615,7 +625,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
 		if (c3cn->wr_avail < wrs_needed) {
 			c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
 				      "wr %d < %u.\n",
-				      c3cn, skb->len, skb->datalen, frags,
+				      c3cn, skb->len, skb->data_len, frags,
 				      wrs_needed, c3cn->wr_avail);
 			break;
 		}
@@ -627,20 +637,24 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
 		c3cn->wr_unacked += wrs_needed;
 		enqueue_wr(c3cn, skb);
 
-		if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) {
-			len += ulp_extra_len(skb);
-			make_tx_data_wr(c3cn, skb, len);
-			c3cn->snd_nxt += len;
-			if ((req_completion
-			    && c3cn->wr_unacked == wrs_needed)
-			    || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL)
-			    || c3cn->wr_unacked >= c3cn->wr_max / 2) {
-				struct work_request_hdr *wr = cplhdr(skb);
+		c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, "
+				"wr %d, left %u, unack %u.\n",
+				c3cn, skb->len, skb->data_len, frags,
+				wrs_needed, c3cn->wr_avail, c3cn->wr_unacked);
+
 
-				wr->wr_hi |= htonl(F_WR_COMPL);
+		if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) {
+			if ((req_completion &&
+			     c3cn->wr_unacked == wrs_needed) ||
+			    (skb_flags(skb) & C3CB_FLAG_COMPL) ||
+			    c3cn->wr_unacked >= c3cn->wr_max / 2) {
+				req_completion = 1;
 				c3cn->wr_unacked = 0;
 			}
-			CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR;
+			len += ulp_extra_len(skb);
+			make_tx_data_wr(c3cn, skb, len, req_completion);
+			c3cn->snd_nxt += len;
+			skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR;
 		}
 
 		total_size += skb->truesize;
@@ -735,8 +749,11 @@ static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
 	if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
 		/* upper layer has requested closing */
 		send_abort_req(c3cn);
-	else if (c3cn_push_tx_frames(c3cn, 1))
+	else {
+		if (skb_queue_len(&c3cn->write_queue))
+			c3cn_push_tx_frames(c3cn, 1);
 		cxgb3i_conn_tx_open(c3cn);
+	}
 }
 
 static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
@@ -1082,8 +1099,8 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
 		return;
 	}
 
-	CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq);
-	CXGB3_SKB_CB(skb)->flags = 0;
+	skb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
+	skb_flags(skb) = 0;
 
 	skb_reset_transport_header(skb);
 	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
@@ -1103,12 +1120,12 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
 		goto abort_conn;
 
 	skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
-	skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len);
-	skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
+	skb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
+	skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
 	status = ntohl(ddp_cpl.ddp_status);
 
 	c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
-		      skb, skb->len, skb_ulp_pdulen(skb), status);
+		      skb, skb->len, skb_rx_pdulen(skb), status);
 
 	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
 		skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
@@ -1126,7 +1143,7 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
 	} else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
 		skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
 
-	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb);
+	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb);
 	__pskb_trim(skb, len);
 	__skb_queue_tail(&c3cn->receive_queue, skb);
 	cxgb3i_conn_pdu_ready(c3cn);
@@ -1151,12 +1168,27 @@ static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
  * Process an acknowledgment of WR completion. Advance snd_una and send the
  * next batch of work requests from the write queue.
  */
+static void check_wr_invariants(struct s3_conn *c3cn)
+{
+	int pending = count_pending_wrs(c3cn);
+
+	if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max))
+		cxgb3i_log_error("TID %u: credit imbalance: avail %u, "
+				"pending %u, total should be %u\n",
+				c3cn->tid, c3cn->wr_avail, pending,
+				c3cn->wr_max);
+}
+
 static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
 {
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);
 	u32 snd_una = ntohl(hdr->snd_una);
 
+	c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n",
+			credits, c3cn->wr_avail, c3cn->wr_unacked,
+			c3cn->tid, c3cn->state);
+
 	c3cn->wr_avail += credits;
 	if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
 		c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;
@@ -1171,6 +1203,17 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
 			break;
 		}
 		if (unlikely(credits < p->csum)) {
+			struct tx_data_wr *w = cplhdr(p);
+			cxgb3i_log_error("TID %u got %u WR credits need %u, "
+					"len %u, main body %u, frags %u, "
+					"seq # %u, ACK una %u, ACK nxt %u, "
+					"WR_AVAIL %u, WRs pending %u\n",
+					c3cn->tid, credits, p->csum, p->len,
+					p->len - p->data_len,
+					skb_shinfo(p)->nr_frags,
+					ntohl(w->sndseq), snd_una,
+					ntohl(hdr->snd_nxt), c3cn->wr_avail,
+					count_pending_wrs(c3cn) - credits);
 			p->csum -= credits;
 			break;
 		} else {
@@ -1180,15 +1223,24 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
 		}
 	}
 
-	if (unlikely(before(snd_una, c3cn->snd_una)))
+	check_wr_invariants(c3cn);
+
+	if (unlikely(before(snd_una, c3cn->snd_una))) {
+		cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK "
+				"snd_una %u\n",
+				c3cn->tid, snd_una, c3cn->snd_una);
 		goto out_free;
+	}
 
 	if (c3cn->snd_una != snd_una) {
 		c3cn->snd_una = snd_una;
 		dst_confirm(c3cn->dst_cache);
 	}
 
-	if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0))
+	if (skb_queue_len(&c3cn->write_queue)) {
+		if (c3cn_push_tx_frames(c3cn, 0))
+			cxgb3i_conn_tx_open(c3cn);
+	} else
 		cxgb3i_conn_tx_open(c3cn);
 out_free:
 	__kfree_skb(skb);
@@ -1452,7 +1504,7 @@ static void init_offload_conn(struct s3_conn *c3cn,
 			      struct dst_entry *dst)
 {
 	BUG_ON(c3cn->cdev != cdev);
-	c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs;
+	c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1;
 	c3cn->wr_unacked = 0;
 	c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
 
@@ -1671,9 +1723,17 @@ int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
 		goto out_err;
 	}
 
-	err = -EPIPE;
 	if (c3cn->err) {
 		c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
+		err = -EPIPE;
+		goto out_err;
+	}
+
+	if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) {
+		c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
+				c3cn, c3cn->write_seq, c3cn->snd_una,
+				cxgb3_snd_win);
+		err = -EAGAIN;
 		goto out_err;
 	}
 
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index d23156907ffd..6344b9eb2589 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -178,25 +178,33 @@ void cxgb3i_c3cn_release(struct s3_conn *);
  * @flag: see C3CB_FLAG_* below
  * @ulp_mode: ULP mode/submode of sk_buff
  * @seq: tcp sequence number
- * @ddigest: pdu data digest
- * @pdulen: recovered pdu length
- * @wr_data: scratch area for tx wr
  */
+struct cxgb3_skb_rx_cb {
+	__u32 ddigest;			/* data digest */
+	__u32 pdulen;			/* recovered pdu length */
+};
+
+struct cxgb3_skb_tx_cb {
+	struct sk_buff *wr_next;	/* next wr */
+};
+
 struct cxgb3_skb_cb {
 	__u8 flags;
 	__u8 ulp_mode;
 	__u32 seq;
-	__u32 ddigest;
-	__u32 pdulen;
-	struct sk_buff *wr_data;
+	union {
+		struct cxgb3_skb_rx_cb rx;
+		struct cxgb3_skb_tx_cb tx;
+	};
 };
 
 #define CXGB3_SKB_CB(skb)	((struct cxgb3_skb_cb *)&((skb)->cb[0]))
-
+#define skb_flags(skb)		(CXGB3_SKB_CB(skb)->flags)
 #define skb_ulp_mode(skb)	(CXGB3_SKB_CB(skb)->ulp_mode)
-#define skb_ulp_ddigest(skb)	(CXGB3_SKB_CB(skb)->ddigest)
-#define skb_ulp_pdulen(skb)	(CXGB3_SKB_CB(skb)->pdulen)
-#define skb_wr_data(skb)	(CXGB3_SKB_CB(skb)->wr_data)
+#define skb_tcp_seq(skb)	(CXGB3_SKB_CB(skb)->seq)
+#define skb_rx_ddigest(skb)	(CXGB3_SKB_CB(skb)->rx.ddigest)
+#define skb_rx_pdulen(skb)	(CXGB3_SKB_CB(skb)->rx.pdulen)
+#define skb_tx_wr_next(skb)	(CXGB3_SKB_CB(skb)->tx.wr_next)
 
 enum c3cb_flags {
 	C3CB_FLAG_NEED_HDR = 1 << 0,	/* packet needs a TX_DATA_WR header */
@@ -217,6 +225,7 @@ struct sge_opaque_hdr {
 /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
 #define TX_HEADER_LEN \
 	(sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
+#define SKB_TX_HEADROOM		SKB_MAX_HEAD(TX_HEADER_LEN)
 
 /*
  * get and set private ip for iscsi traffic
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index ce7ce8c6094c..17115c230d65 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -32,6 +32,10 @@
 #define cxgb3i_tx_debug(fmt...)
 #endif
 
+/* always allocate rooms for AHS */
+#define SKB_TX_PDU_HEADER_LEN	\
+	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
+static unsigned int skb_extra_headroom;
 static struct page *pad_page;
 
 /*
@@ -146,12 +150,13 @@ static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
 
 void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
 {
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgb3i_task_data *tdata = task->dd_data +
+				sizeof(struct iscsi_tcp_task);
 
 	/* never reached the xmit task callout */
-	if (tcp_task->dd_data)
-		kfree_skb(tcp_task->dd_data);
-	tcp_task->dd_data = NULL;
+	if (tdata->skb)
+		__kfree_skb(tdata->skb);
+	memset(tdata, 0, sizeof(struct cxgb3i_task_data));
 
 	/* MNC - Do we need a check in case this is called but
 	 * cxgb3i_conn_alloc_pdu has never been called on the task */
@@ -159,28 +164,102 @@ void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
 	iscsi_tcp_cleanup_task(task);
 }
 
-/*
- * We do not support ahs yet
- */
+static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
+			   unsigned int offset, unsigned int *off,
+			   struct scatterlist **sgp)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sgl, sg, sgcnt, i) {
+		if (offset < sg->length) {
+			*off = offset;
+			*sgp = sg;
+			return 0;
+		}
+		offset -= sg->length;
+	}
+	return -EFAULT;
+}
+
+static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
+			     unsigned int dlen, skb_frag_t *frags,
+			     int frag_max)
+{
+	unsigned int datalen = dlen;
+	unsigned int sglen = sg->length - sgoffset;
+	struct page *page = sg_page(sg);
+	int i;
+
+	i = 0;
+	do {
+		unsigned int copy;
+
+		if (!sglen) {
+			sg = sg_next(sg);
+			if (!sg) {
+				cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
+						__func__, datalen, dlen);
+				return -EINVAL;
+			}
+			sgoffset = 0;
+			sglen = sg->length;
+			page = sg_page(sg);
+
+		}
+		copy = min(datalen, sglen);
+		if (i && page == frags[i - 1].page &&
+		    sgoffset + sg->offset ==
+			frags[i - 1].page_offset + frags[i - 1].size) {
+			frags[i - 1].size += copy;
+		} else {
+			if (i >= frag_max) {
+				cxgb3i_log_error("%s, too many pages %u, "
+						"dlen %u.\n", __func__,
+						frag_max, dlen);
+				return -EINVAL;
+			}
+
+			frags[i].page = page;
+			frags[i].page_offset = sg->offset + sgoffset;
+			frags[i].size = copy;
+			i++;
+		}
+		datalen -= copy;
+		sgoffset += copy;
+		sglen -= copy;
+	} while (datalen);
+
+	return i;
+}
+
 int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 {
+	struct iscsi_conn *conn = task->conn;
 	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct sk_buff *skb;
+	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
+	struct scsi_cmnd *sc = task->sc;
+	int headroom = SKB_TX_PDU_HEADER_LEN;
 
+	tcp_task->dd_data = tdata;
 	task->hdr = NULL;
-	/* always allocate rooms for AHS */
-	skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE +
-			TX_HEADER_LEN, GFP_ATOMIC);
-	if (!skb)
+
+	/* write command, need to send data pdus */
+	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
+	    (opcode == ISCSI_OP_SCSI_CMD &&
+	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
+		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);
+
+	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
+	if (!tdata->skb)
 		return -ENOMEM;
+	skb_reserve(tdata->skb, TX_HEADER_LEN);
 
 	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
-			task, opcode, skb);
+			task, opcode, tdata->skb);
 
-	tcp_task->dd_data = skb;
-	skb_reserve(skb, TX_HEADER_LEN);
-	task->hdr = (struct iscsi_hdr *)skb->data;
-	task->hdr_max = sizeof(struct iscsi_hdr);
+	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+	task->hdr_max = SKB_TX_PDU_HEADER_LEN;
 
 	/* data_out uses scsi_cmd's itt */
 	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
@@ -192,13 +271,13 @@ int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
 			 unsigned int count)
 {
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct sk_buff *skb = tcp_task->dd_data;
 	struct iscsi_conn *conn = task->conn;
-	struct page *pg;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
+	struct sk_buff *skb = tdata->skb;
 	unsigned int datalen = count;
 	int i, padlen = iscsi_padding(count);
-	skb_frag_t *frag;
+	struct page *pg;
 
 	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
 			task, task->sc, offset, count, skb);
@@ -209,90 +288,94 @@ int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
 		return 0;
 
 	if (task->sc) {
-		struct scatterlist *sg;
-		struct scsi_data_buffer *sdb;
-		unsigned int sgoffset = offset;
-		struct page *sgpg;
-		unsigned int sglen;
-
-		sdb = scsi_out(task->sc);
-		sg = sdb->table.sgl;
-
-		for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
-			cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n",
-				i, sg_page(sg), sg->length, sg->offset);
-
-			if (sgoffset < sg->length)
-				break;
-			sgoffset -= sg->length;
+		struct scsi_data_buffer *sdb = scsi_out(task->sc);
+		struct scatterlist *sg = NULL;
+		int err;
+
+		tdata->offset = offset;
+		tdata->count = count;
+		err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
+					tdata->offset, &tdata->sgoffset, &sg);
+		if (err < 0) {
+			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
+					sdb->table.nents, tdata->offset,
+					sdb->length);
+			return err;
 		}
-		sgpg = sg_page(sg);
-		sglen = sg->length - sgoffset;
-
-		do {
-			int j = skb_shinfo(skb)->nr_frags;
-			unsigned int copy;
-
-			if (!sglen) {
-				sg = sg_next(sg);
-				sgpg = sg_page(sg);
-				sgoffset = 0;
-				sglen = sg->length;
-				++i;
+		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
+					tdata->frags, MAX_PDU_FRAGS);
+		if (err < 0) {
+			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
+					sdb->table.nents, tdata->offset,
+					tdata->count);
+			return err;
+		}
+		tdata->nr_frags = err;
+
+		if (tdata->nr_frags > MAX_SKB_FRAGS ||
+		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
+			char *dst = skb->data + task->hdr_len;
+			skb_frag_t *frag = tdata->frags;
+
+			/* data fits in the skb's headroom */
+			for (i = 0; i < tdata->nr_frags; i++, frag++) {
+				char *src = kmap_atomic(frag->page,
+							KM_SOFTIRQ0);
+
+				memcpy(dst, src+frag->page_offset, frag->size);
+				dst += frag->size;
+				kunmap_atomic(src, KM_SOFTIRQ0);
 			}
-			copy = min(sglen, datalen);
-			if (j && skb_can_coalesce(skb, j, sgpg,
-						  sg->offset + sgoffset)) {
-				skb_shinfo(skb)->frags[j - 1].size += copy;
-			} else {
-				get_page(sgpg);
-				skb_fill_page_desc(skb, j, sgpg,
-						   sg->offset + sgoffset, copy);
+			if (padlen) {
+				memset(dst, 0, padlen);
+				padlen = 0;
 			}
-			sgoffset += copy;
-			sglen -= copy;
-			datalen -= copy;
-		} while (datalen);
+			skb_put(skb, count + padlen);
+		} else {
+			/* data fit into frag_list */
+			for (i = 0; i < tdata->nr_frags; i++)
+				get_page(tdata->frags[i].page);
+
+			memcpy(skb_shinfo(skb)->frags, tdata->frags,
+			       sizeof(skb_frag_t) * tdata->nr_frags);
+			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
+			skb->len += count;
+			skb->data_len += count;
+			skb->truesize += count;
+		}
+
 	} else {
 		pg = virt_to_page(task->data);
 
-		while (datalen) {
-			i = skb_shinfo(skb)->nr_frags;
-			frag = &skb_shinfo(skb)->frags[i];
-
-			get_page(pg);
-			frag->page = pg;
-			frag->page_offset = 0;
-			frag->size = min((unsigned int)PAGE_SIZE, datalen);
-
-			skb_shinfo(skb)->nr_frags++;
-			datalen -= frag->size;
-			pg++;
-		}
+		get_page(pg);
+		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
+				   count);
+		skb->len += count;
+		skb->data_len += count;
+		skb->truesize += count;
 	}
 
 	if (padlen) {
 		i = skb_shinfo(skb)->nr_frags;
-		frag = &skb_shinfo(skb)->frags[i];
-		frag->page = pad_page;
-		frag->page_offset = 0;
-		frag->size = padlen;
-		skb_shinfo(skb)->nr_frags++;
+		get_page(pad_page);
+		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
+				   padlen);
+
+		skb->data_len += padlen;
+		skb->truesize += padlen;
+		skb->len += padlen;
 	}
 
-	datalen = count + padlen;
-	skb->data_len += datalen;
-	skb->truesize += datalen;
-	skb->len += datalen;
 	return 0;
 }
 
 int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
 {
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct sk_buff *skb = tcp_task->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
 	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
+	struct sk_buff *skb = tdata->skb;
 	unsigned int datalen;
 	int err;
 
@@ -300,13 +383,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
 		return 0;
 
 	datalen = skb->data_len;
-	tcp_task->dd_data = NULL;
+	tdata->skb = NULL;
 	err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
-	cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
-			task, skb, skb->len, skb->data_len, err);
 	if (err > 0) {
 		int pdulen = err;
 
+		cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+				task, skb, skb->len, skb->data_len, err);
+
 		if (task->conn->hdrdgst_en)
 			pdulen += ISCSI_DIGEST_SIZE;
 		if (datalen && task->conn->datadgst_en)
@@ -325,12 +409,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
 		return err;
 	}
 	/* reset skb to send when we are called again */
-	tcp_task->dd_data = skb;
+	tdata->skb = skb;
 	return -EAGAIN;
 }
 
 int cxgb3i_pdu_init(void)
 {
+	if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
+		skb_extra_headroom = SKB_TX_HEADROOM;
 	pad_page = alloc_page(GFP_KERNEL);
 	if (!pad_page)
 		return -ENOMEM;
@@ -366,7 +452,9 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
 	skb = skb_peek(&c3cn->receive_queue);
 	while (!err && skb) {
 		__skb_unlink(skb, &c3cn->receive_queue);
-		read += skb_ulp_pdulen(skb);
+		read += skb_rx_pdulen(skb);
+		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
+				conn, c3cn, skb, skb_rx_pdulen(skb));
 		err = cxgb3i_conn_read_pdu_skb(conn, skb);
 		__kfree_skb(skb);
 		skb = skb_peek(&c3cn->receive_queue);
@@ -377,6 +465,11 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
 		cxgb3i_c3cn_rx_credits(c3cn, read);
 	}
 	conn->rxdata_octets += read;
+
+	if (err) {
+		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
+		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	}
 }
 
 void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
index a3f685cc2362..0770b23d90da 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
@@ -53,7 +53,7 @@ struct cpl_rx_data_ddp_norss {
 #define ULP2_FLAG_DCRC_ERROR		0x20
 #define ULP2_FLAG_PAD_ERROR		0x40
 
-void cxgb3i_conn_closing(struct s3_conn *);
+void cxgb3i_conn_closing(struct s3_conn *c3cn);
 void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
 void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
 #endif
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index a48e4990fe12..34be88d7afa5 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1251,6 +1251,7 @@ static struct pci_device_id hptiop_id_table[] = {
 	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
 	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
 	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
+	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
 	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
 	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
 	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ee0739b217b6..ed1e728763a2 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -933,7 +933,7 @@ static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
 		fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
 		break;
 	default:
-		ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
+		ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
 			   vhost->login_buf->resp.link_speed / 100);
 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 		break;
@@ -1322,7 +1322,9 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
 						    &evt->ext_list_token);
 
 		if (!evt->ext_list) {
-			scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
+			scsi_dma_unmap(scmd);
+			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
 			return -ENOMEM;
 		}
 	}
@@ -1571,9 +1573,6 @@ static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
 	vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
 	vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
 	vfc_cmd->tgt_scsi_id = rport->port_id;
-	if ((rport->supported_classes & FC_COS_CLASS3) &&
-	    (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3))
-		vfc_cmd->flags = IBMVFC_CLASS_3_ERR;
 	vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
 	int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
 	memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
@@ -2149,8 +2148,8 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 {
 	const char *desc = ibmvfc_get_ae_desc(crq->event);
 
-	ibmvfc_log(vhost, 3, "%s event received. scsi_id: %lx, wwpn: %lx,"
-		   " node_name: %lx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
+	ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
+		   " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
 
 	switch (crq->event) {
 	case IBMVFC_AE_LINK_UP:
@@ -2184,7 +2183,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 		ibmvfc_link_down(vhost, IBMVFC_HALTED);
 		break;
 	default:
-		dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
+		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
 		break;
 	};
 }
@@ -2261,13 +2260,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
 	 * actually sent
 	 */
 	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
-		dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
+		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
 			crq->ioba);
 		return;
 	}
 
 	if (unlikely(atomic_read(&evt->free))) {
-		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
+		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
 			crq->ioba);
 		return;
 	}
@@ -3259,11 +3258,12 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
 
 	tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
 	if (!tgt) {
-		dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n",
+		dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
 			scsi_id);
 		return -ENOMEM;
 	}
 
+	memset(tgt, 0, sizeof(*tgt));
 	tgt->scsi_id = scsi_id;
 	tgt->new_scsi_id = scsi_id;
 	tgt->vhost = vhost;
@@ -3574,9 +3574,18 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
 {
 	struct ibmvfc_host *vhost = tgt->vhost;
-	struct fc_rport *rport;
+	struct fc_rport *rport = tgt->rport;
 	unsigned long flags;
 
+	if (rport) {
+		tgt_dbg(tgt, "Setting rport roles\n");
+		fc_remote_port_rolechg(rport, tgt->ids.roles);
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		return;
+	}
+
 	tgt_dbg(tgt, "Adding rport\n");
 	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
 	spin_lock_irqsave(vhost->host->host_lock, flags);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index babdf3db59df..b21e071b9862 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #define IBMVFC_DRIVER_VERSION "1.0.4" | 32 | #define IBMVFC_DRIVER_VERSION "1.0.4" |
33 | #define IBMVFC_DRIVER_DATE "(November 14, 2008)" | 33 | #define IBMVFC_DRIVER_DATE "(November 14, 2008)" |
34 | 34 | ||
35 | #define IBMVFC_DEFAULT_TIMEOUT 15 | 35 | #define IBMVFC_DEFAULT_TIMEOUT 60 |
36 | #define IBMVFC_INIT_TIMEOUT 120 | 36 | #define IBMVFC_INIT_TIMEOUT 120 |
37 | #define IBMVFC_MAX_REQUESTS_DEFAULT 100 | 37 | #define IBMVFC_MAX_REQUESTS_DEFAULT 100 |
38 | 38 | ||
@@ -691,13 +691,13 @@ struct ibmvfc_host { | |||
691 | #define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0) | 691 | #define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0) |
692 | 692 | ||
693 | #define tgt_dbg(t, fmt, ...) \ | 693 | #define tgt_dbg(t, fmt, ...) \ |
694 | DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)) | 694 | DBG_CMD(dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)) |
695 | 695 | ||
696 | #define tgt_info(t, fmt, ...) \ | 696 | #define tgt_info(t, fmt, ...) \ |
697 | dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) | 697 | dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) |
698 | 698 | ||
699 | #define tgt_err(t, fmt, ...) \ | 699 | #define tgt_err(t, fmt, ...) \ |
700 | dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) | 700 | dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) |
701 | 701 | ||
702 | #define ibmvfc_dbg(vhost, ...) \ | 702 | #define ibmvfc_dbg(vhost, ...) \ |
703 | DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) | 703 | DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) |
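Most of the ibmvfc changes above (and the ibmvscsi and ps3rom hunks further down) are printk format fixes: scsi_id, the CRQ event/ioba fields and the task tags are u64, and with u64 now uniformly typed as unsigned long long the old %lx/%lX conversions warn under -Wformat and misread the varargs on 32-bit builds. A minimal userspace illustration of the same rule, assuming nothing beyond standard C:

#include <stdio.h>

int main(void)
{
	/* u64 maps to 'unsigned long long', so %llx/%llX is the matching
	 * conversion; %lx only happens to work where long is 64 bits and
	 * draws a -Wformat warning either way. */
	unsigned long long scsi_id = 0x10000000002ULL;

	printf("%llX: target found\n", scsi_id);
	return 0;
}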
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 5c541f7850f9..c9aa7611e408 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -432,6 +432,7 @@ static int map_sg_data(struct scsi_cmnd *cmd, | |||
432 | sdev_printk(KERN_ERR, cmd->device, | 432 | sdev_printk(KERN_ERR, cmd->device, |
433 | "Can't allocate memory " | 433 | "Can't allocate memory " |
434 | "for indirect table\n"); | 434 | "for indirect table\n"); |
435 | scsi_dma_unmap(cmd); | ||
435 | return 0; | 436 | return 0; |
436 | } | 437 | } |
437 | } | 438 | } |
@@ -1061,7 +1062,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) | |||
1061 | } | 1062 | } |
1062 | 1063 | ||
1063 | sdev_printk(KERN_INFO, cmd->device, | 1064 | sdev_printk(KERN_INFO, cmd->device, |
1064 | "aborting command. lun 0x%lx, tag 0x%lx\n", | 1065 | "aborting command. lun 0x%llx, tag 0x%llx\n", |
1065 | (((u64) lun) << 48), (u64) found_evt); | 1066 | (((u64) lun) << 48), (u64) found_evt); |
1066 | 1067 | ||
1067 | wait_for_completion(&evt->comp); | 1068 | wait_for_completion(&evt->comp); |
@@ -1082,7 +1083,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) | |||
1082 | if (rsp_rc) { | 1083 | if (rsp_rc) { |
1083 | if (printk_ratelimit()) | 1084 | if (printk_ratelimit()) |
1084 | sdev_printk(KERN_WARNING, cmd->device, | 1085 | sdev_printk(KERN_WARNING, cmd->device, |
1085 | "abort code %d for task tag 0x%lx\n", | 1086 | "abort code %d for task tag 0x%llx\n", |
1086 | rsp_rc, tsk_mgmt->task_tag); | 1087 | rsp_rc, tsk_mgmt->task_tag); |
1087 | return FAILED; | 1088 | return FAILED; |
1088 | } | 1089 | } |
@@ -1102,12 +1103,12 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) | |||
1102 | 1103 | ||
1103 | if (found_evt == NULL) { | 1104 | if (found_evt == NULL) { |
1104 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); | 1105 | spin_unlock_irqrestore(hostdata->host->host_lock, flags); |
1105 | sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n", | 1106 | sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n", |
1106 | tsk_mgmt->task_tag); | 1107 | tsk_mgmt->task_tag); |
1107 | return SUCCESS; | 1108 | return SUCCESS; |
1108 | } | 1109 | } |
1109 | 1110 | ||
1110 | sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n", | 1111 | sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n", |
1111 | tsk_mgmt->task_tag); | 1112 | tsk_mgmt->task_tag); |
1112 | 1113 | ||
1113 | cmd->result = (DID_ABORT << 16); | 1114 | cmd->result = (DID_ABORT << 16); |
@@ -1182,7 +1183,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
1182 | return FAILED; | 1183 | return FAILED; |
1183 | } | 1184 | } |
1184 | 1185 | ||
1185 | sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n", | 1186 | sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n", |
1186 | (((u64) lun) << 48)); | 1187 | (((u64) lun) << 48)); |
1187 | 1188 | ||
1188 | wait_for_completion(&evt->comp); | 1189 | wait_for_completion(&evt->comp); |
@@ -1203,7 +1204,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
1203 | if (rsp_rc) { | 1204 | if (rsp_rc) { |
1204 | if (printk_ratelimit()) | 1205 | if (printk_ratelimit()) |
1205 | sdev_printk(KERN_WARNING, cmd->device, | 1206 | sdev_printk(KERN_WARNING, cmd->device, |
1206 | "reset code %d for task tag 0x%lx\n", | 1207 | "reset code %d for task tag 0x%llx\n", |
1207 | rsp_rc, tsk_mgmt->task_tag); | 1208 | rsp_rc, tsk_mgmt->task_tag); |
1208 | return FAILED; | 1209 | return FAILED; |
1209 | } | 1210 | } |
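The single added line in ibmvscsi's map_sg_data() closes a DMA-mapping leak: scsi_dma_map() has already been called by the time the indirect-table allocation can fail, so the early return has to undo that mapping. A hedged sketch of the error-path shape (names simplified, not the driver's exact code):

#include <linux/dma-mapping.h>
#include <scsi/scsi_cmnd.h>

static int map_data_sketch(struct scsi_cmnd *cmd, struct device *dev,
			   size_t table_size)
{
	dma_addr_t table_dma;
	void *table;
	int sg_mapped = scsi_dma_map(cmd);

	if (sg_mapped <= 0)
		return 0;

	table = dma_alloc_coherent(dev, table_size, &table_dma, GFP_ATOMIC);
	if (!table) {
		scsi_dma_unmap(cmd);	/* the fix: release the sg mapping */
		return 0;
	}
	/* ... fill and use the indirect descriptor table ... */
	return 1;
}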
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 841f460edbc4..07829009a8be 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -4912,7 +4912,7 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) | |||
4912 | if (res && ipr_is_gata(res)) { | 4912 | if (res && ipr_is_gata(res)) { |
4913 | if (cmd == HDIO_GET_IDENTITY) | 4913 | if (cmd == HDIO_GET_IDENTITY) |
4914 | return -ENOTTY; | 4914 | return -ENOTTY; |
4915 | return ata_scsi_ioctl(sdev, cmd, arg); | 4915 | return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg); |
4916 | } | 4916 | } |
4917 | 4917 | ||
4918 | return -EINVAL; | 4918 | return -EINVAL; |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 7225b6e2029e..809d32d95c76 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1981,6 +1981,7 @@ void iscsi_pool_free(struct iscsi_pool *q) | |||
1981 | kfree(q->pool[i]); | 1981 | kfree(q->pool[i]); |
1982 | if (q->pool) | 1982 | if (q->pool) |
1983 | kfree(q->pool); | 1983 | kfree(q->pool); |
1984 | kfree(q->queue); | ||
1984 | } | 1985 | } |
1985 | EXPORT_SYMBOL_GPL(iscsi_pool_free); | 1986 | EXPORT_SYMBOL_GPL(iscsi_pool_free); |
1986 | 1987 | ||
@@ -1997,6 +1998,8 @@ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) | |||
1997 | if (!shost->can_queue) | 1998 | if (!shost->can_queue) |
1998 | shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; | 1999 | shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; |
1999 | 2000 | ||
2001 | if (!shost->transportt->eh_timed_out) | ||
2002 | shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; | ||
2000 | return scsi_add_host(shost, pdev); | 2003 | return scsi_add_host(shost, pdev); |
2001 | } | 2004 | } |
2002 | EXPORT_SYMBOL_GPL(iscsi_host_add); | 2005 | EXPORT_SYMBOL_GPL(iscsi_host_add); |
@@ -2019,7 +2022,6 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, | |||
2019 | shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); | 2022 | shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); |
2020 | if (!shost) | 2023 | if (!shost) |
2021 | return NULL; | 2024 | return NULL; |
2022 | shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; | ||
2023 | 2025 | ||
2024 | if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { | 2026 | if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { |
2025 | if (qdepth != 0) | 2027 | if (qdepth != 0) |
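Two things change in libiscsi here: iscsi_pool_free() now also frees the queue array allocated alongside the pool, and the default eh_timed_out handler is installed in iscsi_host_add() instead of iscsi_host_alloc(), and only when the transport template does not already provide one. The template is typically attached by the LLD between alloc and add, and an offload driver may want its own timeout policy; a hypothetical handler (name and policy invented for illustration) that such a driver could set in its transport template before calling iscsi_host_add():

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>

/* With the change above, libiscsi only fills in iscsi_eh_cmd_timed_out
 * when transportt->eh_timed_out is still NULL, so this survives. */
static enum blk_eh_timer_return my_iscsi_eh_timed_out(struct scsi_cmnd *sc)
{
	/* e.g. give offloaded commands more time instead of escalating */
	return BLK_EH_RESET_TIMER;
}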
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index a745f91d2928..e7705d3532c9 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -177,7 +177,6 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, | |||
177 | struct iscsi_segment *segment, int recv, | 177 | struct iscsi_segment *segment, int recv, |
178 | unsigned copied) | 178 | unsigned copied) |
179 | { | 179 | { |
180 | static unsigned char padbuf[ISCSI_PAD_LEN]; | ||
181 | struct scatterlist sg; | 180 | struct scatterlist sg; |
182 | unsigned int pad; | 181 | unsigned int pad; |
183 | 182 | ||
@@ -233,7 +232,7 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn, | |||
233 | debug_tcp("consume %d pad bytes\n", pad); | 232 | debug_tcp("consume %d pad bytes\n", pad); |
234 | segment->total_size += pad; | 233 | segment->total_size += pad; |
235 | segment->size = pad; | 234 | segment->size = pad; |
236 | segment->data = padbuf; | 235 | segment->data = segment->padbuf; |
237 | return 0; | 236 | return 0; |
238 | } | 237 | } |
239 | } | 238 | } |
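Dropping the function-local static padbuf removes a piece of shared state: every iSCSI/TCP connection in the system consumed its PDU pad bytes through the same small buffer, so two connections receiving padding at the same time could step on each other; the pad area now lives in the per-connection segment (segment->padbuf). The underlying pitfall is plain C, as this small standalone program shows:

#include <stdio.h>

/* A 'static' local is one object shared by every caller, not
 * per-caller scratch space -- which is why a common pad buffer is
 * unsafe once several connections can recv into it concurrently. */
static char *pad_scratch(void)
{
	static char pad[4];
	return pad;
}

int main(void)
{
	printf("same buffer on every call: %p %p\n",
	       (void *)pad_scratch(), (void *)pad_scratch());
	return 0;
}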
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 744838780ada..1c558d3bce18 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -717,7 +717,7 @@ int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) | |||
717 | struct domain_device *dev = sdev_to_domain_dev(sdev); | 717 | struct domain_device *dev = sdev_to_domain_dev(sdev); |
718 | 718 | ||
719 | if (dev_is_sata(dev)) | 719 | if (dev_is_sata(dev)) |
720 | return ata_scsi_ioctl(sdev, cmd, arg); | 720 | return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg); |
721 | 721 | ||
722 | return -EINVAL; | 722 | return -EINVAL; |
723 | } | 723 | } |
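The ipr and libsas hunks make the same substitution: SATA ioctls go through ata_sas_scsi_ioctl() instead of ata_scsi_ioctl(). The older helper derives the ata_port from the Scsi_Host it is handed, which is only valid when the host is owned by libata; for SATA devices reached through a SAS or ipr host the port has to be supplied explicitly, which is the shape of the new call as used above (prototype from <linux/libata.h>):

int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
		       int cmd, void __user *arg);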
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index a8f30bdaff69..a7302480bc4a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -5258,6 +5258,7 @@ lpfc_send_els_event(struct lpfc_vport *vport, | |||
5258 | sizeof(struct lpfc_name)); | 5258 | sizeof(struct lpfc_name)); |
5259 | break; | 5259 | break; |
5260 | default: | 5260 | default: |
5261 | kfree(els_data); | ||
5261 | return; | 5262 | return; |
5262 | } | 5263 | } |
5263 | memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); | 5264 | memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); |
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index ce48e2d0193c..ca0dd33497ec 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c | |||
@@ -290,11 +290,11 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data) | |||
290 | 290 | ||
291 | if (tag != dev->tag) | 291 | if (tag != dev->tag) |
292 | dev_err(&dev->sbd.core, | 292 | dev_err(&dev->sbd.core, |
293 | "%s:%u: tag mismatch, got %lx, expected %lx\n", | 293 | "%s:%u: tag mismatch, got %llx, expected %llx\n", |
294 | __func__, __LINE__, tag, dev->tag); | 294 | __func__, __LINE__, tag, dev->tag); |
295 | 295 | ||
296 | if (res) { | 296 | if (res) { |
297 | dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n", | 297 | dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", |
298 | __func__, __LINE__, res, status); | 298 | __func__, __LINE__, res, status); |
299 | return IRQ_HANDLED; | 299 | return IRQ_HANDLED; |
300 | } | 300 | } |
@@ -364,7 +364,7 @@ static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev) | |||
364 | 364 | ||
365 | if (dev->blk_size != CD_FRAMESIZE) { | 365 | if (dev->blk_size != CD_FRAMESIZE) { |
366 | dev_err(&dev->sbd.core, | 366 | dev_err(&dev->sbd.core, |
367 | "%s:%u: cannot handle block size %lu\n", __func__, | 367 | "%s:%u: cannot handle block size %llu\n", __func__, |
368 | __LINE__, dev->blk_size); | 368 | __LINE__, dev->blk_size); |
369 | return -EINVAL; | 369 | return -EINVAL; |
370 | } | 370 | } |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index c7acef50d5da..f4c57227ec18 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1016,6 +1016,9 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
1016 | struct Scsi_Host *host = rport_to_shost(rport); | 1016 | struct Scsi_Host *host = rport_to_shost(rport); |
1017 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; | 1017 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; |
1018 | 1018 | ||
1019 | if (!fcport) | ||
1020 | return; | ||
1021 | |||
1019 | qla2x00_abort_fcport_cmds(fcport); | 1022 | qla2x00_abort_fcport_cmds(fcport); |
1020 | 1023 | ||
1021 | /* | 1024 | /* |
@@ -1033,6 +1036,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) | |||
1033 | { | 1036 | { |
1034 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; | 1037 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; |
1035 | 1038 | ||
1039 | if (!fcport) | ||
1040 | return; | ||
1041 | |||
1036 | /* | 1042 | /* |
1037 | * At this point all fcport's software-states are cleared. Perform any | 1043 | * At this point all fcport's software-states are cleared. Perform any |
1038 | * final cleanup of firmware resources (PCBs and XCBs). | 1044 | * final cleanup of firmware resources (PCBs and XCBs). |
@@ -1259,13 +1265,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1259 | test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) | 1265 | test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) |
1260 | msleep(1000); | 1266 | msleep(1000); |
1261 | 1267 | ||
1262 | if (ha->mqenable) { | ||
1263 | if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) | ||
1264 | qla_printk(KERN_WARNING, ha, | ||
1265 | "Queue delete failed.\n"); | ||
1266 | vha->req_ques[0] = ha->req_q_map[0]->id; | ||
1267 | } | ||
1268 | |||
1269 | qla24xx_disable_vp(vha); | 1268 | qla24xx_disable_vp(vha); |
1270 | 1269 | ||
1271 | fc_remove_host(vha->host); | 1270 | fc_remove_host(vha->host); |
@@ -1287,6 +1286,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1287 | vha->host_no, vha->vp_idx, vha)); | 1286 | vha->host_no, vha->vp_idx, vha)); |
1288 | } | 1287 | } |
1289 | 1288 | ||
1289 | if (ha->mqenable) { | ||
1290 | if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) | ||
1291 | qla_printk(KERN_WARNING, ha, | ||
1292 | "Queue delete failed.\n"); | ||
1293 | } | ||
1294 | |||
1290 | scsi_host_put(vha->host); | 1295 | scsi_host_put(vha->host); |
1291 | qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); | 1296 | qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); |
1292 | return 0; | 1297 | return 0; |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 023ee77fb027..e0c5bb54b258 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2135,6 +2135,7 @@ struct qla_msix_entry { | |||
2135 | /* Work events. */ | 2135 | /* Work events. */ |
2136 | enum qla_work_type { | 2136 | enum qla_work_type { |
2137 | QLA_EVT_AEN, | 2137 | QLA_EVT_AEN, |
2138 | QLA_EVT_IDC_ACK, | ||
2138 | }; | 2139 | }; |
2139 | 2140 | ||
2140 | 2141 | ||
@@ -2149,6 +2150,10 @@ struct qla_work_evt { | |||
2149 | enum fc_host_event_code code; | 2150 | enum fc_host_event_code code; |
2150 | u32 data; | 2151 | u32 data; |
2151 | } aen; | 2152 | } aen; |
2153 | struct { | ||
2154 | #define QLA_IDC_ACK_REGS 7 | ||
2155 | uint16_t mb[QLA_IDC_ACK_REGS]; | ||
2156 | } idc_ack; | ||
2152 | } u; | 2157 | } u; |
2153 | }; | 2158 | }; |
2154 | 2159 | ||
diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h index d78d35e681ab..d6ea69df7c5c 100644 --- a/drivers/scsi/qla2xxx/qla_devtbl.h +++ b/drivers/scsi/qla2xxx/qla_devtbl.h | |||
@@ -72,7 +72,7 @@ static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = { | |||
72 | "QLA2462", "Sun PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x141 */ | 72 | "QLA2462", "Sun PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x141 */ |
73 | "QLE2460", "Sun PCI-Express to 2Gb FC, Single Channel", /* 0x142 */ | 73 | "QLE2460", "Sun PCI-Express to 2Gb FC, Single Channel", /* 0x142 */ |
74 | "QLE2462", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x143 */ | 74 | "QLE2462", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x143 */ |
75 | "QEM2462" "Server I/O Module 4Gb FC, Dual Channel", /* 0x144 */ | 75 | "QEM2462", "Server I/O Module 4Gb FC, Dual Channel", /* 0x144 */ |
76 | "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */ | 76 | "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */ |
77 | "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */ | 77 | "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */ |
78 | "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */ | 78 | "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */ |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 7abb045a0410..ffff42554087 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
@@ -1402,6 +1402,8 @@ struct access_chip_rsp_84xx { | |||
1402 | #define MBA_IDC_NOTIFY 0x8101 | 1402 | #define MBA_IDC_NOTIFY 0x8101 |
1403 | #define MBA_IDC_TIME_EXT 0x8102 | 1403 | #define MBA_IDC_TIME_EXT 0x8102 |
1404 | 1404 | ||
1405 | #define MBC_IDC_ACK 0x101 | ||
1406 | |||
1405 | struct nvram_81xx { | 1407 | struct nvram_81xx { |
1406 | /* NVRAM header. */ | 1408 | /* NVRAM header. */ |
1407 | uint8_t id[4]; | 1409 | uint8_t id[4]; |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index ba4913353752..6de283f8f111 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -34,6 +34,7 @@ extern void qla24xx_update_fw_options(scsi_qla_host_t *); | |||
34 | extern void qla81xx_update_fw_options(scsi_qla_host_t *); | 34 | extern void qla81xx_update_fw_options(scsi_qla_host_t *); |
35 | extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); | 35 | extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); |
36 | extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); | 36 | extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); |
37 | extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *); | ||
37 | 38 | ||
38 | extern int qla2x00_loop_resync(scsi_qla_host_t *); | 39 | extern int qla2x00_loop_resync(scsi_qla_host_t *); |
39 | 40 | ||
@@ -71,6 +72,7 @@ extern int qla2x00_loop_reset(scsi_qla_host_t *); | |||
71 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); | 72 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); |
72 | extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum | 73 | extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum |
73 | fc_host_event_code, u32); | 74 | fc_host_event_code, u32); |
75 | extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); | ||
74 | 76 | ||
75 | extern void qla2x00_abort_fcport_cmds(fc_port_t *); | 77 | extern void qla2x00_abort_fcport_cmds(fc_port_t *); |
76 | extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, | 78 | extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, |
@@ -265,6 +267,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); | |||
265 | 267 | ||
266 | extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); | 268 | extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); |
267 | 269 | ||
270 | extern int qla81xx_idc_ack(scsi_qla_host_t *, uint16_t *); | ||
271 | |||
268 | /* | 272 | /* |
269 | * Global Function Prototypes in qla_isr.c source file. | 273 | * Global Function Prototypes in qla_isr.c source file. |
270 | */ | 274 | */ |
@@ -375,10 +379,8 @@ extern int qla2x00_dfs_remove(scsi_qla_host_t *); | |||
375 | 379 | ||
376 | /* Globa function prototypes for multi-q */ | 380 | /* Globa function prototypes for multi-q */ |
377 | extern int qla25xx_request_irq(struct rsp_que *); | 381 | extern int qla25xx_request_irq(struct rsp_que *); |
378 | extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *, | 382 | extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); |
379 | uint8_t); | 383 | extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); |
380 | extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *, | ||
381 | uint8_t); | ||
382 | extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, | 384 | extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, |
383 | uint16_t, uint8_t, uint8_t); | 385 | uint16_t, uint8_t, uint8_t); |
384 | extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, | 386 | extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 2d4f32b4df5c..986501759ad4 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1226,9 +1226,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
1226 | icb->firmware_options_2 |= | 1226 | icb->firmware_options_2 |= |
1227 | __constant_cpu_to_le32(BIT_18); | 1227 | __constant_cpu_to_le32(BIT_18); |
1228 | 1228 | ||
1229 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); | 1229 | icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22); |
1230 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); | 1230 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); |
1231 | ha->rsp_q_map[0]->options = icb->firmware_options_2; | ||
1232 | 1231 | ||
1233 | WRT_REG_DWORD(®->isp25mq.req_q_in, 0); | 1232 | WRT_REG_DWORD(®->isp25mq.req_q_in, 0); |
1234 | WRT_REG_DWORD(®->isp25mq.req_q_out, 0); | 1233 | WRT_REG_DWORD(®->isp25mq.req_q_out, 0); |
@@ -1258,35 +1257,48 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1258 | { | 1257 | { |
1259 | int rval; | 1258 | int rval; |
1260 | unsigned long flags = 0; | 1259 | unsigned long flags = 0; |
1261 | int cnt; | 1260 | int cnt, que; |
1262 | struct qla_hw_data *ha = vha->hw; | 1261 | struct qla_hw_data *ha = vha->hw; |
1263 | struct req_que *req = ha->req_q_map[0]; | 1262 | struct req_que *req; |
1264 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 1263 | struct rsp_que *rsp; |
1264 | struct scsi_qla_host *vp; | ||
1265 | struct mid_init_cb_24xx *mid_init_cb = | 1265 | struct mid_init_cb_24xx *mid_init_cb = |
1266 | (struct mid_init_cb_24xx *) ha->init_cb; | 1266 | (struct mid_init_cb_24xx *) ha->init_cb; |
1267 | 1267 | ||
1268 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1268 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1269 | 1269 | ||
1270 | /* Clear outstanding commands array. */ | 1270 | /* Clear outstanding commands array. */ |
1271 | for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) | 1271 | for (que = 0; que < ha->max_queues; que++) { |
1272 | req->outstanding_cmds[cnt] = NULL; | 1272 | req = ha->req_q_map[que]; |
1273 | if (!req) | ||
1274 | continue; | ||
1275 | for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) | ||
1276 | req->outstanding_cmds[cnt] = NULL; | ||
1273 | 1277 | ||
1274 | req->current_outstanding_cmd = 0; | 1278 | req->current_outstanding_cmd = 0; |
1275 | 1279 | ||
1276 | /* Clear RSCN queue. */ | 1280 | /* Initialize firmware. */ |
1277 | vha->rscn_in_ptr = 0; | 1281 | req->ring_ptr = req->ring; |
1278 | vha->rscn_out_ptr = 0; | 1282 | req->ring_index = 0; |
1283 | req->cnt = req->length; | ||
1284 | } | ||
1279 | 1285 | ||
1280 | /* Initialize firmware. */ | 1286 | for (que = 0; que < ha->max_queues; que++) { |
1281 | req->ring_ptr = req->ring; | 1287 | rsp = ha->rsp_q_map[que]; |
1282 | req->ring_index = 0; | 1288 | if (!rsp) |
1283 | req->cnt = req->length; | 1289 | continue; |
1284 | rsp->ring_ptr = rsp->ring; | 1290 | rsp->ring_ptr = rsp->ring; |
1285 | rsp->ring_index = 0; | 1291 | rsp->ring_index = 0; |
1286 | 1292 | ||
1287 | /* Initialize response queue entries */ | 1293 | /* Initialize response queue entries */ |
1288 | qla2x00_init_response_q_entries(rsp); | 1294 | qla2x00_init_response_q_entries(rsp); |
1295 | } | ||
1289 | 1296 | ||
1297 | /* Clear RSCN queue. */ | ||
1298 | list_for_each_entry(vp, &ha->vp_list, list) { | ||
1299 | vp->rscn_in_ptr = 0; | ||
1300 | vp->rscn_out_ptr = 0; | ||
1301 | } | ||
1290 | ha->isp_ops->config_rings(vha); | 1302 | ha->isp_ops->config_rings(vha); |
1291 | 1303 | ||
1292 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1304 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
@@ -3212,8 +3224,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) | |||
3212 | int rval = QLA_SUCCESS; | 3224 | int rval = QLA_SUCCESS; |
3213 | uint32_t wait_time; | 3225 | uint32_t wait_time; |
3214 | struct qla_hw_data *ha = vha->hw; | 3226 | struct qla_hw_data *ha = vha->hw; |
3215 | struct req_que *req = ha->req_q_map[0]; | 3227 | struct req_que *req = ha->req_q_map[vha->req_ques[0]]; |
3216 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 3228 | struct rsp_que *rsp = req->rsp; |
3217 | 3229 | ||
3218 | atomic_set(&vha->loop_state, LOOP_UPDATE); | 3230 | atomic_set(&vha->loop_state, LOOP_UPDATE); |
3219 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); | 3231 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); |
@@ -3480,7 +3492,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) | |||
3480 | rsp = ha->rsp_q_map[i]; | 3492 | rsp = ha->rsp_q_map[i]; |
3481 | if (rsp) { | 3493 | if (rsp) { |
3482 | rsp->options &= ~BIT_0; | 3494 | rsp->options &= ~BIT_0; |
3483 | ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options); | 3495 | ret = qla25xx_init_rsp_que(base_vha, rsp); |
3484 | if (ret != QLA_SUCCESS) | 3496 | if (ret != QLA_SUCCESS) |
3485 | DEBUG2_17(printk(KERN_WARNING | 3497 | DEBUG2_17(printk(KERN_WARNING |
3486 | "%s Rsp que:%d init failed\n", __func__, | 3498 | "%s Rsp que:%d init failed\n", __func__, |
@@ -3492,15 +3504,16 @@ qla25xx_init_queues(struct qla_hw_data *ha) | |||
3492 | } | 3504 | } |
3493 | req = ha->req_q_map[i]; | 3505 | req = ha->req_q_map[i]; |
3494 | if (req) { | 3506 | if (req) { |
3507 | /* Clear outstanding commands array. */ | ||
3495 | req->options &= ~BIT_0; | 3508 | req->options &= ~BIT_0; |
3496 | ret = qla25xx_init_req_que(base_vha, req, req->options); | 3509 | ret = qla25xx_init_req_que(base_vha, req); |
3497 | if (ret != QLA_SUCCESS) | 3510 | if (ret != QLA_SUCCESS) |
3498 | DEBUG2_17(printk(KERN_WARNING | 3511 | DEBUG2_17(printk(KERN_WARNING |
3499 | "%s Req que:%d init failed\n", __func__, | 3512 | "%s Req que:%d init failed\n", __func__, |
3500 | req->id)); | 3513 | req->id)); |
3501 | else | 3514 | else |
3502 | DEBUG2_17(printk(KERN_WARNING | 3515 | DEBUG2_17(printk(KERN_WARNING |
3503 | "%s Rsp que:%d inited\n", __func__, | 3516 | "%s Req que:%d inited\n", __func__, |
3504 | req->id)); | 3517 | req->id)); |
3505 | } | 3518 | } |
3506 | } | 3519 | } |
@@ -3548,6 +3561,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha) | |||
3548 | WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); | 3561 | WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); |
3549 | RD_REG_DWORD(®->hccr); | 3562 | RD_REG_DWORD(®->hccr); |
3550 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 3563 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
3564 | |||
3565 | if (IS_NOPOLLING_TYPE(ha)) | ||
3566 | ha->isp_ops->enable_intrs(ha); | ||
3551 | } | 3567 | } |
3552 | 3568 | ||
3553 | /* On sparc systems, obtain port and node WWN from firmware | 3569 | /* On sparc systems, obtain port and node WWN from firmware |
@@ -3833,6 +3849,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3833 | uint32_t i; | 3849 | uint32_t i; |
3834 | struct qla_hw_data *ha = vha->hw; | 3850 | struct qla_hw_data *ha = vha->hw; |
3835 | struct req_que *req = ha->req_q_map[0]; | 3851 | struct req_que *req = ha->req_q_map[0]; |
3852 | |||
3853 | qla_printk(KERN_INFO, ha, | ||
3854 | "FW: Loading from flash (%x)...\n", ha->flt_region_fw); | ||
3855 | |||
3836 | rval = QLA_SUCCESS; | 3856 | rval = QLA_SUCCESS; |
3837 | 3857 | ||
3838 | segments = FA_RISC_CODE_SEGMENTS; | 3858 | segments = FA_RISC_CODE_SEGMENTS; |
@@ -4008,8 +4028,8 @@ fail_fw_integrity: | |||
4008 | return QLA_FUNCTION_FAILED; | 4028 | return QLA_FUNCTION_FAILED; |
4009 | } | 4029 | } |
4010 | 4030 | ||
4011 | int | 4031 | static int |
4012 | qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | 4032 | qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) |
4013 | { | 4033 | { |
4014 | int rval; | 4034 | int rval; |
4015 | int segments, fragment; | 4035 | int segments, fragment; |
@@ -4029,12 +4049,12 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
4029 | qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " | 4049 | qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " |
4030 | "from: " QLA_FW_URL ".\n"); | 4050 | "from: " QLA_FW_URL ".\n"); |
4031 | 4051 | ||
4032 | /* Try to load RISC code from flash. */ | 4052 | return QLA_FUNCTION_FAILED; |
4033 | qla_printk(KERN_ERR, ha, "Attempting to load (potentially " | ||
4034 | "outdated) firmware from flash.\n"); | ||
4035 | return qla24xx_load_risc_flash(vha, srisc_addr); | ||
4036 | } | 4053 | } |
4037 | 4054 | ||
4055 | qla_printk(KERN_INFO, ha, | ||
4056 | "FW: Loading via request-firmware...\n"); | ||
4057 | |||
4038 | rval = QLA_SUCCESS; | 4058 | rval = QLA_SUCCESS; |
4039 | 4059 | ||
4040 | segments = FA_RISC_CODE_SEGMENTS; | 4060 | segments = FA_RISC_CODE_SEGMENTS; |
@@ -4119,6 +4139,40 @@ fail_fw_integrity: | |||
4119 | return QLA_FUNCTION_FAILED; | 4139 | return QLA_FUNCTION_FAILED; |
4120 | } | 4140 | } |
4121 | 4141 | ||
4142 | int | ||
4143 | qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | ||
4144 | { | ||
4145 | int rval; | ||
4146 | |||
4147 | /* | ||
4148 | * FW Load priority: | ||
4149 | * 1) Firmware via request-firmware interface (.bin file). | ||
4150 | * 2) Firmware residing in flash. | ||
4151 | */ | ||
4152 | rval = qla24xx_load_risc_blob(vha, srisc_addr); | ||
4153 | if (rval == QLA_SUCCESS) | ||
4154 | return rval; | ||
4155 | |||
4156 | return qla24xx_load_risc_flash(vha, srisc_addr); | ||
4157 | } | ||
4158 | |||
4159 | int | ||
4160 | qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | ||
4161 | { | ||
4162 | int rval; | ||
4163 | |||
4164 | /* | ||
4165 | * FW Load priority: | ||
4166 | * 1) Firmware residing in flash. | ||
4167 | * 2) Firmware via request-firmware interface (.bin file). | ||
4168 | */ | ||
4169 | rval = qla24xx_load_risc_flash(vha, srisc_addr); | ||
4170 | if (rval == QLA_SUCCESS) | ||
4171 | return rval; | ||
4172 | |||
4173 | return qla24xx_load_risc_blob(vha, srisc_addr); | ||
4174 | } | ||
4175 | |||
4122 | void | 4176 | void |
4123 | qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) | 4177 | qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) |
4124 | { | 4178 | { |
@@ -4151,8 +4205,8 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) | |||
4151 | uint16_t mb[MAILBOX_REGISTER_COUNT]; | 4205 | uint16_t mb[MAILBOX_REGISTER_COUNT]; |
4152 | struct qla_hw_data *ha = vha->hw; | 4206 | struct qla_hw_data *ha = vha->hw; |
4153 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | 4207 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); |
4154 | struct req_que *req = ha->req_q_map[0]; | 4208 | struct req_que *req = ha->req_q_map[vha->req_ques[0]]; |
4155 | struct rsp_que *rsp = ha->rsp_q_map[0]; | 4209 | struct rsp_que *rsp = req->rsp; |
4156 | 4210 | ||
4157 | if (!vha->vp_idx) | 4211 | if (!vha->vp_idx) |
4158 | return -EINVAL; | 4212 | return -EINVAL; |
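The tail of the qla_init.c hunk splits firmware loading into two orderings: qla24xx_load_risc() tries the request-firmware image first and falls back to the copy in flash, while the new qla81xx_load_risc() prefers flash and only then asks userspace. A hedged sketch of the request-firmware leg that qla24xx_load_risc_blob() wraps; the image name here is an assumption for illustration, the driver actually resolves it per ISP type, and the segment/DMA loop is elided:

#include <linux/firmware.h>

static int load_risc_blob_sketch(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	const struct firmware *blob;

	if (request_firmware(&blob, "ql2400_fw.bin", &ha->pdev->dev))
		return QLA_FUNCTION_FAILED;	/* caller then tries flash */

	/* ... burst-copy blob->data into RISC memory via load_ram ... */

	release_firmware(blob);
	return QLA_SUCCESS;
}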
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 789fc576f222..f250e5b7897c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -266,6 +266,40 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
266 | } | 266 | } |
267 | } | 267 | } |
268 | 268 | ||
269 | static void | ||
270 | qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) | ||
271 | { | ||
272 | static char *event[] = | ||
273 | { "Complete", "Request Notification", "Time Extension" }; | ||
274 | int rval; | ||
275 | struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; | ||
276 | uint16_t __iomem *wptr; | ||
277 | uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; | ||
278 | |||
279 | /* Seed data -- mailbox1 -> mailbox7. */ | ||
280 | wptr = (uint16_t __iomem *)®24->mailbox1; | ||
281 | for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) | ||
282 | mb[cnt] = RD_REG_WORD(wptr); | ||
283 | |||
284 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " | ||
285 | "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, | ||
286 | event[aen & 0xff], | ||
287 | mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); | ||
288 | |||
289 | /* Acknowledgement needed? [Notify && non-zero timeout]. */ | ||
290 | timeout = (descr >> 8) & 0xf; | ||
291 | if (aen != MBA_IDC_NOTIFY || !timeout) | ||
292 | return; | ||
293 | |||
294 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " | ||
295 | "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); | ||
296 | |||
297 | rval = qla2x00_post_idc_ack_work(vha, mb); | ||
298 | if (rval != QLA_SUCCESS) | ||
299 | qla_printk(KERN_WARNING, vha->hw, | ||
300 | "IDC failed to post ACK.\n"); | ||
301 | } | ||
302 | |||
269 | /** | 303 | /** |
270 | * qla2x00_async_event() - Process aynchronous events. | 304 | * qla2x00_async_event() - Process aynchronous events. |
271 | * @ha: SCSI driver HA context | 305 | * @ha: SCSI driver HA context |
@@ -714,21 +748,9 @@ skip_rio: | |||
714 | "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); | 748 | "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); |
715 | break; | 749 | break; |
716 | case MBA_IDC_COMPLETE: | 750 | case MBA_IDC_COMPLETE: |
717 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | ||
718 | "Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2], | ||
719 | mb[3])); | ||
720 | break; | ||
721 | case MBA_IDC_NOTIFY: | 751 | case MBA_IDC_NOTIFY: |
722 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | ||
723 | "Request Notification -- %04x %04x %04x\n", vha->host_no, | ||
724 | mb[1], mb[2], mb[3])); | ||
725 | /**** Mailbox registers 4 - 7 valid!!! */ | ||
726 | break; | ||
727 | case MBA_IDC_TIME_EXT: | 752 | case MBA_IDC_TIME_EXT: |
728 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | 753 | qla81xx_idc_event(vha, mb[0], mb[1]); |
729 | "Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1], | ||
730 | mb[2], mb[3])); | ||
731 | /**** Mailbox registers 4 - 7 valid!!! */ | ||
732 | break; | 754 | break; |
733 | } | 755 | } |
734 | 756 | ||
@@ -1707,7 +1729,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
1707 | struct qla_hw_data *ha; | 1729 | struct qla_hw_data *ha; |
1708 | struct rsp_que *rsp; | 1730 | struct rsp_que *rsp; |
1709 | struct device_reg_24xx __iomem *reg; | 1731 | struct device_reg_24xx __iomem *reg; |
1710 | uint16_t msix_disabled_hccr = 0; | ||
1711 | 1732 | ||
1712 | rsp = (struct rsp_que *) dev_id; | 1733 | rsp = (struct rsp_que *) dev_id; |
1713 | if (!rsp) { | 1734 | if (!rsp) { |
@@ -1720,17 +1741,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
1720 | 1741 | ||
1721 | spin_lock_irq(&ha->hardware_lock); | 1742 | spin_lock_irq(&ha->hardware_lock); |
1722 | 1743 | ||
1723 | msix_disabled_hccr = rsp->options; | ||
1724 | if (!rsp->id) | ||
1725 | msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22); | ||
1726 | else | ||
1727 | msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6); | ||
1728 | |||
1729 | qla24xx_process_response_queue(rsp); | 1744 | qla24xx_process_response_queue(rsp); |
1730 | 1745 | ||
1731 | if (!msix_disabled_hccr) | ||
1732 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | ||
1733 | |||
1734 | spin_unlock_irq(&ha->hardware_lock); | 1746 | spin_unlock_irq(&ha->hardware_lock); |
1735 | 1747 | ||
1736 | return IRQ_HANDLED; | 1748 | return IRQ_HANDLED; |
@@ -1868,6 +1880,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha) | |||
1868 | static int | 1880 | static int |
1869 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | 1881 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) |
1870 | { | 1882 | { |
1883 | #define MIN_MSIX_COUNT 2 | ||
1871 | int i, ret; | 1884 | int i, ret; |
1872 | struct msix_entry *entries; | 1885 | struct msix_entry *entries; |
1873 | struct qla_msix_entry *qentry; | 1886 | struct qla_msix_entry *qentry; |
@@ -1883,12 +1896,16 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
1883 | 1896 | ||
1884 | ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); | 1897 | ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); |
1885 | if (ret) { | 1898 | if (ret) { |
1899 | if (ret < MIN_MSIX_COUNT) | ||
1900 | goto msix_failed; | ||
1901 | |||
1886 | qla_printk(KERN_WARNING, ha, | 1902 | qla_printk(KERN_WARNING, ha, |
1887 | "MSI-X: Failed to enable support -- %d/%d\n" | 1903 | "MSI-X: Failed to enable support -- %d/%d\n" |
1888 | " Retry with %d vectors\n", ha->msix_count, ret, ret); | 1904 | " Retry with %d vectors\n", ha->msix_count, ret, ret); |
1889 | ha->msix_count = ret; | 1905 | ha->msix_count = ret; |
1890 | ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); | 1906 | ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); |
1891 | if (ret) { | 1907 | if (ret) { |
1908 | msix_failed: | ||
1892 | qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" | 1909 | qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" |
1893 | " support, giving up -- %d/%d\n", | 1910 | " support, giving up -- %d/%d\n", |
1894 | ha->msix_count, ret); | 1911 | ha->msix_count, ret); |
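The new qla81xx_idc_event() runs inside the interrupt handler, where the mailbox command needed to acknowledge an IDC notification cannot be issued because qla2x00_mailbox_command() sleeps on a completion. It therefore snapshots mailbox1..7 and posts a QLA_EVT_IDC_ACK work event; the DPC thread later performs the ACK through qla81xx_idc_ack(), added in qla_mbx.c below. The same defer-to-process-context shape with a plain workqueue item, as a generic sketch rather than the driver's own mechanism:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/string.h>

struct ack_work {
	struct work_struct work;
	u16 mb[7];			/* seeded from mailbox1..7 in the ISR */
};

static void ack_fn(struct work_struct *work)
{
	struct ack_work *aw = container_of(work, struct ack_work, work);

	/* process context: safe to sleep, e.g. issue the mailbox command */
	kfree(aw);
}

static int post_ack(u16 *mb)		/* callable from hard-irq context */
{
	struct ack_work *aw = kzalloc(sizeof(*aw), GFP_ATOMIC);

	if (!aw)
		return -ENOMEM;
	INIT_WORK(&aw->work, ack_fn);
	memcpy(aw->mb, mb, sizeof(aw->mb));
	schedule_work(&aw->work);
	return 0;
}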
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index db4df45234a5..4c7504cb3990 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -58,14 +58,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
58 | * seconds. This is to serialize actual issuing of mailbox cmds during | 58 | * seconds. This is to serialize actual issuing of mailbox cmds during |
59 | * non ISP abort time. | 59 | * non ISP abort time. |
60 | */ | 60 | */ |
61 | if (!abort_active) { | 61 | if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { |
62 | if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, | 62 | /* Timeout occurred. Return error. */ |
63 | mcp->tov * HZ)) { | 63 | DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " |
64 | /* Timeout occurred. Return error. */ | 64 | "Exiting.\n", __func__, base_vha->host_no)); |
65 | DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " | 65 | return QLA_FUNCTION_TIMEOUT; |
66 | "Exiting.\n", __func__, base_vha->host_no)); | ||
67 | return QLA_FUNCTION_TIMEOUT; | ||
68 | } | ||
69 | } | 66 | } |
70 | 67 | ||
71 | ha->flags.mbox_busy = 1; | 68 | ha->flags.mbox_busy = 1; |
@@ -265,8 +262,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
265 | } | 262 | } |
266 | 263 | ||
267 | /* Allow next mbx cmd to come in. */ | 264 | /* Allow next mbx cmd to come in. */ |
268 | if (!abort_active) | 265 | complete(&ha->mbx_cmd_comp); |
269 | complete(&ha->mbx_cmd_comp); | ||
270 | 266 | ||
271 | if (rval) { | 267 | if (rval) { |
272 | DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " | 268 | DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " |
@@ -3094,8 +3090,7 @@ verify_done: | |||
3094 | } | 3090 | } |
3095 | 3091 | ||
3096 | int | 3092 | int |
3097 | qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | 3093 | qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) |
3098 | uint8_t options) | ||
3099 | { | 3094 | { |
3100 | int rval; | 3095 | int rval; |
3101 | unsigned long flags; | 3096 | unsigned long flags; |
@@ -3105,7 +3100,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
3105 | struct qla_hw_data *ha = vha->hw; | 3100 | struct qla_hw_data *ha = vha->hw; |
3106 | 3101 | ||
3107 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3102 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
3108 | mcp->mb[1] = options; | 3103 | mcp->mb[1] = req->options; |
3109 | mcp->mb[2] = MSW(LSD(req->dma)); | 3104 | mcp->mb[2] = MSW(LSD(req->dma)); |
3110 | mcp->mb[3] = LSW(LSD(req->dma)); | 3105 | mcp->mb[3] = LSW(LSD(req->dma)); |
3111 | mcp->mb[6] = MSW(MSD(req->dma)); | 3106 | mcp->mb[6] = MSW(MSD(req->dma)); |
@@ -3132,7 +3127,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
3132 | mcp->tov = 60; | 3127 | mcp->tov = 60; |
3133 | 3128 | ||
3134 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3129 | spin_lock_irqsave(&ha->hardware_lock, flags); |
3135 | if (!(options & BIT_0)) { | 3130 | if (!(req->options & BIT_0)) { |
3136 | WRT_REG_DWORD(®->req_q_in, 0); | 3131 | WRT_REG_DWORD(®->req_q_in, 0); |
3137 | WRT_REG_DWORD(®->req_q_out, 0); | 3132 | WRT_REG_DWORD(®->req_q_out, 0); |
3138 | } | 3133 | } |
@@ -3146,8 +3141,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
3146 | } | 3141 | } |
3147 | 3142 | ||
3148 | int | 3143 | int |
3149 | qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | 3144 | qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) |
3150 | uint8_t options) | ||
3151 | { | 3145 | { |
3152 | int rval; | 3146 | int rval; |
3153 | unsigned long flags; | 3147 | unsigned long flags; |
@@ -3157,7 +3151,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
3157 | struct qla_hw_data *ha = vha->hw; | 3151 | struct qla_hw_data *ha = vha->hw; |
3158 | 3152 | ||
3159 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3153 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
3160 | mcp->mb[1] = options; | 3154 | mcp->mb[1] = rsp->options; |
3161 | mcp->mb[2] = MSW(LSD(rsp->dma)); | 3155 | mcp->mb[2] = MSW(LSD(rsp->dma)); |
3162 | mcp->mb[3] = LSW(LSD(rsp->dma)); | 3156 | mcp->mb[3] = LSW(LSD(rsp->dma)); |
3163 | mcp->mb[6] = MSW(MSD(rsp->dma)); | 3157 | mcp->mb[6] = MSW(MSD(rsp->dma)); |
@@ -3182,7 +3176,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
3182 | mcp->tov = 60; | 3176 | mcp->tov = 60; |
3183 | 3177 | ||
3184 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3178 | spin_lock_irqsave(&ha->hardware_lock, flags); |
3185 | if (!(options & BIT_0)) { | 3179 | if (!(rsp->options & BIT_0)) { |
3186 | WRT_REG_DWORD(®->rsp_q_out, 0); | 3180 | WRT_REG_DWORD(®->rsp_q_out, 0); |
3187 | WRT_REG_DWORD(®->rsp_q_in, 0); | 3181 | WRT_REG_DWORD(®->rsp_q_in, 0); |
3188 | } | 3182 | } |
@@ -3197,3 +3191,29 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
3197 | return rval; | 3191 | return rval; |
3198 | } | 3192 | } |
3199 | 3193 | ||
3194 | int | ||
3195 | qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) | ||
3196 | { | ||
3197 | int rval; | ||
3198 | mbx_cmd_t mc; | ||
3199 | mbx_cmd_t *mcp = &mc; | ||
3200 | |||
3201 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); | ||
3202 | |||
3203 | mcp->mb[0] = MBC_IDC_ACK; | ||
3204 | memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); | ||
3205 | mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3206 | mcp->in_mb = MBX_0; | ||
3207 | mcp->tov = MBX_TOV_SECONDS; | ||
3208 | mcp->flags = 0; | ||
3209 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3210 | |||
3211 | if (rval != QLA_SUCCESS) { | ||
3212 | DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, | ||
3213 | vha->host_no, rval, mcp->mb[0])); | ||
3214 | } else { | ||
3215 | DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); | ||
3216 | } | ||
3217 | |||
3218 | return rval; | ||
3219 | } | ||
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 886323130fcc..3f23932210c4 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -396,7 +396,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
396 | 396 | ||
397 | qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); | 397 | qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); |
398 | 398 | ||
399 | memset(vha->req_ques, 0, sizeof(vha->req_ques) * QLA_MAX_HOST_QUES); | 399 | memset(vha->req_ques, 0, sizeof(vha->req_ques)); |
400 | vha->req_ques[0] = ha->req_q_map[0]->id; | 400 | vha->req_ques[0] = ha->req_q_map[0]->id; |
401 | host->can_queue = ha->req_q_map[0]->length + 128; | 401 | host->can_queue = ha->req_q_map[0]->length + 128; |
402 | host->this_id = 255; | 402 | host->this_id = 255; |
@@ -471,7 +471,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
471 | 471 | ||
472 | if (req) { | 472 | if (req) { |
473 | req->options |= BIT_0; | 473 | req->options |= BIT_0; |
474 | ret = qla25xx_init_req_que(vha, req, req->options); | 474 | ret = qla25xx_init_req_que(vha, req); |
475 | } | 475 | } |
476 | if (ret == QLA_SUCCESS) | 476 | if (ret == QLA_SUCCESS) |
477 | qla25xx_free_req_que(vha, req); | 477 | qla25xx_free_req_que(vha, req); |
@@ -486,7 +486,7 @@ qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
486 | 486 | ||
487 | if (rsp) { | 487 | if (rsp) { |
488 | rsp->options |= BIT_0; | 488 | rsp->options |= BIT_0; |
489 | ret = qla25xx_init_rsp_que(vha, rsp, rsp->options); | 489 | ret = qla25xx_init_rsp_que(vha, rsp); |
490 | } | 490 | } |
491 | if (ret == QLA_SUCCESS) | 491 | if (ret == QLA_SUCCESS) |
492 | qla25xx_free_rsp_que(vha, rsp); | 492 | qla25xx_free_rsp_que(vha, rsp); |
@@ -502,7 +502,7 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos) | |||
502 | 502 | ||
503 | req->options |= BIT_3; | 503 | req->options |= BIT_3; |
504 | req->qos = qos; | 504 | req->qos = qos; |
505 | ret = qla25xx_init_req_que(vha, req, req->options); | 505 | ret = qla25xx_init_req_que(vha, req); |
506 | if (ret != QLA_SUCCESS) | 506 | if (ret != QLA_SUCCESS) |
507 | DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__)); | 507 | DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__)); |
508 | /* restore options bit */ | 508 | /* restore options bit */ |
@@ -629,9 +629,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, | |||
629 | req->ring_index = 0; | 629 | req->ring_index = 0; |
630 | req->cnt = req->length; | 630 | req->cnt = req->length; |
631 | req->id = que_id; | 631 | req->id = que_id; |
632 | req->max_q_depth = ha->req_q_map[0]->max_q_depth; | ||
632 | mutex_unlock(&ha->vport_lock); | 633 | mutex_unlock(&ha->vport_lock); |
633 | 634 | ||
634 | ret = qla25xx_init_req_que(base_vha, req, options); | 635 | ret = qla25xx_init_req_que(base_vha, req); |
635 | if (ret != QLA_SUCCESS) { | 636 | if (ret != QLA_SUCCESS) { |
636 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); | 637 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); |
637 | mutex_lock(&ha->vport_lock); | 638 | mutex_lock(&ha->vport_lock); |
@@ -709,7 +710,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, | |||
709 | if (ret) | 710 | if (ret) |
710 | goto que_failed; | 711 | goto que_failed; |
711 | 712 | ||
712 | ret = qla25xx_init_rsp_que(base_vha, rsp, options); | 713 | ret = qla25xx_init_rsp_que(base_vha, rsp); |
713 | if (ret != QLA_SUCCESS) { | 714 | if (ret != QLA_SUCCESS) { |
714 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); | 715 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); |
715 | mutex_lock(&ha->vport_lock); | 716 | mutex_lock(&ha->vport_lock); |
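One qla_mid.c line deserves a callout: sizeof(vha->req_ques) already evaluates to the size of the entire array, so the old sizeof(vha->req_ques) * QLA_MAX_HOST_QUES cleared QLA_MAX_HOST_QUES times as many bytes as intended and scribbled over whatever follows req_ques in the structure. In miniature (array type and QLA_MAX_HOST_QUES value assumed for illustration):

#include <stdio.h>
#include <string.h>

#define QLA_MAX_HOST_QUES 4	/* assumed value, for illustration only */

int main(void)
{
	unsigned short req_ques[QLA_MAX_HOST_QUES];

	/* sizeof(req_ques) is the whole array (8 bytes here); multiplying
	 * by QLA_MAX_HOST_QUES again would memset 32 bytes. */
	printf("array is %zu bytes, the old expression cleared %zu\n",
	       sizeof(req_ques), sizeof(req_ques) * QLA_MAX_HOST_QUES);

	memset(req_ques, 0, sizeof(req_ques));	/* the corrected form */
	return 0;
}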
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4a71f522f925..2f5f72531e23 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -65,8 +65,6 @@ MODULE_PARM_DESC(ql2xextended_error_logging, | |||
65 | 65 | ||
66 | static void qla2x00_free_device(scsi_qla_host_t *); | 66 | static void qla2x00_free_device(scsi_qla_host_t *); |
67 | 67 | ||
68 | static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha); | ||
69 | |||
70 | int ql2xfdmienable=1; | 68 | int ql2xfdmienable=1; |
71 | module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR); | 69 | module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR); |
72 | MODULE_PARM_DESC(ql2xfdmienable, | 70 | MODULE_PARM_DESC(ql2xfdmienable, |
@@ -800,6 +798,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
800 | if (ha->isp_ops->abort_command(vha, sp, req)) { | 798 | if (ha->isp_ops->abort_command(vha, sp, req)) { |
801 | DEBUG2(printk("%s(%ld): abort_command " | 799 | DEBUG2(printk("%s(%ld): abort_command " |
802 | "mbx failed.\n", __func__, vha->host_no)); | 800 | "mbx failed.\n", __func__, vha->host_no)); |
801 | ret = FAILED; | ||
803 | } else { | 802 | } else { |
804 | DEBUG3(printk("%s(%ld): abort_command " | 803 | DEBUG3(printk("%s(%ld): abort_command " |
805 | "mbx success.\n", __func__, vha->host_no)); | 804 | "mbx success.\n", __func__, vha->host_no)); |
@@ -1158,8 +1157,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1158 | struct req_que *req; | 1157 | struct req_que *req; |
1159 | 1158 | ||
1160 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1159 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1161 | for (que = 0; que < QLA_MAX_HOST_QUES; que++) { | 1160 | for (que = 0; que < ha->max_queues; que++) { |
1162 | req = ha->req_q_map[vha->req_ques[que]]; | 1161 | req = ha->req_q_map[que]; |
1163 | if (!req) | 1162 | if (!req) |
1164 | continue; | 1163 | continue; |
1165 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 1164 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { |
@@ -1193,7 +1192,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev) | |||
1193 | scsi_qla_host_t *vha = shost_priv(sdev->host); | 1192 | scsi_qla_host_t *vha = shost_priv(sdev->host); |
1194 | struct qla_hw_data *ha = vha->hw; | 1193 | struct qla_hw_data *ha = vha->hw; |
1195 | struct fc_rport *rport = starget_to_rport(sdev->sdev_target); | 1194 | struct fc_rport *rport = starget_to_rport(sdev->sdev_target); |
1196 | struct req_que *req = ha->req_q_map[0]; | 1195 | struct req_que *req = ha->req_q_map[vha->req_ques[0]]; |
1197 | 1196 | ||
1198 | if (sdev->tagged_supported) | 1197 | if (sdev->tagged_supported) |
1199 | scsi_activate_tcq(sdev, req->max_q_depth); | 1198 | scsi_activate_tcq(sdev, req->max_q_depth); |
@@ -1241,9 +1240,8 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type) | |||
1241 | * supported addressing method. | 1240 | * supported addressing method. |
1242 | */ | 1241 | */ |
1243 | static void | 1242 | static void |
1244 | qla2x00_config_dma_addressing(scsi_qla_host_t *vha) | 1243 | qla2x00_config_dma_addressing(struct qla_hw_data *ha) |
1245 | { | 1244 | { |
1246 | struct qla_hw_data *ha = vha->hw; | ||
1247 | /* Assume a 32bit DMA mask. */ | 1245 | /* Assume a 32bit DMA mask. */ |
1248 | ha->flags.enable_64bit_addressing = 0; | 1246 | ha->flags.enable_64bit_addressing = 0; |
1249 | 1247 | ||
@@ -1480,7 +1478,7 @@ static struct isp_operations qla81xx_isp_ops = { | |||
1480 | .reset_adapter = qla24xx_reset_adapter, | 1478 | .reset_adapter = qla24xx_reset_adapter, |
1481 | .nvram_config = qla81xx_nvram_config, | 1479 | .nvram_config = qla81xx_nvram_config, |
1482 | .update_fw_options = qla81xx_update_fw_options, | 1480 | .update_fw_options = qla81xx_update_fw_options, |
1483 | .load_risc = qla24xx_load_risc, | 1481 | .load_risc = qla81xx_load_risc, |
1484 | .pci_info_str = qla24xx_pci_info_str, | 1482 | .pci_info_str = qla24xx_pci_info_str, |
1485 | .fw_version_str = qla24xx_fw_version_str, | 1483 | .fw_version_str = qla24xx_fw_version_str, |
1486 | .intr_handler = qla24xx_intr_handler, | 1484 | .intr_handler = qla24xx_intr_handler, |
@@ -1869,6 +1867,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1869 | 1867 | ||
1870 | set_bit(0, (unsigned long *) ha->vp_idx_map); | 1868 | set_bit(0, (unsigned long *) ha->vp_idx_map); |
1871 | 1869 | ||
1870 | qla2x00_config_dma_addressing(ha); | ||
1872 | ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); | 1871 | ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); |
1873 | if (!ret) { | 1872 | if (!ret) { |
1874 | qla_printk(KERN_WARNING, ha, | 1873 | qla_printk(KERN_WARNING, ha, |
@@ -1888,13 +1887,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1888 | "[ERROR] Failed to allocate memory for scsi_host\n"); | 1887 | "[ERROR] Failed to allocate memory for scsi_host\n"); |
1889 | 1888 | ||
1890 | ret = -ENOMEM; | 1889 | ret = -ENOMEM; |
1890 | qla2x00_mem_free(ha); | ||
1891 | qla2x00_free_que(ha, req, rsp); | ||
1891 | goto probe_hw_failed; | 1892 | goto probe_hw_failed; |
1892 | } | 1893 | } |
1893 | 1894 | ||
1894 | pci_set_drvdata(pdev, base_vha); | 1895 | pci_set_drvdata(pdev, base_vha); |
1895 | 1896 | ||
1896 | qla2x00_config_dma_addressing(base_vha); | ||
1897 | |||
1898 | host = base_vha->host; | 1897 | host = base_vha->host; |
1899 | base_vha->req_ques[0] = req->id; | 1898 | base_vha->req_ques[0] = req->id; |
1900 | host->can_queue = req->length + 128; | 1899 | host->can_queue = req->length + 128; |
@@ -1917,14 +1916,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1917 | /* Set up the irqs */ | 1916 | /* Set up the irqs */ |
1918 | ret = qla2x00_request_irqs(ha, rsp); | 1917 | ret = qla2x00_request_irqs(ha, rsp); |
1919 | if (ret) | 1918 | if (ret) |
1920 | goto probe_failed; | 1919 | goto probe_init_failed; |
1921 | |||
1922 | /* Alloc arrays of request and response ring ptrs */ | 1920 | /* Alloc arrays of request and response ring ptrs */ |
1923 | if (!qla2x00_alloc_queues(ha)) { | 1921 | if (!qla2x00_alloc_queues(ha)) { |
1924 | qla_printk(KERN_WARNING, ha, | 1922 | qla_printk(KERN_WARNING, ha, |
1925 | "[ERROR] Failed to allocate memory for queue" | 1923 | "[ERROR] Failed to allocate memory for queue" |
1926 | " pointers\n"); | 1924 | " pointers\n"); |
1927 | goto probe_failed; | 1925 | goto probe_init_failed; |
1928 | } | 1926 | } |
1929 | ha->rsp_q_map[0] = rsp; | 1927 | ha->rsp_q_map[0] = rsp; |
1930 | ha->req_q_map[0] = req; | 1928 | ha->req_q_map[0] = req; |
@@ -1997,8 +1995,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1997 | 1995 | ||
1998 | return 0; | 1996 | return 0; |
1999 | 1997 | ||
2000 | probe_failed: | 1998 | probe_init_failed: |
2001 | qla2x00_free_que(ha, req, rsp); | 1999 | qla2x00_free_que(ha, req, rsp); |
2000 | ha->max_queues = 0; | ||
2001 | |||
2002 | probe_failed: | ||
2002 | qla2x00_free_device(base_vha); | 2003 | qla2x00_free_device(base_vha); |
2003 | 2004 | ||
2004 | scsi_host_put(base_vha->host); | 2005 | scsi_host_put(base_vha->host); |
@@ -2521,6 +2522,19 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, | |||
2521 | return qla2x00_post_work(vha, e, 1); | 2522 | return qla2x00_post_work(vha, e, 1); |
2522 | } | 2523 | } |
2523 | 2524 | ||
2525 | int | ||
2526 | qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) | ||
2527 | { | ||
2528 | struct qla_work_evt *e; | ||
2529 | |||
2530 | e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); | ||
2531 | if (!e) | ||
2532 | return QLA_FUNCTION_FAILED; | ||
2533 | |||
2534 | memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); | ||
2535 | return qla2x00_post_work(vha, e, 1); | ||
2536 | } | ||
2537 | |||
2524 | static void | 2538 | static void |
2525 | qla2x00_do_work(struct scsi_qla_host *vha) | 2539 | qla2x00_do_work(struct scsi_qla_host *vha) |
2526 | { | 2540 | { |
@@ -2538,6 +2552,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) | |||
2538 | fc_host_post_event(vha->host, fc_get_event_number(), | 2552 | fc_host_post_event(vha->host, fc_get_event_number(), |
2539 | e->u.aen.code, e->u.aen.data); | 2553 | e->u.aen.code, e->u.aen.data); |
2540 | break; | 2554 | break; |
2555 | case QLA_EVT_IDC_ACK: | ||
2556 | qla81xx_idc_ack(vha, e->u.idc_ack.mb); | ||
2557 | break; | ||
2541 | } | 2558 | } |
2542 | if (e->flags & QLA_EVT_FLAG_FREE) | 2559 | if (e->flags & QLA_EVT_FLAG_FREE) |
2543 | kfree(e); | 2560 | kfree(e); |
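In qla_os.c the DMA-addressing setup now operates on the qla_hw_data directly and is called before qla2x00_mem_alloc(): at that point no scsi_qla_host exists yet, and the DMA masks need to be in place before the rings and other coherent buffers are allocated (the probe error paths are adjusted to match, freeing memory and queues when scsi_host_alloc() fails and zeroing max_queues when queue-pointer allocation fails). A rough sketch of the shape of such a helper, under the assumption that only the 64-bit/32-bit mask decision matters here:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void config_dma_sketch(struct qla_hw_data *ha)
{
	ha->flags.enable_64bit_addressing = 0;

	if (!pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
		ha->flags.enable_64bit_addressing = 1;
		return;
	}

	pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}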
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 303f8ee11f25..284827926eff 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -684,7 +684,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
684 | "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, | 684 | "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, |
685 | le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); | 685 | le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); |
686 | 686 | ||
687 | switch (le32_to_cpu(region->code)) { | 687 | switch (le32_to_cpu(region->code) & 0xff) { |
688 | case FLT_REG_FW: | 688 | case FLT_REG_FW: |
689 | ha->flt_region_fw = start; | 689 | ha->flt_region_fw = start; |
690 | break; | 690 | break; |
@@ -944,9 +944,9 @@ qla24xx_unprotect_flash(struct qla_hw_data *ha) | |||
944 | if (!ha->fdt_wrt_disable) | 944 | if (!ha->fdt_wrt_disable) |
945 | return; | 945 | return; |
946 | 946 | ||
947 | /* Disable flash write-protection. */ | 947 | /* Disable flash write-protection, first clear SR protection bit */ |
948 | qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); | 948 | qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); |
949 | /* Some flash parts need an additional zero-write to clear bits.*/ | 949 | /* Then write zero again to clear remaining SR bits.*/ |
950 | qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); | 950 | qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0); |
951 | } | 951 | } |
952 | 952 | ||
@@ -980,12 +980,11 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, | |||
980 | uint32_t dwords) | 980 | uint32_t dwords) |
981 | { | 981 | { |
982 | int ret; | 982 | int ret; |
983 | uint32_t liter, miter; | 983 | uint32_t liter; |
984 | uint32_t sec_mask, rest_addr; | 984 | uint32_t sec_mask, rest_addr; |
985 | uint32_t fdata, findex; | 985 | uint32_t fdata; |
986 | dma_addr_t optrom_dma; | 986 | dma_addr_t optrom_dma; |
987 | void *optrom = NULL; | 987 | void *optrom = NULL; |
988 | uint32_t *s, *d; | ||
989 | struct qla_hw_data *ha = vha->hw; | 988 | struct qla_hw_data *ha = vha->hw; |
990 | 989 | ||
991 | ret = QLA_SUCCESS; | 990 | ret = QLA_SUCCESS; |
@@ -1003,17 +1002,15 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, | |||
1003 | } | 1002 | } |
1004 | 1003 | ||
1005 | rest_addr = (ha->fdt_block_size >> 2) - 1; | 1004 | rest_addr = (ha->fdt_block_size >> 2) - 1; |
1006 | sec_mask = (ha->optrom_size >> 2) - (ha->fdt_block_size >> 2); | 1005 | sec_mask = ~rest_addr; |
1007 | 1006 | ||
1008 | qla24xx_unprotect_flash(ha); | 1007 | qla24xx_unprotect_flash(ha); |
1009 | 1008 | ||
1010 | for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { | 1009 | for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { |
1011 | 1010 | fdata = (faddr & sec_mask) << 2; | |
1012 | findex = faddr; | ||
1013 | fdata = (findex & sec_mask) << 2; | ||
1014 | 1011 | ||
1015 | /* Are we at the beginning of a sector? */ | 1012 | /* Are we at the beginning of a sector? */ |
1016 | if ((findex & rest_addr) == 0) { | 1013 | if ((faddr & rest_addr) == 0) { |
1017 | /* Do sector unprotect. */ | 1014 | /* Do sector unprotect. */ |
1018 | if (ha->fdt_unprotect_sec_cmd) | 1015 | if (ha->fdt_unprotect_sec_cmd) |
1019 | qla24xx_write_flash_dword(ha, | 1016 | qla24xx_write_flash_dword(ha, |
@@ -1024,7 +1021,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, | |||
1024 | (fdata & 0xff00) |((fdata << 16) & | 1021 | (fdata & 0xff00) |((fdata << 16) & |
1025 | 0xff0000) | ((fdata >> 16) & 0xff)); | 1022 | 0xff0000) | ((fdata >> 16) & 0xff)); |
1026 | if (ret != QLA_SUCCESS) { | 1023 | if (ret != QLA_SUCCESS) { |
1027 | DEBUG9(qla_printk("Unable to flash sector: " | 1024 | DEBUG9(qla_printk("Unable to erase sector: " |
1028 | "address=%x.\n", faddr)); | 1025 | "address=%x.\n", faddr)); |
1029 | break; | 1026 | break; |
1030 | } | 1027 | } |
@@ -1033,9 +1030,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, | |||
1033 | /* Go with burst-write. */ | 1030 | /* Go with burst-write. */ |
1034 | if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { | 1031 | if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { |
1035 | /* Copy data to DMA'ble buffer. */ | 1032 | /* Copy data to DMA'ble buffer. */ |
1036 | for (miter = 0, s = optrom, d = dwptr; | 1033 | memcpy(optrom, dwptr, OPTROM_BURST_SIZE); |
1037 | miter < OPTROM_BURST_DWORDS; miter++, s++, d++) | ||
1038 | *s = cpu_to_le32(*d); | ||
1039 | 1034 | ||
1040 | ret = qla2x00_load_ram(vha, optrom_dma, | 1035 | ret = qla2x00_load_ram(vha, optrom_dma, |
1041 | flash_data_addr(ha, faddr), | 1036 | flash_data_addr(ha, faddr), |
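The rewrite above derives the sector mask from the flash block size instead of from the option-ROM size: rest_addr is the dword offset within a sector, sec_mask = ~rest_addr keeps only the sector-base bits, and (faddr & rest_addr) == 0 marks a sector boundary where an erase must be issued. (The burst path also replaces the per-dword cpu_to_le32 copy loop with a plain memcpy.) A standalone check of the arithmetic, assuming a 64 KB block size purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t block_dwords = 0x10000 >> 2;		/* 64 KB block, in dwords */
	uint32_t rest_addr = block_dwords - 1;		/* 0x3fff: offset inside a sector */
	uint32_t sec_mask  = ~rest_addr;		/* 0xffffc000: sector-base bits */

	for (uint32_t faddr = 0x7ffe; faddr <= 0x8001; faddr++)
		printf("faddr=0x%05x sector_base=0x%05x %s\n",
		       faddr, faddr & sec_mask,
		       (faddr & rest_addr) == 0 ? "<- sector start, erase first" : "");
	return 0;
}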
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 808bab6ef06b..79f7053da99b 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.00-k1" | 10 | #define QLA2XXX_VERSION "8.03.00-k3" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index d6be0762eb91..b586f27c3bd4 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
@@ -244,6 +244,7 @@ struct ddb_entry { | |||
244 | uint8_t ip_addr[ISCSI_IPADDR_SIZE]; | 244 | uint8_t ip_addr[ISCSI_IPADDR_SIZE]; |
245 | uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */ | 245 | uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */ |
246 | uint8_t iscsi_alias[0x20]; | 246 | uint8_t iscsi_alias[0x20]; |
247 | uint8_t isid[6]; | ||
247 | }; | 248 | }; |
248 | 249 | ||
249 | /* | 250 | /* |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 109c5f5985ec..af8c3233e8ae 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -342,8 +342,12 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha, | |||
342 | DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no, | 342 | DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no, |
343 | __func__, fw_ddb_index)); | 343 | __func__, fw_ddb_index)); |
344 | list_for_each_entry(ddb_entry, &ha->ddb_list, list) { | 344 | list_for_each_entry(ddb_entry, &ha->ddb_list, list) { |
345 | if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name, | 345 | if ((memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name, |
346 | ISCSI_NAME_SIZE) == 0) { | 346 | ISCSI_NAME_SIZE) == 0) && |
347 | (ddb_entry->tpgt == | ||
348 | le32_to_cpu(fw_ddb_entry->tgt_portal_grp)) && | ||
349 | (memcmp(ddb_entry->isid, fw_ddb_entry->isid, | ||
350 | sizeof(ddb_entry->isid)) == 0)) { | ||
347 | found++; | 351 | found++; |
348 | break; | 352 | break; |
349 | } | 353 | } |
@@ -430,6 +434,8 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha, | |||
430 | 434 | ||
431 | ddb_entry->port = le16_to_cpu(fw_ddb_entry->port); | 435 | ddb_entry->port = le16_to_cpu(fw_ddb_entry->port); |
432 | ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); | 436 | ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); |
437 | memcpy(ddb_entry->isid, fw_ddb_entry->isid, sizeof(ddb_entry->isid)); | ||
438 | |||
433 | memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], | 439 | memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], |
434 | min(sizeof(ddb_entry->iscsi_name), | 440 | min(sizeof(ddb_entry->iscsi_name), |
435 | sizeof(fw_ddb_entry->iscsi_name))); | 441 | sizeof(fw_ddb_entry->iscsi_name))); |
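With these two hunks the qla4xxx session lookup keys on three fields instead of one: the iSCSI name, the target portal group tag, and the newly stored 6-byte ISID (copied from the firmware DDB entry in the second hunk), so two sessions to the same target name no longer collapse into one. A sketch of that stricter match, with made-up sizes and data:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ddb {
	char     iscsi_name[64];	/* size is illustrative */
	uint32_t tpgt;
	uint8_t  isid[6];
};

static int ddb_match(const struct ddb *a, const struct ddb *b)
{
	return memcmp(a->iscsi_name, b->iscsi_name, sizeof(a->iscsi_name)) == 0 &&
	       a->tpgt == b->tpgt &&
	       memcmp(a->isid, b->isid, sizeof(a->isid)) == 0;
}

int main(void)
{
	struct ddb x = { "iqn.2009-01.com.example:tgt", 1, { 0x80, 0, 0, 1, 2, 3 } };
	struct ddb y = x;

	y.isid[5] = 4;			/* same name and tpgt, different ISID */
	printf("same name, different ISID -> %s\n",
	       ddb_match(&x, &y) ? "match" : "no match");
	return 0;
}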
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 42e72a2c1f98..cbcd3f681b62 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -1095,7 +1095,8 @@ EXPORT_SYMBOL(__starget_for_each_device); | |||
1095 | * Description: Looks up the scsi_device with the specified @lun for a given | 1095 | * Description: Looks up the scsi_device with the specified @lun for a given |
1096 | * @starget. The returned scsi_device does not have an additional | 1096 | * @starget. The returned scsi_device does not have an additional |
1097 | * reference. You must hold the host's host_lock over this call and | 1097 | * reference. You must hold the host's host_lock over this call and |
1098 | * any access to the returned scsi_device. | 1098 | * any access to the returned scsi_device. A scsi_device in state |
1099 | * SDEV_DEL is skipped. | ||
1099 | * | 1100 | * |
1100 | * Note: The only reason why drivers should use this is because | 1101 | * Note: The only reason why drivers should use this is because |
1101 | * they need to access the device list in irq context. Otherwise you | 1102 | * they need to access the device list in irq context. Otherwise you |
@@ -1107,6 +1108,8 @@ struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, | |||
1107 | struct scsi_device *sdev; | 1108 | struct scsi_device *sdev; |
1108 | 1109 | ||
1109 | list_for_each_entry(sdev, &starget->devices, same_target_siblings) { | 1110 | list_for_each_entry(sdev, &starget->devices, same_target_siblings) { |
1111 | if (sdev->sdev_state == SDEV_DEL) | ||
1112 | continue; | ||
1110 | if (sdev->lun ==lun) | 1113 | if (sdev->lun ==lun) |
1111 | return sdev; | 1114 | return sdev; |
1112 | } | 1115 | } |
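The lookup helper now refuses to return a device whose state is SDEV_DEL; together with the scsi_scan.c hunk further below, which marks a half-constructed device SDEV_DEL on its error path, a caller holding the host lock in irq context can no longer be handed a device that is being torn down. A toy illustration of the skip, with an array standing in for the kernel list:

#include <stdio.h>

enum state { SDEV_RUNNING, SDEV_DEL };

struct dev { int lun; enum state state; };

static struct dev *lookup(struct dev *devs, int n, int lun)
{
	for (int i = 0; i < n; i++) {
		if (devs[i].state == SDEV_DEL)
			continue;	/* half-torn-down device: never return it */
		if (devs[i].lun == lun)
			return &devs[i];
	}
	return NULL;
}

int main(void)
{
	struct dev devs[] = { { 0, SDEV_DEL }, { 0, SDEV_RUNNING } };

	printf("lun 0 -> %s\n", lookup(devs, 2, 0) ? "live device" : "none");
	return 0;
}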
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 4969e4ec75ea..099b5455bbce 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -224,6 +224,7 @@ static struct { | |||
224 | {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, | 224 | {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, |
225 | {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 225 | {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
226 | {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 226 | {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
227 | {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | ||
227 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, | 228 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, |
228 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, | 229 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, |
229 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ | 230 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 940dc32ff0dc..b82ffd90632e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1040,12 +1040,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
1040 | action = ACTION_FAIL; | 1040 | action = ACTION_FAIL; |
1041 | break; | 1041 | break; |
1042 | case ABORTED_COMMAND: | 1042 | case ABORTED_COMMAND: |
1043 | action = ACTION_FAIL; | ||
1043 | if (sshdr.asc == 0x10) { /* DIF */ | 1044 | if (sshdr.asc == 0x10) { /* DIF */ |
1044 | description = "Target Data Integrity Failure"; | 1045 | description = "Target Data Integrity Failure"; |
1045 | action = ACTION_FAIL; | ||
1046 | error = -EILSEQ; | 1046 | error = -EILSEQ; |
1047 | } else | 1047 | } |
1048 | action = ACTION_RETRY; | ||
1049 | break; | 1048 | break; |
1050 | case NOT_READY: | 1049 | case NOT_READY: |
1051 | /* If the device is in the process of becoming | 1050 | /* If the device is in the process of becoming |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 66505bb79410..8f4de20c9deb 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -317,6 +317,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
317 | return sdev; | 317 | return sdev; |
318 | 318 | ||
319 | out_device_destroy: | 319 | out_device_destroy: |
320 | scsi_device_set_state(sdev, SDEV_DEL); | ||
320 | transport_destroy_device(&sdev->sdev_gendev); | 321 | transport_destroy_device(&sdev->sdev_gendev); |
321 | put_device(&sdev->sdev_gendev); | 322 | put_device(&sdev->sdev_gendev); |
322 | out: | 323 | out: |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d57566b8be0a..55310dbc10a6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -107,6 +107,7 @@ static void scsi_disk_release(struct device *cdev); | |||
107 | static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); | 107 | static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); |
108 | static void sd_print_result(struct scsi_disk *, int); | 108 | static void sd_print_result(struct scsi_disk *, int); |
109 | 109 | ||
110 | static DEFINE_SPINLOCK(sd_index_lock); | ||
110 | static DEFINE_IDA(sd_index_ida); | 111 | static DEFINE_IDA(sd_index_ida); |
111 | 112 | ||
112 | /* This semaphore is used to mediate the 0->1 reference get in the | 113 | /* This semaphore is used to mediate the 0->1 reference get in the |
@@ -1914,7 +1915,9 @@ static int sd_probe(struct device *dev) | |||
1914 | if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) | 1915 | if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) |
1915 | goto out_put; | 1916 | goto out_put; |
1916 | 1917 | ||
1918 | spin_lock(&sd_index_lock); | ||
1917 | error = ida_get_new(&sd_index_ida, &index); | 1919 | error = ida_get_new(&sd_index_ida, &index); |
1920 | spin_unlock(&sd_index_lock); | ||
1918 | } while (error == -EAGAIN); | 1921 | } while (error == -EAGAIN); |
1919 | 1922 | ||
1920 | if (error) | 1923 | if (error) |
@@ -1936,7 +1939,9 @@ static int sd_probe(struct device *dev) | |||
1936 | return 0; | 1939 | return 0; |
1937 | 1940 | ||
1938 | out_free_index: | 1941 | out_free_index: |
1942 | spin_lock(&sd_index_lock); | ||
1939 | ida_remove(&sd_index_ida, index); | 1943 | ida_remove(&sd_index_ida, index); |
1944 | spin_unlock(&sd_index_lock); | ||
1940 | out_put: | 1945 | out_put: |
1941 | put_disk(gd); | 1946 | put_disk(gd); |
1942 | out_free: | 1947 | out_free: |
@@ -1986,7 +1991,9 @@ static void scsi_disk_release(struct device *dev) | |||
1986 | struct scsi_disk *sdkp = to_scsi_disk(dev); | 1991 | struct scsi_disk *sdkp = to_scsi_disk(dev); |
1987 | struct gendisk *disk = sdkp->disk; | 1992 | struct gendisk *disk = sdkp->disk; |
1988 | 1993 | ||
1994 | spin_lock(&sd_index_lock); | ||
1989 | ida_remove(&sd_index_ida, sdkp->index); | 1995 | ida_remove(&sd_index_ida, sdkp->index); |
1996 | spin_unlock(&sd_index_lock); | ||
1990 | 1997 | ||
1991 | disk->private_data = NULL; | 1998 | disk->private_data = NULL; |
1992 | put_disk(disk); | 1999 | put_disk(disk); |
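The three sd.c hunks wrap every IDA operation in the new sd_index_lock, since ida_get_new()/ida_remove() of this vintage expect the caller to provide serialization; without it, two concurrent probes could corrupt the index bitmap or hand out the same sdX index twice. A userspace analogue of the pattern (a pthread mutex and a plain bitmap stand in for the spinlock and the IDA; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long index_map;		/* bit N set = index N in use */

static int index_get(void)
{
	int idx = -1;

	pthread_mutex_lock(&index_lock);
	for (int i = 0; i < 64; i++) {
		if (!(index_map & (1UL << i))) {
			index_map |= 1UL << i;
			idx = i;
			break;
		}
	}
	pthread_mutex_unlock(&index_lock);
	return idx;
}

static void index_put(int idx)
{
	pthread_mutex_lock(&index_lock);
	index_map &= ~(1UL << idx);
	pthread_mutex_unlock(&index_lock);
}

int main(void)
{
	int a = index_get(), b = index_get();

	printf("got %d and %d\n", a, b);	/* distinct even under concurrency */
	index_put(a);
	index_put(b);
	return 0;
}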
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8f0bd3f7a59f..516925d8b570 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -1078,7 +1078,7 @@ sg_ioctl(struct inode *inode, struct file *filp, | |||
1078 | case BLKTRACESETUP: | 1078 | case BLKTRACESETUP: |
1079 | return blk_trace_setup(sdp->device->request_queue, | 1079 | return blk_trace_setup(sdp->device->request_queue, |
1080 | sdp->disk->disk_name, | 1080 | sdp->disk->disk_name, |
1081 | sdp->device->sdev_gendev.devt, | 1081 | MKDEV(SCSI_GENERIC_MAJOR, sdp->index), |
1082 | (char *)arg); | 1082 | (char *)arg); |
1083 | case BLKTRACESTART: | 1083 | case BLKTRACESTART: |
1084 | return blk_trace_startstop(sdp->device->request_queue, 1); | 1084 | return blk_trace_startstop(sdp->device->request_queue, 1); |
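Passing MKDEV(SCSI_GENERIC_MAJOR, sdp->index) instead of the underlying device's devt attributes the blktrace data to the /dev/sgN node the user actually opened. For reference, the kernel's internal MKDEV packs a 12-bit major above a 20-bit minor; the major value 21 and the index 3 below are just an example:

#include <stdint.h>
#include <stdio.h>

#define MINORBITS	20
#define MKDEV(ma, mi)	(((uint32_t)(ma) << MINORBITS) | (mi))

int main(void)
{
	uint32_t devt = MKDEV(21, 3);		/* e.g. /dev/sg3 */

	printf("devt=0x%08x major=%u minor=%u\n",
	       devt, devt >> MINORBITS, devt & ((1u << MINORBITS) - 1));
	return 0;
}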