Diffstat (limited to 'drivers/scsi')
41 files changed, 995 insertions, 797 deletions
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index fde6e4c634e7..a7cf550b9cca 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/list.h> | 20 | #include <linux/list.h> |
21 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
22 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
23 | #include <linux/skbuff.h> | ||
23 | #include <scsi/libiscsi_tcp.h> | 24 | #include <scsi/libiscsi_tcp.h> |
24 | 25 | ||
25 | /* from cxgb3 LLD */ | 26 | /* from cxgb3 LLD */ |
@@ -113,6 +114,26 @@ struct cxgb3i_endpoint { | |||
113 | struct cxgb3i_conn *cconn; | 114 | struct cxgb3i_conn *cconn; |
114 | }; | 115 | }; |
115 | 116 | ||
117 | /** | ||
118 | * struct cxgb3i_task_data - private iscsi task data | ||
119 | * | ||
120 | * @nr_frags: # of coalesced page frags (from scsi sgl) | ||
121 | * @frags: coalesced page frags (from scsi sgl) | ||
122 | * @skb: tx pdu skb | ||
123 | * @offset: data offset for the next pdu | ||
124 | * @count: max. possible pdu payload | ||
125 | * @sgoffset: offset to the first sg entry for a given offset | ||
126 | */ | ||
127 | #define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512) | ||
128 | struct cxgb3i_task_data { | ||
129 | unsigned short nr_frags; | ||
130 | skb_frag_t frags[MAX_PDU_FRAGS]; | ||
131 | struct sk_buff *skb; | ||
132 | unsigned int offset; | ||
133 | unsigned int count; | ||
134 | unsigned int sgoffset; | ||
135 | }; | ||
136 | |||
116 | int cxgb3i_iscsi_init(void); | 137 | int cxgb3i_iscsi_init(void); |
117 | void cxgb3i_iscsi_cleanup(void); | 138 | void cxgb3i_iscsi_cleanup(void); |
118 | 139 | ||
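The new cxgb3i_task_data lives in the per-task dd_data area that libiscsi allocates: later hunks size the session with sizeof(struct iscsi_tcp_task) + sizeof(struct cxgb3i_task_data) and reach the cxgb3i part by offsetting past the iscsi_tcp_task. A minimal sketch of that lookup (illustrative only, assuming the kernel's iscsi_task/iscsi_tcp_task types; the helper name is made up):

static inline struct cxgb3i_task_data *task_to_tdata(struct iscsi_task *task)
{
	/* dd_data holds the iscsi_tcp_task first, cxgb3i_task_data right after */
	return task->dd_data + sizeof(struct iscsi_tcp_task);
}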
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 08f3a09d9233..a83d36e4926f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -639,10 +639,11 @@ static int ddp_init(struct t3cdev *tdev) | |||
639 | write_unlock(&cxgb3i_ddp_rwlock); | 639 | write_unlock(&cxgb3i_ddp_rwlock); |
640 | 640 | ||
641 | ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x " | 641 | ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x " |
642 | "pkt %u,%u.\n", | 642 | "pkt %u/%u, %u/%u.\n", |
643 | ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits, | 643 | ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits, |
644 | ddp->idx_mask, ddp->rsvd_tag_mask, | 644 | ddp->idx_mask, ddp->rsvd_tag_mask, |
645 | ddp->max_txsz, ddp->max_rxsz); | 645 | ddp->max_txsz, uinfo.max_txsz, |
646 | ddp->max_rxsz, uinfo.max_rxsz); | ||
646 | return 0; | 647 | return 0; |
647 | 648 | ||
648 | free_ddp_map: | 649 | free_ddp_map: |
@@ -654,8 +655,8 @@ free_ddp_map: | |||
654 | * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource | 655 | * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource |
655 | * @tdev: t3cdev adapter | 656 | * @tdev: t3cdev adapter |
656 | * @tformat: tag format | 657 | * @tformat: tag format |
657 | * @txsz: max tx pkt size, filled in by this func. | 658 | * @txsz: max tx pdu payload size, filled in by this func. |
658 | * @rxsz: max rx pkt size, filled in by this func. | 659 | * @rxsz: max rx pdu payload size, filled in by this func. |
659 | * initialize the ddp pagepod manager for a given adapter if needed and | 660 | * initialize the ddp pagepod manager for a given adapter if needed and |
660 | * setup the tag format for a given iscsi entity | 661 | * setup the tag format for a given iscsi entity |
661 | */ | 662 | */ |
@@ -685,10 +686,12 @@ int cxgb3i_adapter_ddp_init(struct t3cdev *tdev, | |||
685 | tformat->sw_bits, tformat->rsvd_bits, | 686 | tformat->sw_bits, tformat->rsvd_bits, |
686 | tformat->rsvd_shift, tformat->rsvd_mask); | 687 | tformat->rsvd_shift, tformat->rsvd_mask); |
687 | 688 | ||
688 | *txsz = ddp->max_txsz; | 689 | *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, |
689 | *rxsz = ddp->max_rxsz; | 690 | ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); |
690 | ddp_log_info("ddp max pkt size: %u, %u.\n", | 691 | *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, |
691 | ddp->max_txsz, ddp->max_rxsz); | 692 | ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); |
693 | ddp_log_info("max payload size: %u/%u, %u/%u.\n", | ||
694 | *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz); | ||
692 | return 0; | 695 | return 0; |
693 | } | 696 | } |
694 | EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init); | 697 | EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init); |
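The clamp above is simple arithmetic: with ULP2_MAX_PKT_SIZE at 16224 and the new ISCSI_PDU_NONPAYLOAD_LEN at 312 (bhs 48 + ahs 256 + digest 8), ULP2_MAX_PDU_PAYLOAD comes out to 15912 bytes, so the advertised payload is the smaller of that and the adapter limit minus the non-payload overhead. A sketch under those assumptions (not part of the patch; constants as defined in cxgb3i_ddp.h):

/* assumes hw_max is larger than the 312-byte non-payload overhead */
static unsigned int ddp_payload_limit(unsigned int hw_max)
{
	unsigned int payload = hw_max - ISCSI_PDU_NONPAYLOAD_LEN;

	return payload < ULP2_MAX_PDU_PAYLOAD ? payload : ULP2_MAX_PDU_PAYLOAD;
}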
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 5c7c4d95c493..3faae7831c83 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -13,6 +13,8 @@ | |||
13 | #ifndef __CXGB3I_ULP2_DDP_H__ | 13 | #ifndef __CXGB3I_ULP2_DDP_H__ |
14 | #define __CXGB3I_ULP2_DDP_H__ | 14 | #define __CXGB3I_ULP2_DDP_H__ |
15 | 15 | ||
16 | #include <linux/vmalloc.h> | ||
17 | |||
16 | /** | 18 | /** |
17 | * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity | 19 | * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity |
18 | * | 20 | * |
@@ -85,8 +87,9 @@ struct cxgb3i_ddp_info { | |||
85 | struct sk_buff **gl_skb; | 87 | struct sk_buff **gl_skb; |
86 | }; | 88 | }; |
87 | 89 | ||
90 | #define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8) */ | ||
88 | #define ULP2_MAX_PKT_SIZE 16224 | 91 | #define ULP2_MAX_PKT_SIZE 16224 |
89 | #define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX) | 92 | #define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN) |
90 | #define PPOD_PAGES_MAX 4 | 93 | #define PPOD_PAGES_MAX 4 |
91 | #define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ | 94 | #define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ |
92 | 95 | ||
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index 091ecb4d9f3d..1ce9f244e46c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -12,8 +12,8 @@ | |||
12 | #include "cxgb3i.h" | 12 | #include "cxgb3i.h" |
13 | 13 | ||
14 | #define DRV_MODULE_NAME "cxgb3i" | 14 | #define DRV_MODULE_NAME "cxgb3i" |
15 | #define DRV_MODULE_VERSION "1.0.0" | 15 | #define DRV_MODULE_VERSION "1.0.1" |
16 | #define DRV_MODULE_RELDATE "Jun. 1, 2008" | 16 | #define DRV_MODULE_RELDATE "Jan. 2009" |
17 | 17 | ||
18 | static char version[] = | 18 | static char version[] = |
19 | "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME | 19 | "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME |
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index d83464b9b3f9..fa2a44f37b36 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -364,7 +364,8 @@ cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth, | |||
364 | 364 | ||
365 | cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost, | 365 | cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost, |
366 | cmds_max, | 366 | cmds_max, |
367 | sizeof(struct iscsi_tcp_task), | 367 | sizeof(struct iscsi_tcp_task) + |
368 | sizeof(struct cxgb3i_task_data), | ||
368 | initial_cmdsn, ISCSI_MAX_TARGET); | 369 | initial_cmdsn, ISCSI_MAX_TARGET); |
369 | if (!cls_session) | 370 | if (!cls_session) |
370 | return NULL; | 371 | return NULL; |
@@ -402,17 +403,15 @@ static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn) | |||
402 | { | 403 | { |
403 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 404 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
404 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | 405 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; |
405 | unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, | 406 | unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM); |
406 | cconn->hba->snic->tx_max_size - | ||
407 | ISCSI_PDU_NONPAYLOAD_MAX); | ||
408 | 407 | ||
408 | max = min(cconn->hba->snic->tx_max_size, max); | ||
409 | if (conn->max_xmit_dlength) | 409 | if (conn->max_xmit_dlength) |
410 | conn->max_xmit_dlength = min_t(unsigned int, | 410 | conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); |
411 | conn->max_xmit_dlength, max); | ||
412 | else | 411 | else |
413 | conn->max_xmit_dlength = max; | 412 | conn->max_xmit_dlength = max; |
414 | align_pdu_size(conn->max_xmit_dlength); | 413 | align_pdu_size(conn->max_xmit_dlength); |
415 | cxgb3i_log_info("conn 0x%p, max xmit %u.\n", | 414 | cxgb3i_api_debug("conn 0x%p, max xmit %u.\n", |
416 | conn, conn->max_xmit_dlength); | 415 | conn, conn->max_xmit_dlength); |
417 | return 0; | 416 | return 0; |
418 | } | 417 | } |
@@ -427,9 +426,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn) | |||
427 | { | 426 | { |
428 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 427 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
429 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | 428 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; |
430 | unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, | 429 | unsigned int max = cconn->hba->snic->rx_max_size; |
431 | cconn->hba->snic->rx_max_size - | ||
432 | ISCSI_PDU_NONPAYLOAD_MAX); | ||
433 | 430 | ||
434 | align_pdu_size(max); | 431 | align_pdu_size(max); |
435 | if (conn->max_recv_dlength) { | 432 | if (conn->max_recv_dlength) { |
@@ -439,8 +436,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn) | |||
439 | conn->max_recv_dlength, max); | 436 | conn->max_recv_dlength, max); |
440 | return -EINVAL; | 437 | return -EINVAL; |
441 | } | 438 | } |
442 | conn->max_recv_dlength = min_t(unsigned int, | 439 | conn->max_recv_dlength = min(conn->max_recv_dlength, max); |
443 | conn->max_recv_dlength, max); | ||
444 | align_pdu_size(conn->max_recv_dlength); | 440 | align_pdu_size(conn->max_recv_dlength); |
445 | } else | 441 | } else |
446 | conn->max_recv_dlength = max; | 442 | conn->max_recv_dlength = max; |
@@ -844,7 +840,7 @@ static struct scsi_host_template cxgb3i_host_template = { | |||
844 | .proc_name = "cxgb3i", | 840 | .proc_name = "cxgb3i", |
845 | .queuecommand = iscsi_queuecommand, | 841 | .queuecommand = iscsi_queuecommand, |
846 | .change_queue_depth = iscsi_change_queue_depth, | 842 | .change_queue_depth = iscsi_change_queue_depth, |
847 | .can_queue = 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1), | 843 | .can_queue = CXGB3I_SCSI_QDEPTH_DFLT - 1, |
848 | .sg_tablesize = SG_ALL, | 844 | .sg_tablesize = SG_ALL, |
849 | .max_sectors = 0xFFFF, | 845 | .max_sectors = 0xFFFF, |
850 | .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, | 846 | .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, |
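After this change the transmit limit no longer starts from ULP2_MAX_PDU_PAYLOAD; it starts from what one skb can carry, max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM), is capped by the adapter's tx_max_size and by any value already negotiated on the connection, and is then aligned by align_pdu_size(). A condensed sketch of that selection (illustrative only; MAX_SKB_FRAGS and SKB_TX_HEADROOM are the kernel/driver macros used above, the helper name is made up, and the final alignment is left to the caller):

static unsigned int pick_xmit_dlength(unsigned int tx_max_size,
				      unsigned int negotiated)
{
	unsigned int max = 512 * MAX_SKB_FRAGS;

	if (max < SKB_TX_HEADROOM)
		max = SKB_TX_HEADROOM;
	if (max > tx_max_size)			/* adapter limit */
		max = tx_max_size;
	if (negotiated && negotiated < max)	/* negotiated limit */
		max = negotiated;
	return max;
}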
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index a865f1fefe8b..de3b3b614cca 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -23,19 +23,19 @@ | |||
23 | #include "cxgb3i_ddp.h" | 23 | #include "cxgb3i_ddp.h" |
24 | 24 | ||
25 | #ifdef __DEBUG_C3CN_CONN__ | 25 | #ifdef __DEBUG_C3CN_CONN__ |
26 | #define c3cn_conn_debug cxgb3i_log_info | 26 | #define c3cn_conn_debug cxgb3i_log_debug |
27 | #else | 27 | #else |
28 | #define c3cn_conn_debug(fmt...) | 28 | #define c3cn_conn_debug(fmt...) |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | #ifdef __DEBUG_C3CN_TX__ | 31 | #ifdef __DEBUG_C3CN_TX__ |
32 | #define c3cn_tx_debug cxgb3i_log_debug | 32 | #define c3cn_tx_debug cxgb3i_log_debug |
33 | #else | 33 | #else |
34 | #define c3cn_tx_debug(fmt...) | 34 | #define c3cn_tx_debug(fmt...) |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | #ifdef __DEBUG_C3CN_RX__ | 37 | #ifdef __DEBUG_C3CN_RX__ |
38 | #define c3cn_rx_debug cxgb3i_log_debug | 38 | #define c3cn_rx_debug cxgb3i_log_debug |
39 | #else | 39 | #else |
40 | #define c3cn_rx_debug(fmt...) | 40 | #define c3cn_rx_debug(fmt...) |
41 | #endif | 41 | #endif |
@@ -47,9 +47,9 @@ static int cxgb3_rcv_win = 256 * 1024; | |||
47 | module_param(cxgb3_rcv_win, int, 0644); | 47 | module_param(cxgb3_rcv_win, int, 0644); |
48 | MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)"); | 48 | MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)"); |
49 | 49 | ||
50 | static int cxgb3_snd_win = 64 * 1024; | 50 | static int cxgb3_snd_win = 128 * 1024; |
51 | module_param(cxgb3_snd_win, int, 0644); | 51 | module_param(cxgb3_snd_win, int, 0644); |
52 | MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)"); | 52 | MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)"); |
53 | 53 | ||
54 | static int cxgb3_rx_credit_thres = 10 * 1024; | 54 | static int cxgb3_rx_credit_thres = 10 * 1024; |
55 | module_param(cxgb3_rx_credit_thres, int, 0644); | 55 | module_param(cxgb3_rx_credit_thres, int, 0644); |
@@ -301,8 +301,8 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb) | |||
301 | static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb, | 301 | static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb, |
302 | int flags) | 302 | int flags) |
303 | { | 303 | { |
304 | CXGB3_SKB_CB(skb)->seq = c3cn->write_seq; | 304 | skb_tcp_seq(skb) = c3cn->write_seq; |
305 | CXGB3_SKB_CB(skb)->flags = flags; | 305 | skb_flags(skb) = flags; |
306 | __skb_queue_tail(&c3cn->write_queue, skb); | 306 | __skb_queue_tail(&c3cn->write_queue, skb); |
307 | } | 307 | } |
308 | 308 | ||
@@ -457,12 +457,9 @@ static unsigned int wrlen __read_mostly; | |||
457 | * The number of WRs needed for an skb depends on the number of fragments | 457 | * The number of WRs needed for an skb depends on the number of fragments |
458 | * in the skb and whether it has any payload in its main body. This maps the | 458 | * in the skb and whether it has any payload in its main body. This maps the |
459 | * length of the gather list represented by an skb into the # of necessary WRs. | 459 | * length of the gather list represented by an skb into the # of necessary WRs. |
460 | * | 460 | * The extra two fragments are for iscsi bhs and payload padding. |
461 | * The max. length of an skb is controlled by the max pdu size which is ~16K. | ||
462 | * Also, assume the min. fragment length is the sector size (512), then add | ||
463 | * extra fragment counts for iscsi bhs and payload padding. | ||
464 | */ | 461 | */ |
465 | #define SKB_WR_LIST_SIZE (16384/512 + 3) | 462 | #define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2) |
466 | static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; | 463 | static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; |
467 | 464 | ||
468 | static void s3_init_wr_tab(unsigned int wr_len) | 465 | static void s3_init_wr_tab(unsigned int wr_len) |
@@ -485,7 +482,7 @@ static void s3_init_wr_tab(unsigned int wr_len) | |||
485 | 482 | ||
486 | static inline void reset_wr_list(struct s3_conn *c3cn) | 483 | static inline void reset_wr_list(struct s3_conn *c3cn) |
487 | { | 484 | { |
488 | c3cn->wr_pending_head = NULL; | 485 | c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL; |
489 | } | 486 | } |
490 | 487 | ||
491 | /* | 488 | /* |
@@ -496,7 +493,7 @@ static inline void reset_wr_list(struct s3_conn *c3cn) | |||
496 | static inline void enqueue_wr(struct s3_conn *c3cn, | 493 | static inline void enqueue_wr(struct s3_conn *c3cn, |
497 | struct sk_buff *skb) | 494 | struct sk_buff *skb) |
498 | { | 495 | { |
499 | skb_wr_data(skb) = NULL; | 496 | skb_tx_wr_next(skb) = NULL; |
500 | 497 | ||
501 | /* | 498 | /* |
502 | * We want to take an extra reference since both us and the driver | 499 | * We want to take an extra reference since both us and the driver |
@@ -509,10 +506,22 @@ static inline void enqueue_wr(struct s3_conn *c3cn, | |||
509 | if (!c3cn->wr_pending_head) | 506 | if (!c3cn->wr_pending_head) |
510 | c3cn->wr_pending_head = skb; | 507 | c3cn->wr_pending_head = skb; |
511 | else | 508 | else |
512 | skb_wr_data(skb) = skb; | 509 | skb_tx_wr_next(c3cn->wr_pending_tail) = skb; |
513 | c3cn->wr_pending_tail = skb; | 510 | c3cn->wr_pending_tail = skb; |
514 | } | 511 | } |
515 | 512 | ||
513 | static int count_pending_wrs(struct s3_conn *c3cn) | ||
514 | { | ||
515 | int n = 0; | ||
516 | const struct sk_buff *skb = c3cn->wr_pending_head; | ||
517 | |||
518 | while (skb) { | ||
519 | n += skb->csum; | ||
520 | skb = skb_tx_wr_next(skb); | ||
521 | } | ||
522 | return n; | ||
523 | } | ||
524 | |||
516 | static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn) | 525 | static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn) |
517 | { | 526 | { |
518 | return c3cn->wr_pending_head; | 527 | return c3cn->wr_pending_head; |
@@ -529,8 +538,8 @@ static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn) | |||
529 | 538 | ||
530 | if (likely(skb)) { | 539 | if (likely(skb)) { |
531 | /* Don't bother clearing the tail */ | 540 | /* Don't bother clearing the tail */ |
532 | c3cn->wr_pending_head = skb_wr_data(skb); | 541 | c3cn->wr_pending_head = skb_tx_wr_next(skb); |
533 | skb_wr_data(skb) = NULL; | 542 | skb_tx_wr_next(skb) = NULL; |
534 | } | 543 | } |
535 | return skb; | 544 | return skb; |
536 | } | 545 | } |
@@ -543,13 +552,14 @@ static void purge_wr_queue(struct s3_conn *c3cn) | |||
543 | } | 552 | } |
544 | 553 | ||
545 | static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb, | 554 | static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb, |
546 | int len) | 555 | int len, int req_completion) |
547 | { | 556 | { |
548 | struct tx_data_wr *req; | 557 | struct tx_data_wr *req; |
549 | 558 | ||
550 | skb_reset_transport_header(skb); | 559 | skb_reset_transport_header(skb); |
551 | req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req)); | 560 | req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req)); |
552 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); | 561 | req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | |
562 | (req_completion ? F_WR_COMPL : 0)); | ||
553 | req->wr_lo = htonl(V_WR_TID(c3cn->tid)); | 563 | req->wr_lo = htonl(V_WR_TID(c3cn->tid)); |
554 | req->sndseq = htonl(c3cn->snd_nxt); | 564 | req->sndseq = htonl(c3cn->snd_nxt); |
555 | /* len includes the length of any HW ULP additions */ | 565 | /* len includes the length of any HW ULP additions */ |
@@ -592,7 +602,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion) | |||
592 | 602 | ||
593 | if (unlikely(c3cn->state == C3CN_STATE_CONNECTING || | 603 | if (unlikely(c3cn->state == C3CN_STATE_CONNECTING || |
594 | c3cn->state == C3CN_STATE_CLOSE_WAIT_1 || | 604 | c3cn->state == C3CN_STATE_CLOSE_WAIT_1 || |
595 | c3cn->state == C3CN_STATE_ABORTING)) { | 605 | c3cn->state >= C3CN_STATE_ABORTING)) { |
596 | c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n", | 606 | c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n", |
597 | c3cn, c3cn->state); | 607 | c3cn, c3cn->state); |
598 | return 0; | 608 | return 0; |
@@ -615,7 +625,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion) | |||
615 | if (c3cn->wr_avail < wrs_needed) { | 625 | if (c3cn->wr_avail < wrs_needed) { |
616 | c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, " | 626 | c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, " |
617 | "wr %d < %u.\n", | 627 | "wr %d < %u.\n", |
618 | c3cn, skb->len, skb->datalen, frags, | 628 | c3cn, skb->len, skb->data_len, frags, |
619 | wrs_needed, c3cn->wr_avail); | 629 | wrs_needed, c3cn->wr_avail); |
620 | break; | 630 | break; |
621 | } | 631 | } |
@@ -627,20 +637,24 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion) | |||
627 | c3cn->wr_unacked += wrs_needed; | 637 | c3cn->wr_unacked += wrs_needed; |
628 | enqueue_wr(c3cn, skb); | 638 | enqueue_wr(c3cn, skb); |
629 | 639 | ||
630 | if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) { | 640 | c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, " |
631 | len += ulp_extra_len(skb); | 641 | "wr %d, left %u, unack %u.\n", |
632 | make_tx_data_wr(c3cn, skb, len); | 642 | c3cn, skb->len, skb->data_len, frags, |
633 | c3cn->snd_nxt += len; | 643 | wrs_needed, c3cn->wr_avail, c3cn->wr_unacked); |
634 | if ((req_completion | 644 | |
635 | && c3cn->wr_unacked == wrs_needed) | ||
636 | || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL) | ||
637 | || c3cn->wr_unacked >= c3cn->wr_max / 2) { | ||
638 | struct work_request_hdr *wr = cplhdr(skb); | ||
639 | 645 | ||
640 | wr->wr_hi |= htonl(F_WR_COMPL); | 646 | if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) { |
647 | if ((req_completion && | ||
648 | c3cn->wr_unacked == wrs_needed) || | ||
649 | (skb_flags(skb) & C3CB_FLAG_COMPL) || | ||
650 | c3cn->wr_unacked >= c3cn->wr_max / 2) { | ||
651 | req_completion = 1; | ||
641 | c3cn->wr_unacked = 0; | 652 | c3cn->wr_unacked = 0; |
642 | } | 653 | } |
643 | CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR; | 654 | len += ulp_extra_len(skb); |
655 | make_tx_data_wr(c3cn, skb, len, req_completion); | ||
656 | c3cn->snd_nxt += len; | ||
657 | skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR; | ||
644 | } | 658 | } |
645 | 659 | ||
646 | total_size += skb->truesize; | 660 | total_size += skb->truesize; |
@@ -735,8 +749,11 @@ static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb) | |||
735 | if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED))) | 749 | if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED))) |
736 | /* upper layer has requested closing */ | 750 | /* upper layer has requested closing */ |
737 | send_abort_req(c3cn); | 751 | send_abort_req(c3cn); |
738 | else if (c3cn_push_tx_frames(c3cn, 1)) | 752 | else { |
753 | if (skb_queue_len(&c3cn->write_queue)) | ||
754 | c3cn_push_tx_frames(c3cn, 1); | ||
739 | cxgb3i_conn_tx_open(c3cn); | 755 | cxgb3i_conn_tx_open(c3cn); |
756 | } | ||
740 | } | 757 | } |
741 | 758 | ||
742 | static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb, | 759 | static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb, |
@@ -1082,8 +1099,8 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb) | |||
1082 | return; | 1099 | return; |
1083 | } | 1100 | } |
1084 | 1101 | ||
1085 | CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq); | 1102 | skb_tcp_seq(skb) = ntohl(hdr_cpl->seq); |
1086 | CXGB3_SKB_CB(skb)->flags = 0; | 1103 | skb_flags(skb) = 0; |
1087 | 1104 | ||
1088 | skb_reset_transport_header(skb); | 1105 | skb_reset_transport_header(skb); |
1089 | __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); | 1106 | __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); |
@@ -1103,12 +1120,12 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb) | |||
1103 | goto abort_conn; | 1120 | goto abort_conn; |
1104 | 1121 | ||
1105 | skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY; | 1122 | skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY; |
1106 | skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len); | 1123 | skb_rx_pdulen(skb) = ntohs(ddp_cpl.len); |
1107 | skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); | 1124 | skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); |
1108 | status = ntohl(ddp_cpl.ddp_status); | 1125 | status = ntohl(ddp_cpl.ddp_status); |
1109 | 1126 | ||
1110 | c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n", | 1127 | c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n", |
1111 | skb, skb->len, skb_ulp_pdulen(skb), status); | 1128 | skb, skb->len, skb_rx_pdulen(skb), status); |
1112 | 1129 | ||
1113 | if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT)) | 1130 | if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT)) |
1114 | skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR; | 1131 | skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR; |
@@ -1126,7 +1143,7 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb) | |||
1126 | } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT)) | 1143 | } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT)) |
1127 | skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED; | 1144 | skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED; |
1128 | 1145 | ||
1129 | c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb); | 1146 | c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb); |
1130 | __pskb_trim(skb, len); | 1147 | __pskb_trim(skb, len); |
1131 | __skb_queue_tail(&c3cn->receive_queue, skb); | 1148 | __skb_queue_tail(&c3cn->receive_queue, skb); |
1132 | cxgb3i_conn_pdu_ready(c3cn); | 1149 | cxgb3i_conn_pdu_ready(c3cn); |
@@ -1151,12 +1168,27 @@ static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx) | |||
1151 | * Process an acknowledgment of WR completion. Advance snd_una and send the | 1168 | * Process an acknowledgment of WR completion. Advance snd_una and send the |
1152 | * next batch of work requests from the write queue. | 1169 | * next batch of work requests from the write queue. |
1153 | */ | 1170 | */ |
1171 | static void check_wr_invariants(struct s3_conn *c3cn) | ||
1172 | { | ||
1173 | int pending = count_pending_wrs(c3cn); | ||
1174 | |||
1175 | if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max)) | ||
1176 | cxgb3i_log_error("TID %u: credit imbalance: avail %u, " | ||
1177 | "pending %u, total should be %u\n", | ||
1178 | c3cn->tid, c3cn->wr_avail, pending, | ||
1179 | c3cn->wr_max); | ||
1180 | } | ||
1181 | |||
1154 | static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb) | 1182 | static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb) |
1155 | { | 1183 | { |
1156 | struct cpl_wr_ack *hdr = cplhdr(skb); | 1184 | struct cpl_wr_ack *hdr = cplhdr(skb); |
1157 | unsigned int credits = ntohs(hdr->credits); | 1185 | unsigned int credits = ntohs(hdr->credits); |
1158 | u32 snd_una = ntohl(hdr->snd_una); | 1186 | u32 snd_una = ntohl(hdr->snd_una); |
1159 | 1187 | ||
1188 | c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n", | ||
1189 | credits, c3cn->wr_avail, c3cn->wr_unacked, | ||
1190 | c3cn->tid, c3cn->state); | ||
1191 | |||
1160 | c3cn->wr_avail += credits; | 1192 | c3cn->wr_avail += credits; |
1161 | if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail) | 1193 | if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail) |
1162 | c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail; | 1194 | c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail; |
@@ -1171,6 +1203,17 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb) | |||
1171 | break; | 1203 | break; |
1172 | } | 1204 | } |
1173 | if (unlikely(credits < p->csum)) { | 1205 | if (unlikely(credits < p->csum)) { |
1206 | struct tx_data_wr *w = cplhdr(p); | ||
1207 | cxgb3i_log_error("TID %u got %u WR credits need %u, " | ||
1208 | "len %u, main body %u, frags %u, " | ||
1209 | "seq # %u, ACK una %u, ACK nxt %u, " | ||
1210 | "WR_AVAIL %u, WRs pending %u\n", | ||
1211 | c3cn->tid, credits, p->csum, p->len, | ||
1212 | p->len - p->data_len, | ||
1213 | skb_shinfo(p)->nr_frags, | ||
1214 | ntohl(w->sndseq), snd_una, | ||
1215 | ntohl(hdr->snd_nxt), c3cn->wr_avail, | ||
1216 | count_pending_wrs(c3cn) - credits); | ||
1174 | p->csum -= credits; | 1217 | p->csum -= credits; |
1175 | break; | 1218 | break; |
1176 | } else { | 1219 | } else { |
@@ -1180,15 +1223,24 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb) | |||
1180 | } | 1223 | } |
1181 | } | 1224 | } |
1182 | 1225 | ||
1183 | if (unlikely(before(snd_una, c3cn->snd_una))) | 1226 | check_wr_invariants(c3cn); |
1227 | |||
1228 | if (unlikely(before(snd_una, c3cn->snd_una))) { | ||
1229 | cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK " | ||
1230 | "snd_una %u\n", | ||
1231 | c3cn->tid, snd_una, c3cn->snd_una); | ||
1184 | goto out_free; | 1232 | goto out_free; |
1233 | } | ||
1185 | 1234 | ||
1186 | if (c3cn->snd_una != snd_una) { | 1235 | if (c3cn->snd_una != snd_una) { |
1187 | c3cn->snd_una = snd_una; | 1236 | c3cn->snd_una = snd_una; |
1188 | dst_confirm(c3cn->dst_cache); | 1237 | dst_confirm(c3cn->dst_cache); |
1189 | } | 1238 | } |
1190 | 1239 | ||
1191 | if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0)) | 1240 | if (skb_queue_len(&c3cn->write_queue)) { |
1241 | if (c3cn_push_tx_frames(c3cn, 0)) | ||
1242 | cxgb3i_conn_tx_open(c3cn); | ||
1243 | } else | ||
1192 | cxgb3i_conn_tx_open(c3cn); | 1244 | cxgb3i_conn_tx_open(c3cn); |
1193 | out_free: | 1245 | out_free: |
1194 | __kfree_skb(skb); | 1246 | __kfree_skb(skb); |
@@ -1452,7 +1504,7 @@ static void init_offload_conn(struct s3_conn *c3cn, | |||
1452 | struct dst_entry *dst) | 1504 | struct dst_entry *dst) |
1453 | { | 1505 | { |
1454 | BUG_ON(c3cn->cdev != cdev); | 1506 | BUG_ON(c3cn->cdev != cdev); |
1455 | c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs; | 1507 | c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1; |
1456 | c3cn->wr_unacked = 0; | 1508 | c3cn->wr_unacked = 0; |
1457 | c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst)); | 1509 | c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst)); |
1458 | 1510 | ||
@@ -1671,9 +1723,17 @@ int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb) | |||
1671 | goto out_err; | 1723 | goto out_err; |
1672 | } | 1724 | } |
1673 | 1725 | ||
1674 | err = -EPIPE; | ||
1675 | if (c3cn->err) { | 1726 | if (c3cn->err) { |
1676 | c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err); | 1727 | c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err); |
1728 | err = -EPIPE; | ||
1729 | goto out_err; | ||
1730 | } | ||
1731 | |||
1732 | if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) { | ||
1733 | c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n", | ||
1734 | c3cn, c3cn->write_seq, c3cn->snd_una, | ||
1735 | cxgb3_snd_win); | ||
1736 | err = -EAGAIN; | ||
1677 | goto out_err; | 1737 | goto out_err; |
1678 | } | 1738 | } |
1679 | 1739 | ||
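The new -EAGAIN path in cxgb3i_c3cn_send_pdus() is plain TCP-style send-window backpressure: bytes queued but not yet acknowledged (write_seq - snd_una) may not reach cxgb3_snd_win, which now defaults to 128KB. A minimal sketch of the check (not part of the patch; the sequence counters are u32, so the subtraction wraps correctly):

static int tx_window_full(u32 write_seq, u32 snd_una, u32 snd_win)
{
	return write_seq - snd_una >= snd_win;	/* caller returns -EAGAIN */
}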
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index d23156907ffd..6344b9eb2589 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -178,25 +178,33 @@ void cxgb3i_c3cn_release(struct s3_conn *); | |||
178 | * @flag: see C3CB_FLAG_* below | 178 | * @flag: see C3CB_FLAG_* below |
179 | * @ulp_mode: ULP mode/submode of sk_buff | 179 | * @ulp_mode: ULP mode/submode of sk_buff |
180 | * @seq: tcp sequence number | 180 | * @seq: tcp sequence number |
181 | * @ddigest: pdu data digest | ||
182 | * @pdulen: recovered pdu length | ||
183 | * @wr_data: scratch area for tx wr | ||
184 | */ | 181 | */ |
182 | struct cxgb3_skb_rx_cb { | ||
183 | __u32 ddigest; /* data digest */ | ||
184 | __u32 pdulen; /* recovered pdu length */ | ||
185 | }; | ||
186 | |||
187 | struct cxgb3_skb_tx_cb { | ||
188 | struct sk_buff *wr_next; /* next wr */ | ||
189 | }; | ||
190 | |||
185 | struct cxgb3_skb_cb { | 191 | struct cxgb3_skb_cb { |
186 | __u8 flags; | 192 | __u8 flags; |
187 | __u8 ulp_mode; | 193 | __u8 ulp_mode; |
188 | __u32 seq; | 194 | __u32 seq; |
189 | __u32 ddigest; | 195 | union { |
190 | __u32 pdulen; | 196 | struct cxgb3_skb_rx_cb rx; |
191 | struct sk_buff *wr_data; | 197 | struct cxgb3_skb_tx_cb tx; |
198 | }; | ||
192 | }; | 199 | }; |
193 | 200 | ||
194 | #define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0])) | 201 | #define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0])) |
195 | 202 | #define skb_flags(skb) (CXGB3_SKB_CB(skb)->flags) | |
196 | #define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode) | 203 | #define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode) |
197 | #define skb_ulp_ddigest(skb) (CXGB3_SKB_CB(skb)->ddigest) | 204 | #define skb_tcp_seq(skb) (CXGB3_SKB_CB(skb)->seq) |
198 | #define skb_ulp_pdulen(skb) (CXGB3_SKB_CB(skb)->pdulen) | 205 | #define skb_rx_ddigest(skb) (CXGB3_SKB_CB(skb)->rx.ddigest) |
199 | #define skb_wr_data(skb) (CXGB3_SKB_CB(skb)->wr_data) | 206 | #define skb_rx_pdulen(skb) (CXGB3_SKB_CB(skb)->rx.pdulen) |
207 | #define skb_tx_wr_next(skb) (CXGB3_SKB_CB(skb)->tx.wr_next) | ||
200 | 208 | ||
201 | enum c3cb_flags { | 209 | enum c3cb_flags { |
202 | C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */ | 210 | C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */ |
@@ -217,6 +225,7 @@ struct sge_opaque_hdr { | |||
217 | /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ | 225 | /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ |
218 | #define TX_HEADER_LEN \ | 226 | #define TX_HEADER_LEN \ |
219 | (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr)) | 227 | (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr)) |
228 | #define SKB_TX_HEADROOM SKB_MAX_HEAD(TX_HEADER_LEN) | ||
220 | 229 | ||
221 | /* | 230 | /* |
222 | * get and set private ip for iscsi traffic | 231 | * get and set private ip for iscsi traffic |
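Since an skb only ever travels the rx or the tx path, the rx-only fields (ddigest, pdulen) and the tx-only wr_next pointer can share storage, which is what the new union does; the skb_rx_*/skb_tx_* accessors make the intended half explicit. One thing a reader might want to verify is that the reworked cb still fits in skb->cb; a hypothetical build-time check (not part of the patch, assuming the kernel's BUILD_BUG_ON):

static inline void cxgb3_skb_cb_fits(void)
{
	BUILD_BUG_ON(sizeof(struct cxgb3_skb_cb) >
		     sizeof(((struct sk_buff *)0)->cb));
}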
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index ce7ce8c6094c..17115c230d65 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -32,6 +32,10 @@ | |||
32 | #define cxgb3i_tx_debug(fmt...) | 32 | #define cxgb3i_tx_debug(fmt...) |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* always allocate rooms for AHS */ | ||
36 | #define SKB_TX_PDU_HEADER_LEN \ | ||
37 | (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE) | ||
38 | static unsigned int skb_extra_headroom; | ||
35 | static struct page *pad_page; | 39 | static struct page *pad_page; |
36 | 40 | ||
37 | /* | 41 | /* |
@@ -146,12 +150,13 @@ static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) | |||
146 | 150 | ||
147 | void cxgb3i_conn_cleanup_task(struct iscsi_task *task) | 151 | void cxgb3i_conn_cleanup_task(struct iscsi_task *task) |
148 | { | 152 | { |
149 | struct iscsi_tcp_task *tcp_task = task->dd_data; | 153 | struct cxgb3i_task_data *tdata = task->dd_data + |
154 | sizeof(struct iscsi_tcp_task); | ||
150 | 155 | ||
151 | /* never reached the xmit task callout */ | 156 | /* never reached the xmit task callout */ |
152 | if (tcp_task->dd_data) | 157 | if (tdata->skb) |
153 | kfree_skb(tcp_task->dd_data); | 158 | __kfree_skb(tdata->skb); |
154 | tcp_task->dd_data = NULL; | 159 | memset(tdata, 0, sizeof(struct cxgb3i_task_data)); |
155 | 160 | ||
156 | /* MNC - Do we need a check in case this is called but | 161 | /* MNC - Do we need a check in case this is called but |
157 | * cxgb3i_conn_alloc_pdu has never been called on the task */ | 162 | * cxgb3i_conn_alloc_pdu has never been called on the task */ |
@@ -159,28 +164,102 @@ void cxgb3i_conn_cleanup_task(struct iscsi_task *task) | |||
159 | iscsi_tcp_cleanup_task(task); | 164 | iscsi_tcp_cleanup_task(task); |
160 | } | 165 | } |
161 | 166 | ||
162 | /* | 167 | static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, |
163 | * We do not support ahs yet | 168 | unsigned int offset, unsigned int *off, |
164 | */ | 169 | struct scatterlist **sgp) |
170 | { | ||
171 | int i; | ||
172 | struct scatterlist *sg; | ||
173 | |||
174 | for_each_sg(sgl, sg, sgcnt, i) { | ||
175 | if (offset < sg->length) { | ||
176 | *off = offset; | ||
177 | *sgp = sg; | ||
178 | return 0; | ||
179 | } | ||
180 | offset -= sg->length; | ||
181 | } | ||
182 | return -EFAULT; | ||
183 | } | ||
184 | |||
185 | static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, | ||
186 | unsigned int dlen, skb_frag_t *frags, | ||
187 | int frag_max) | ||
188 | { | ||
189 | unsigned int datalen = dlen; | ||
190 | unsigned int sglen = sg->length - sgoffset; | ||
191 | struct page *page = sg_page(sg); | ||
192 | int i; | ||
193 | |||
194 | i = 0; | ||
195 | do { | ||
196 | unsigned int copy; | ||
197 | |||
198 | if (!sglen) { | ||
199 | sg = sg_next(sg); | ||
200 | if (!sg) { | ||
201 | cxgb3i_log_error("%s, sg NULL, len %u/%u.\n", | ||
202 | __func__, datalen, dlen); | ||
203 | return -EINVAL; | ||
204 | } | ||
205 | sgoffset = 0; | ||
206 | sglen = sg->length; | ||
207 | page = sg_page(sg); | ||
208 | |||
209 | } | ||
210 | copy = min(datalen, sglen); | ||
211 | if (i && page == frags[i - 1].page && | ||
212 | sgoffset + sg->offset == | ||
213 | frags[i - 1].page_offset + frags[i - 1].size) { | ||
214 | frags[i - 1].size += copy; | ||
215 | } else { | ||
216 | if (i >= frag_max) { | ||
217 | cxgb3i_log_error("%s, too many pages %u, " | ||
218 | "dlen %u.\n", __func__, | ||
219 | frag_max, dlen); | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | |||
223 | frags[i].page = page; | ||
224 | frags[i].page_offset = sg->offset + sgoffset; | ||
225 | frags[i].size = copy; | ||
226 | i++; | ||
227 | } | ||
228 | datalen -= copy; | ||
229 | sgoffset += copy; | ||
230 | sglen -= copy; | ||
231 | } while (datalen); | ||
232 | |||
233 | return i; | ||
234 | } | ||
235 | |||
165 | int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) | 236 | int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) |
166 | { | 237 | { |
238 | struct iscsi_conn *conn = task->conn; | ||
167 | struct iscsi_tcp_task *tcp_task = task->dd_data; | 239 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
168 | struct sk_buff *skb; | 240 | struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task); |
241 | struct scsi_cmnd *sc = task->sc; | ||
242 | int headroom = SKB_TX_PDU_HEADER_LEN; | ||
169 | 243 | ||
244 | tcp_task->dd_data = tdata; | ||
170 | task->hdr = NULL; | 245 | task->hdr = NULL; |
171 | /* always allocate rooms for AHS */ | 246 | |
172 | skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + | 247 | /* write command, need to send data pdus */ |
173 | TX_HEADER_LEN, GFP_ATOMIC); | 248 | if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT || |
174 | if (!skb) | 249 | (opcode == ISCSI_OP_SCSI_CMD && |
250 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) | ||
251 | headroom += min(skb_extra_headroom, conn->max_xmit_dlength); | ||
252 | |||
253 | tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC); | ||
254 | if (!tdata->skb) | ||
175 | return -ENOMEM; | 255 | return -ENOMEM; |
256 | skb_reserve(tdata->skb, TX_HEADER_LEN); | ||
176 | 257 | ||
177 | cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n", | 258 | cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n", |
178 | task, opcode, skb); | 259 | task, opcode, tdata->skb); |
179 | 260 | ||
180 | tcp_task->dd_data = skb; | 261 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; |
181 | skb_reserve(skb, TX_HEADER_LEN); | 262 | task->hdr_max = SKB_TX_PDU_HEADER_LEN; |
182 | task->hdr = (struct iscsi_hdr *)skb->data; | ||
183 | task->hdr_max = sizeof(struct iscsi_hdr); | ||
184 | 263 | ||
185 | /* data_out uses scsi_cmd's itt */ | 264 | /* data_out uses scsi_cmd's itt */ |
186 | if (opcode != ISCSI_OP_SCSI_DATA_OUT) | 265 | if (opcode != ISCSI_OP_SCSI_DATA_OUT) |
@@ -192,13 +271,13 @@ int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) | |||
192 | int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset, | 271 | int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset, |
193 | unsigned int count) | 272 | unsigned int count) |
194 | { | 273 | { |
195 | struct iscsi_tcp_task *tcp_task = task->dd_data; | ||
196 | struct sk_buff *skb = tcp_task->dd_data; | ||
197 | struct iscsi_conn *conn = task->conn; | 274 | struct iscsi_conn *conn = task->conn; |
198 | struct page *pg; | 275 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
276 | struct cxgb3i_task_data *tdata = tcp_task->dd_data; | ||
277 | struct sk_buff *skb = tdata->skb; | ||
199 | unsigned int datalen = count; | 278 | unsigned int datalen = count; |
200 | int i, padlen = iscsi_padding(count); | 279 | int i, padlen = iscsi_padding(count); |
201 | skb_frag_t *frag; | 280 | struct page *pg; |
202 | 281 | ||
203 | cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n", | 282 | cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n", |
204 | task, task->sc, offset, count, skb); | 283 | task, task->sc, offset, count, skb); |
@@ -209,90 +288,94 @@ int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset, | |||
209 | return 0; | 288 | return 0; |
210 | 289 | ||
211 | if (task->sc) { | 290 | if (task->sc) { |
212 | struct scatterlist *sg; | 291 | struct scsi_data_buffer *sdb = scsi_out(task->sc); |
213 | struct scsi_data_buffer *sdb; | 292 | struct scatterlist *sg = NULL; |
214 | unsigned int sgoffset = offset; | 293 | int err; |
215 | struct page *sgpg; | 294 | |
216 | unsigned int sglen; | 295 | tdata->offset = offset; |
217 | 296 | tdata->count = count; | |
218 | sdb = scsi_out(task->sc); | 297 | err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents, |
219 | sg = sdb->table.sgl; | 298 | tdata->offset, &tdata->sgoffset, &sg); |
220 | 299 | if (err < 0) { | |
221 | for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { | 300 | cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n", |
222 | cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n", | 301 | sdb->table.nents, tdata->offset, |
223 | i, sg_page(sg), sg->length, sg->offset); | 302 | sdb->length); |
224 | 303 | return err; | |
225 | if (sgoffset < sg->length) | ||
226 | break; | ||
227 | sgoffset -= sg->length; | ||
228 | } | 304 | } |
229 | sgpg = sg_page(sg); | 305 | err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count, |
230 | sglen = sg->length - sgoffset; | 306 | tdata->frags, MAX_PDU_FRAGS); |
231 | 307 | if (err < 0) { | |
232 | do { | 308 | cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n", |
233 | int j = skb_shinfo(skb)->nr_frags; | 309 | sdb->table.nents, tdata->offset, |
234 | unsigned int copy; | 310 | tdata->count); |
235 | 311 | return err; | |
236 | if (!sglen) { | 312 | } |
237 | sg = sg_next(sg); | 313 | tdata->nr_frags = err; |
238 | sgpg = sg_page(sg); | 314 | |
239 | sgoffset = 0; | 315 | if (tdata->nr_frags > MAX_SKB_FRAGS || |
240 | sglen = sg->length; | 316 | (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { |
241 | ++i; | 317 | char *dst = skb->data + task->hdr_len; |
318 | skb_frag_t *frag = tdata->frags; | ||
319 | |||
320 | /* data fits in the skb's headroom */ | ||
321 | for (i = 0; i < tdata->nr_frags; i++, frag++) { | ||
322 | char *src = kmap_atomic(frag->page, | ||
323 | KM_SOFTIRQ0); | ||
324 | |||
325 | memcpy(dst, src+frag->page_offset, frag->size); | ||
326 | dst += frag->size; | ||
327 | kunmap_atomic(src, KM_SOFTIRQ0); | ||
242 | } | 328 | } |
243 | copy = min(sglen, datalen); | 329 | if (padlen) { |
244 | if (j && skb_can_coalesce(skb, j, sgpg, | 330 | memset(dst, 0, padlen); |
245 | sg->offset + sgoffset)) { | 331 | padlen = 0; |
246 | skb_shinfo(skb)->frags[j - 1].size += copy; | ||
247 | } else { | ||
248 | get_page(sgpg); | ||
249 | skb_fill_page_desc(skb, j, sgpg, | ||
250 | sg->offset + sgoffset, copy); | ||
251 | } | 332 | } |
252 | sgoffset += copy; | 333 | skb_put(skb, count + padlen); |
253 | sglen -= copy; | 334 | } else { |
254 | datalen -= copy; | 335 | /* data fit into frag_list */ |
255 | } while (datalen); | 336 | for (i = 0; i < tdata->nr_frags; i++) |
337 | get_page(tdata->frags[i].page); | ||
338 | |||
339 | memcpy(skb_shinfo(skb)->frags, tdata->frags, | ||
340 | sizeof(skb_frag_t) * tdata->nr_frags); | ||
341 | skb_shinfo(skb)->nr_frags = tdata->nr_frags; | ||
342 | skb->len += count; | ||
343 | skb->data_len += count; | ||
344 | skb->truesize += count; | ||
345 | } | ||
346 | |||
256 | } else { | 347 | } else { |
257 | pg = virt_to_page(task->data); | 348 | pg = virt_to_page(task->data); |
258 | 349 | ||
259 | while (datalen) { | 350 | get_page(pg); |
260 | i = skb_shinfo(skb)->nr_frags; | 351 | skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data), |
261 | frag = &skb_shinfo(skb)->frags[i]; | 352 | count); |
262 | 353 | skb->len += count; | |
263 | get_page(pg); | 354 | skb->data_len += count; |
264 | frag->page = pg; | 355 | skb->truesize += count; |
265 | frag->page_offset = 0; | ||
266 | frag->size = min((unsigned int)PAGE_SIZE, datalen); | ||
267 | |||
268 | skb_shinfo(skb)->nr_frags++; | ||
269 | datalen -= frag->size; | ||
270 | pg++; | ||
271 | } | ||
272 | } | 356 | } |
273 | 357 | ||
274 | if (padlen) { | 358 | if (padlen) { |
275 | i = skb_shinfo(skb)->nr_frags; | 359 | i = skb_shinfo(skb)->nr_frags; |
276 | frag = &skb_shinfo(skb)->frags[i]; | 360 | get_page(pad_page); |
277 | frag->page = pad_page; | 361 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0, |
278 | frag->page_offset = 0; | 362 | padlen); |
279 | frag->size = padlen; | 363 | |
280 | skb_shinfo(skb)->nr_frags++; | 364 | skb->data_len += padlen; |
365 | skb->truesize += padlen; | ||
366 | skb->len += padlen; | ||
281 | } | 367 | } |
282 | 368 | ||
283 | datalen = count + padlen; | ||
284 | skb->data_len += datalen; | ||
285 | skb->truesize += datalen; | ||
286 | skb->len += datalen; | ||
287 | return 0; | 369 | return 0; |
288 | } | 370 | } |
289 | 371 | ||
290 | int cxgb3i_conn_xmit_pdu(struct iscsi_task *task) | 372 | int cxgb3i_conn_xmit_pdu(struct iscsi_task *task) |
291 | { | 373 | { |
292 | struct iscsi_tcp_task *tcp_task = task->dd_data; | ||
293 | struct sk_buff *skb = tcp_task->dd_data; | ||
294 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; | 374 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; |
295 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; | 375 | struct cxgb3i_conn *cconn = tcp_conn->dd_data; |
376 | struct iscsi_tcp_task *tcp_task = task->dd_data; | ||
377 | struct cxgb3i_task_data *tdata = tcp_task->dd_data; | ||
378 | struct sk_buff *skb = tdata->skb; | ||
296 | unsigned int datalen; | 379 | unsigned int datalen; |
297 | int err; | 380 | int err; |
298 | 381 | ||
@@ -300,13 +383,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task) | |||
300 | return 0; | 383 | return 0; |
301 | 384 | ||
302 | datalen = skb->data_len; | 385 | datalen = skb->data_len; |
303 | tcp_task->dd_data = NULL; | 386 | tdata->skb = NULL; |
304 | err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb); | 387 | err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb); |
305 | cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n", | ||
306 | task, skb, skb->len, skb->data_len, err); | ||
307 | if (err > 0) { | 388 | if (err > 0) { |
308 | int pdulen = err; | 389 | int pdulen = err; |
309 | 390 | ||
391 | cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n", | ||
392 | task, skb, skb->len, skb->data_len, err); | ||
393 | |||
310 | if (task->conn->hdrdgst_en) | 394 | if (task->conn->hdrdgst_en) |
311 | pdulen += ISCSI_DIGEST_SIZE; | 395 | pdulen += ISCSI_DIGEST_SIZE; |
312 | if (datalen && task->conn->datadgst_en) | 396 | if (datalen && task->conn->datadgst_en) |
@@ -325,12 +409,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task) | |||
325 | return err; | 409 | return err; |
326 | } | 410 | } |
327 | /* reset skb to send when we are called again */ | 411 | /* reset skb to send when we are called again */ |
328 | tcp_task->dd_data = skb; | 412 | tdata->skb = skb; |
329 | return -EAGAIN; | 413 | return -EAGAIN; |
330 | } | 414 | } |
331 | 415 | ||
332 | int cxgb3i_pdu_init(void) | 416 | int cxgb3i_pdu_init(void) |
333 | { | 417 | { |
418 | if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS)) | ||
419 | skb_extra_headroom = SKB_TX_HEADROOM; | ||
334 | pad_page = alloc_page(GFP_KERNEL); | 420 | pad_page = alloc_page(GFP_KERNEL); |
335 | if (!pad_page) | 421 | if (!pad_page) |
336 | return -ENOMEM; | 422 | return -ENOMEM; |
@@ -366,7 +452,9 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn) | |||
366 | skb = skb_peek(&c3cn->receive_queue); | 452 | skb = skb_peek(&c3cn->receive_queue); |
367 | while (!err && skb) { | 453 | while (!err && skb) { |
368 | __skb_unlink(skb, &c3cn->receive_queue); | 454 | __skb_unlink(skb, &c3cn->receive_queue); |
369 | read += skb_ulp_pdulen(skb); | 455 | read += skb_rx_pdulen(skb); |
456 | cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n", | ||
457 | conn, c3cn, skb, skb_rx_pdulen(skb)); | ||
370 | err = cxgb3i_conn_read_pdu_skb(conn, skb); | 458 | err = cxgb3i_conn_read_pdu_skb(conn, skb); |
371 | __kfree_skb(skb); | 459 | __kfree_skb(skb); |
372 | skb = skb_peek(&c3cn->receive_queue); | 460 | skb = skb_peek(&c3cn->receive_queue); |
@@ -377,6 +465,11 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn) | |||
377 | cxgb3i_c3cn_rx_credits(c3cn, read); | 465 | cxgb3i_c3cn_rx_credits(c3cn, read); |
378 | } | 466 | } |
379 | conn->rxdata_octets += read; | 467 | conn->rxdata_octets += read; |
468 | |||
469 | if (err) { | ||
470 | cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err); | ||
471 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | ||
472 | } | ||
380 | } | 473 | } |
381 | 474 | ||
382 | void cxgb3i_conn_tx_open(struct s3_conn *c3cn) | 475 | void cxgb3i_conn_tx_open(struct s3_conn *c3cn) |
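The rewritten cxgb3i_conn_init_pdu() first coalesces the scsi scatterlist into page fragments (sgl_seek_offset() + sgl_read_to_frags()), then either copies the payload into the skb headroom or attaches the pages as skb frags. The deciding condition, shown above, is whether the coalesced fragments plus a possible pad fragment would overflow MAX_SKB_FRAGS; a condensed restatement (illustrative helper, not part of the patch):

static bool payload_must_be_copied(unsigned int nr_frags, unsigned int padlen)
{
	return nr_frags > MAX_SKB_FRAGS ||
	       (padlen && nr_frags == MAX_SKB_FRAGS);
}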
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
index a3f685cc2362..0770b23d90da 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
@@ -53,7 +53,7 @@ struct cpl_rx_data_ddp_norss { | |||
53 | #define ULP2_FLAG_DCRC_ERROR 0x20 | 53 | #define ULP2_FLAG_DCRC_ERROR 0x20 |
54 | #define ULP2_FLAG_PAD_ERROR 0x40 | 54 | #define ULP2_FLAG_PAD_ERROR 0x40 |
55 | 55 | ||
56 | void cxgb3i_conn_closing(struct s3_conn *); | 56 | void cxgb3i_conn_closing(struct s3_conn *c3cn); |
57 | void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn); | 57 | void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn); |
58 | void cxgb3i_conn_tx_open(struct s3_conn *c3cn); | 58 | void cxgb3i_conn_tx_open(struct s3_conn *c3cn); |
59 | #endif | 59 | #endif |
diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
index bf7fe6fc0820..8862758006c0 100644
--- a/drivers/scsi/fcoe/fc_transport_fcoe.c
+++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
@@ -33,19 +33,19 @@ static LIST_HEAD(fcoe_transports); | |||
33 | static DEFINE_MUTEX(fcoe_transports_lock); | 33 | static DEFINE_MUTEX(fcoe_transports_lock); |
34 | 34 | ||
35 | /** | 35 | /** |
36 | * fcoe_transport_default - returns ptr to the default transport fcoe_sw | 36 | * fcoe_transport_default() - Returns ptr to the default transport fcoe_sw |
37 | **/ | 37 | */ |
38 | struct fcoe_transport *fcoe_transport_default(void) | 38 | struct fcoe_transport *fcoe_transport_default(void) |
39 | { | 39 | { |
40 | return &fcoe_sw_transport; | 40 | return &fcoe_sw_transport; |
41 | } | 41 | } |
42 | 42 | ||
43 | /** | 43 | /** |
44 | * fcoe_transport_to_pcidev - get the pci dev from a netdev | 44 | * fcoe_transport_to_pcidev() - get the pci dev from a netdev |
45 | * @netdev: the netdev that pci dev will be retrived from | 45 | * @netdev: the netdev that pci dev will be retrived from |
46 | * | 46 | * |
47 | * Returns: NULL or the corrsponding pci_dev | 47 | * Returns: NULL or the corrsponding pci_dev |
48 | **/ | 48 | */ |
49 | struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev) | 49 | struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev) |
50 | { | 50 | { |
51 | if (!netdev->dev.parent) | 51 | if (!netdev->dev.parent) |
@@ -54,18 +54,17 @@ struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev) | |||
54 | } | 54 | } |
55 | 55 | ||
56 | /** | 56 | /** |
57 | * fcoe_transport_device_lookup - find out netdev is managed by the | 57 | * fcoe_transport_device_lookup() - Lookup a transport |
58 | * transport | ||
59 | * assign a transport to a device | ||
60 | * @netdev: the netdev the transport to be attached to | 58 | * @netdev: the netdev the transport to be attached to |
61 | * | 59 | * |
62 | * This will look for existing offload driver, if not found, it falls back to | 60 | * This will look for existing offload driver, if not found, it falls back to |
63 | * the default sw hba (fcoe_sw) as its fcoe transport. | 61 | * the default sw hba (fcoe_sw) as its fcoe transport. |
64 | * | 62 | * |
65 | * Returns: 0 for success | 63 | * Returns: 0 for success |
66 | **/ | 64 | */ |
67 | static struct fcoe_transport_internal *fcoe_transport_device_lookup( | 65 | static struct fcoe_transport_internal * |
68 | struct fcoe_transport *t, struct net_device *netdev) | 66 | fcoe_transport_device_lookup(struct fcoe_transport *t, |
67 | struct net_device *netdev) | ||
69 | { | 68 | { |
70 | struct fcoe_transport_internal *ti; | 69 | struct fcoe_transport_internal *ti; |
71 | 70 | ||
@@ -81,14 +80,14 @@ static struct fcoe_transport_internal *fcoe_transport_device_lookup( | |||
81 | return NULL; | 80 | return NULL; |
82 | } | 81 | } |
83 | /** | 82 | /** |
84 | * fcoe_transport_device_add - assign a transport to a device | 83 | * fcoe_transport_device_add() - Assign a transport to a device |
85 | * @netdev: the netdev the transport to be attached to | 84 | * @netdev: the netdev the transport to be attached to |
86 | * | 85 | * |
87 | * This will look for existing offload driver, if not found, it falls back to | 86 | * This will look for existing offload driver, if not found, it falls back to |
88 | * the default sw hba (fcoe_sw) as its fcoe transport. | 87 | * the default sw hba (fcoe_sw) as its fcoe transport. |
89 | * | 88 | * |
90 | * Returns: 0 for success | 89 | * Returns: 0 for success |
91 | **/ | 90 | */ |
92 | static int fcoe_transport_device_add(struct fcoe_transport *t, | 91 | static int fcoe_transport_device_add(struct fcoe_transport *t, |
93 | struct net_device *netdev) | 92 | struct net_device *netdev) |
94 | { | 93 | { |
@@ -123,14 +122,14 @@ static int fcoe_transport_device_add(struct fcoe_transport *t, | |||
123 | } | 122 | } |
124 | 123 | ||
125 | /** | 124 | /** |
126 | * fcoe_transport_device_remove - remove a device from its transport | 125 | * fcoe_transport_device_remove() - Remove a device from its transport |
127 | * @netdev: the netdev the transport to be attached to | 126 | * @netdev: the netdev the transport to be attached to |
128 | * | 127 | * |
129 | * this removes the device from the transport so the given transport will | 128 | * This removes the device from the transport so the given transport will |
130 | * not manage this device any more | 129 | * not manage this device any more |
131 | * | 130 | * |
132 | * Returns: 0 for success | 131 | * Returns: 0 for success |
133 | **/ | 132 | */ |
134 | static int fcoe_transport_device_remove(struct fcoe_transport *t, | 133 | static int fcoe_transport_device_remove(struct fcoe_transport *t, |
135 | struct net_device *netdev) | 134 | struct net_device *netdev) |
136 | { | 135 | { |
@@ -155,13 +154,13 @@ static int fcoe_transport_device_remove(struct fcoe_transport *t, | |||
155 | } | 154 | } |
156 | 155 | ||
157 | /** | 156 | /** |
158 | * fcoe_transport_device_remove_all - remove all from transport devlist | 157 | * fcoe_transport_device_remove_all() - Remove all from transport devlist |
159 | * | 158 | * |
160 | * this removes the device from the transport so the given transport will | 159 | * This removes the device from the transport so the given transport will |
161 | * not manage this device any more | 160 | * not manage this device any more |
162 | * | 161 | * |
163 | * Returns: 0 for success | 162 | * Returns: 0 for success |
164 | **/ | 163 | */ |
165 | static void fcoe_transport_device_remove_all(struct fcoe_transport *t) | 164 | static void fcoe_transport_device_remove_all(struct fcoe_transport *t) |
166 | { | 165 | { |
167 | struct fcoe_transport_internal *ti, *tmp; | 166 | struct fcoe_transport_internal *ti, *tmp; |
@@ -175,18 +174,18 @@ static void fcoe_transport_device_remove_all(struct fcoe_transport *t) | |||
175 | } | 174 | } |
176 | 175 | ||
177 | /** | 176 | /** |
178 | * fcoe_transport_match - use the bus device match function to match the hw | 177 | * fcoe_transport_match() - Use the bus device match function to match the hw |
179 | * @t: the fcoe transport | 178 | * @t: The fcoe transport to check |
180 | * @netdev: | 179 | * @netdev: The netdev to match against |
181 | * | 180 | * |
182 | * This function is used to check if the givne transport wants to manage the | 181 | * This function is used to check if the given transport wants to manage the |
183 | * input netdev. if the transports implements the match function, it will be | 182 | * input netdev. if the transports implements the match function, it will be |
184 | * called, o.w. we just compare the pci vendor and device id. | 183 | * called, o.w. we just compare the pci vendor and device id. |
185 | * | 184 | * |
186 | * Returns: true for match up | 185 | * Returns: true for match up |
187 | **/ | 186 | */ |
188 | static bool fcoe_transport_match(struct fcoe_transport *t, | 187 | static bool fcoe_transport_match(struct fcoe_transport *t, |
189 | struct net_device *netdev) | 188 | struct net_device *netdev) |
190 | { | 189 | { |
191 | /* match transport by vendor and device id */ | 190 | /* match transport by vendor and device id */ |
192 | struct pci_dev *pci; | 191 | struct pci_dev *pci; |
@@ -210,17 +209,17 @@ static bool fcoe_transport_match(struct fcoe_transport *t, | |||
210 | } | 209 | } |
211 | 210 | ||
212 | /** | 211 | /** |
213 | * fcoe_transport_lookup - check if the transport is already registered | 212 | * fcoe_transport_lookup() - Check if the transport is already registered |
214 | * @netdev: the netdev used to look up a registered transport | 213 | * @netdev: the netdev used to look up a registered transport |
215 | * | 214 | * |
216 | * This compares the parent device (pci) vendor and device id | 215 | * This compares the parent device (pci) vendor and device id |
217 | * | 216 | * |
218 | * Returns: NULL if not found | 217 | * Returns: NULL if not found |
219 | * | 218 | * |
220 | * TODO - return default sw transport if no other transport is found | 219 | * TODO: return default sw transport if no other transport is found |
221 | **/ | 220 | */ |
222 | static struct fcoe_transport *fcoe_transport_lookup( | 221 | static struct fcoe_transport * |
223 | struct net_device *netdev) | 222 | fcoe_transport_lookup(struct net_device *netdev) |
224 | { | 223 | { |
225 | struct fcoe_transport *t; | 224 | struct fcoe_transport *t; |
226 | 225 | ||
@@ -239,11 +238,11 @@ static struct fcoe_transport *fcoe_transport_lookup( | |||
239 | } | 238 | } |
240 | 239 | ||
241 | /** | 240 | /** |
242 | * fcoe_transport_register - adds a fcoe transport to the fcoe transports list | 241 | * fcoe_transport_register() - Adds a fcoe transport to the fcoe transports list |
243 | * @t: ptr to the fcoe transport to be added | 242 | * @t: ptr to the fcoe transport to be added |
244 | * | 243 | * |
245 | * Returns: 0 for success | 244 | * Returns: 0 for success |
246 | **/ | 245 | */ |
247 | int fcoe_transport_register(struct fcoe_transport *t) | 246 | int fcoe_transport_register(struct fcoe_transport *t) |
248 | { | 247 | { |
249 | struct fcoe_transport *tt; | 248 | struct fcoe_transport *tt; |
@@ -259,9 +258,6 @@ int fcoe_transport_register(struct fcoe_transport *t) | |||
259 | list_add_tail(&t->list, &fcoe_transports); | 258 | list_add_tail(&t->list, &fcoe_transports); |
260 | mutex_unlock(&fcoe_transports_lock); | 259 | mutex_unlock(&fcoe_transports_lock); |
261 | 260 | ||
262 | mutex_init(&t->devlock); | ||
263 | INIT_LIST_HEAD(&t->devlist); | ||
264 | |||
265 | printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name); | 261 | printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name); |
266 | 262 | ||
267 | return 0; | 263 | return 0; |
@@ -269,11 +265,11 @@ int fcoe_transport_register(struct fcoe_transport *t) | |||
269 | EXPORT_SYMBOL_GPL(fcoe_transport_register); | 265 | EXPORT_SYMBOL_GPL(fcoe_transport_register); |
270 | 266 | ||
271 | /** | 267 | /** |
272 | * fcoe_transport_unregister - remove the transport from the fcoe transports list | 268 | * fcoe_transport_unregister() - Remove the transport from the fcoe transports list |
273 | * @t: ptr to the fcoe transport to be removed | 269 | * @t: ptr to the fcoe transport to be removed |
274 | * | 270 | * |
275 | * Returns: 0 for success | 271 | * Returns: 0 for success |
276 | **/ | 272 | */ |
277 | int fcoe_transport_unregister(struct fcoe_transport *t) | 273 | int fcoe_transport_unregister(struct fcoe_transport *t) |
278 | { | 274 | { |
279 | struct fcoe_transport *tt, *tmp; | 275 | struct fcoe_transport *tt, *tmp; |
@@ -294,8 +290,8 @@ int fcoe_transport_unregister(struct fcoe_transport *t) | |||
294 | } | 290 | } |
295 | EXPORT_SYMBOL_GPL(fcoe_transport_unregister); | 291 | EXPORT_SYMBOL_GPL(fcoe_transport_unregister); |
296 | 292 | ||
297 | /* | 293 | /** |
298 | * fcoe_load_transport_driver - load an offload driver by alias name | 294 | * fcoe_load_transport_driver() - Load an offload driver by alias name |
299 | * @netdev: the target net device | 295 | * @netdev: the target net device |
300 | * | 296 | * |
301 | * Requests an offload driver module as the fcoe transport; if that fails, it | 297 | * Requests an offload driver module as the fcoe transport; if that fails, it |
@@ -307,7 +303,7 @@ EXPORT_SYMBOL_GPL(fcoe_transport_unregister); | |||
307 | * 3. pure hw fcoe hba may not have netdev | 303 | * 3. pure hw fcoe hba may not have netdev |
308 | * | 304 | * |
309 | * Returns: 0 for success | 305 | * Returns: 0 for success |
310 | **/ | 306 | */ |
311 | int fcoe_load_transport_driver(struct net_device *netdev) | 307 | int fcoe_load_transport_driver(struct net_device *netdev) |
312 | { | 308 | { |
313 | struct pci_dev *pci; | 309 | struct pci_dev *pci; |
@@ -335,14 +331,14 @@ int fcoe_load_transport_driver(struct net_device *netdev) | |||
335 | EXPORT_SYMBOL_GPL(fcoe_load_transport_driver); | 331 | EXPORT_SYMBOL_GPL(fcoe_load_transport_driver); |
336 | 332 | ||
337 | /** | 333 | /** |
338 | * fcoe_transport_attach - load transport to fcoe | 334 | * fcoe_transport_attach() - Load transport to fcoe |
339 | * @netdev: the netdev the transport to be attached to | 335 | * @netdev: the netdev the transport to be attached to |
340 | * | 336 | * |
341 | * This will look for an existing offload driver; if not found, it falls back to | 337 | * This will look for an existing offload driver; if not found, it falls back to |
342 | * the default sw hba (fcoe_sw) as its fcoe transport. | 338 | * the default sw hba (fcoe_sw) as its fcoe transport. |
343 | * | 339 | * |
344 | * Returns: 0 for success | 340 | * Returns: 0 for success |
345 | **/ | 341 | */ |
346 | int fcoe_transport_attach(struct net_device *netdev) | 342 | int fcoe_transport_attach(struct net_device *netdev) |
347 | { | 343 | { |
348 | struct fcoe_transport *t; | 344 | struct fcoe_transport *t; |
@@ -373,11 +369,11 @@ int fcoe_transport_attach(struct net_device *netdev) | |||
373 | EXPORT_SYMBOL_GPL(fcoe_transport_attach); | 369 | EXPORT_SYMBOL_GPL(fcoe_transport_attach); |
374 | 370 | ||
375 | /** | 371 | /** |
376 | * fcoe_transport_release - unload transport from fcoe | 372 | * fcoe_transport_release() - Unload transport from fcoe |
377 | * @netdev: the net device on which fcoe is to be released | 373 | * @netdev: the net device on which fcoe is to be released |
378 | * | 374 | * |
379 | * Returns: 0 for success | 375 | * Returns: 0 for success |
380 | **/ | 376 | */ |
381 | int fcoe_transport_release(struct net_device *netdev) | 377 | int fcoe_transport_release(struct net_device *netdev) |
382 | { | 378 | { |
383 | struct fcoe_transport *t; | 379 | struct fcoe_transport *t; |
@@ -410,12 +406,12 @@ int fcoe_transport_release(struct net_device *netdev) | |||
410 | EXPORT_SYMBOL_GPL(fcoe_transport_release); | 406 | EXPORT_SYMBOL_GPL(fcoe_transport_release); |
411 | 407 | ||
412 | /** | 408 | /** |
413 | * fcoe_transport_init - initializes fcoe transport layer | 409 | * fcoe_transport_init() - Initializes fcoe transport layer |
414 | * | 410 | * |
415 | * This prepares for the fcoe transport layer | 411 | * This prepares for the fcoe transport layer |
416 | * | 412 | * |
417 | * Returns: none | 413 | * Returns: none |
418 | **/ | 414 | */ |
419 | int __init fcoe_transport_init(void) | 415 | int __init fcoe_transport_init(void) |
420 | { | 416 | { |
421 | INIT_LIST_HEAD(&fcoe_transports); | 417 | INIT_LIST_HEAD(&fcoe_transports); |
@@ -424,12 +420,13 @@ int __init fcoe_transport_init(void) | |||
424 | } | 420 | } |
425 | 421 | ||
426 | /** | 422 | /** |
427 | * fcoe_transport_exit - cleans up the fcoe transport layer | 423 | * fcoe_transport_exit() - Cleans up the fcoe transport layer |
424 | * | ||
428 | * This cleans up the fcoe transport layer, removing any transport on the list; | 425 | * This cleans up the fcoe transport layer, removing any transport on the list; |
429 | * note that the transport destroy func is not called here. | 426 | * note that the transport destroy func is not called here. |
430 | * | 427 | * |
431 | * Returns: none | 428 | * Returns: none |
432 | **/ | 429 | */ |
433 | int __exit fcoe_transport_exit(void) | 430 | int __exit fcoe_transport_exit(void) |
434 | { | 431 | { |
435 | struct fcoe_transport *t, *tmp; | 432 | struct fcoe_transport *t, *tmp; |
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c index dc4cd5e25760..da210eba1941 100644 --- a/drivers/scsi/fcoe/fcoe_sw.c +++ b/drivers/scsi/fcoe/fcoe_sw.c | |||
@@ -104,19 +104,19 @@ static struct scsi_host_template fcoe_sw_shost_template = { | |||
104 | .max_sectors = 0xffff, | 104 | .max_sectors = 0xffff, |
105 | }; | 105 | }; |
106 | 106 | ||
107 | /* | 107 | /** |
108 | * fcoe_sw_lport_config - sets up the fc_lport | 108 | * fcoe_sw_lport_config() - sets up the fc_lport |
109 | * @lp: ptr to the fc_lport | 109 | * @lp: ptr to the fc_lport |
110 | * @shost: ptr to the parent scsi host | 110 | * @shost: ptr to the parent scsi host |
111 | * | 111 | * |
112 | * Returns: 0 for success | 112 | * Returns: 0 for success |
113 | * | ||
114 | */ | 113 | */ |
115 | static int fcoe_sw_lport_config(struct fc_lport *lp) | 114 | static int fcoe_sw_lport_config(struct fc_lport *lp) |
116 | { | 115 | { |
117 | int i = 0; | 116 | int i = 0; |
118 | 117 | ||
119 | lp->link_status = 0; | 118 | lp->link_up = 0; |
119 | lp->qfull = 0; | ||
120 | lp->max_retry_count = 3; | 120 | lp->max_retry_count = 3; |
121 | lp->e_d_tov = 2 * 1000; /* FC-FS default */ | 121 | lp->e_d_tov = 2 * 1000; /* FC-FS default */ |
122 | lp->r_a_tov = 2 * 2 * 1000; | 122 | lp->r_a_tov = 2 * 2 * 1000; |
@@ -136,16 +136,14 @@ static int fcoe_sw_lport_config(struct fc_lport *lp) | |||
136 | return 0; | 136 | return 0; |
137 | } | 137 | } |
138 | 138 | ||
139 | /* | 139 | /** |
140 | * fcoe_sw_netdev_config - sets up fcoe_softc for lport and network | 140 | * fcoe_sw_netdev_config() - Set up netdev for SW FCoE |
141 | * related properties | ||
142 | * @lp : ptr to the fc_lport | 141 | * @lp : ptr to the fc_lport |
143 | * @netdev : ptr to the associated netdevice struct | 142 | * @netdev : ptr to the associated netdevice struct |
144 | * | 143 | * |
145 | * Must be called after fcoe_sw_lport_config() as it will use lport mutex | 144 | * Must be called after fcoe_sw_lport_config() as it will use lport mutex |
146 | * | 145 | * |
147 | * Returns : 0 for success | 146 | * Returns : 0 for success |
148 | * | ||
149 | */ | 147 | */ |
150 | static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev) | 148 | static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev) |
151 | { | 149 | { |
@@ -181,9 +179,8 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev) | |||
181 | if (fc_set_mfs(lp, mfs)) | 179 | if (fc_set_mfs(lp, mfs)) |
182 | return -EINVAL; | 180 | return -EINVAL; |
183 | 181 | ||
184 | lp->link_status = ~FC_PAUSE & ~FC_LINK_UP; | ||
185 | if (!fcoe_link_ok(lp)) | 182 | if (!fcoe_link_ok(lp)) |
186 | lp->link_status |= FC_LINK_UP; | 183 | lp->link_up = 1; |
187 | 184 | ||
188 | /* offload features support */ | 185 | /* offload features support */ |
189 | if (fc->real_dev->features & NETIF_F_SG) | 186 | if (fc->real_dev->features & NETIF_F_SG) |
@@ -191,6 +188,7 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev) | |||
191 | 188 | ||
192 | 189 | ||
193 | skb_queue_head_init(&fc->fcoe_pending_queue); | 190 | skb_queue_head_init(&fc->fcoe_pending_queue); |
191 | fc->fcoe_pending_queue_active = 0; | ||
194 | 192 | ||
195 | /* setup Source Mac Address */ | 193 | /* setup Source Mac Address */ |
196 | memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr, | 194 | memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr, |
@@ -224,16 +222,15 @@ static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev) | |||
224 | return 0; | 222 | return 0; |
225 | } | 223 | } |
226 | 224 | ||
227 | /* | 225 | /** |
228 | * fcoe_sw_shost_config - sets up fc_lport->host | 226 | * fcoe_sw_shost_config() - Sets up fc_lport->host |
229 | * @lp : ptr to the fc_lport | 227 | * @lp : ptr to the fc_lport |
230 | * @shost : ptr to the associated scsi host | 228 | * @shost : ptr to the associated scsi host |
231 | * @dev : device associated to scsi host | 229 | * @dev : device associated to scsi host |
232 | * | 230 | * |
233 | * Must be called after fcoe_sw_lport_config) and fcoe_sw_netdev_config() | 231 | * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config() |
234 | * | 232 | * |
235 | * Returns : 0 for success | 233 | * Returns : 0 for success |
236 | * | ||
237 | */ | 234 | */ |
238 | static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, | 235 | static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, |
239 | struct device *dev) | 236 | struct device *dev) |
@@ -261,8 +258,8 @@ static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost, | |||
261 | return 0; | 258 | return 0; |
262 | } | 259 | } |
263 | 260 | ||
264 | /* | 261 | /** |
265 | * fcoe_sw_em_config - allocates em for this lport | 262 | * fcoe_sw_em_config() - allocates em for this lport |
266 | * @lp: the port that the em is to be allocated for | 263 | * @lp: the port that the em is to be allocated for |
267 | * | 264 | * |
268 | * Returns : 0 on success | 265 | * Returns : 0 on success |
@@ -279,8 +276,8 @@ static inline int fcoe_sw_em_config(struct fc_lport *lp) | |||
279 | return 0; | 276 | return 0; |
280 | } | 277 | } |
281 | 278 | ||
282 | /* | 279 | /** |
283 | * fcoe_sw_destroy - FCoE software HBA tear-down function | 280 | * fcoe_sw_destroy() - FCoE software HBA tear-down function |
284 | * @netdev: ptr to the associated net_device | 281 | * @netdev: ptr to the associated net_device |
285 | * | 282 | * |
286 | * Returns: 0 on success | 283 | * Returns: 0 on success |
@@ -301,7 +298,7 @@ static int fcoe_sw_destroy(struct net_device *netdev) | |||
301 | if (!lp) | 298 | if (!lp) |
302 | return -ENODEV; | 299 | return -ENODEV; |
303 | 300 | ||
304 | fc = fcoe_softc(lp); | 301 | fc = lport_priv(lp); |
305 | 302 | ||
306 | /* Logout of the fabric */ | 303 | /* Logout of the fabric */ |
307 | fc_fabric_logoff(lp); | 304 | fc_fabric_logoff(lp); |
@@ -353,8 +350,8 @@ static struct libfc_function_template fcoe_sw_libfc_fcn_templ = { | |||
353 | .frame_send = fcoe_xmit, | 350 | .frame_send = fcoe_xmit, |
354 | }; | 351 | }; |
355 | 352 | ||
356 | /* | 353 | /** |
357 | * fcoe_sw_create - this function creates the fcoe interface | 354 | * fcoe_sw_create() - this function creates the fcoe interface |
358 | * @netdev: pointer to the associated netdevice | 355 | * @netdev: pointer to the associated netdevice |
359 | * | 356 | * |
360 | * Creates fc_lport struct and scsi_host for lport, configures lport | 357 | * Creates fc_lport struct and scsi_host for lport, configures lport |
@@ -440,8 +437,8 @@ out_host_put: | |||
440 | return rc; | 437 | return rc; |
441 | } | 438 | } |
442 | 439 | ||
443 | /* | 440 | /** |
444 | * fcoe_sw_match - the fcoe sw transport match function | 441 | * fcoe_sw_match() - The FCoE SW transport match function |
445 | * | 442 | * |
446 | * Returns : false always | 443 | * Returns : false always |
447 | */ | 444 | */ |
@@ -461,8 +458,8 @@ struct fcoe_transport fcoe_sw_transport = { | |||
461 | .device = 0xffff, | 458 | .device = 0xffff, |
462 | }; | 459 | }; |
463 | 460 | ||
464 | /* | 461 | /** |
465 | * fcoe_sw_init - registers fcoe_sw_transport | 462 | * fcoe_sw_init() - Registers fcoe_sw_transport |
466 | * | 463 | * |
467 | * Returns : 0 on success | 464 | * Returns : 0 on success |
468 | */ | 465 | */ |
@@ -471,17 +468,22 @@ int __init fcoe_sw_init(void) | |||
471 | /* attach to scsi transport */ | 468 | /* attach to scsi transport */ |
472 | scsi_transport_fcoe_sw = | 469 | scsi_transport_fcoe_sw = |
473 | fc_attach_transport(&fcoe_sw_transport_function); | 470 | fc_attach_transport(&fcoe_sw_transport_function); |
471 | |||
474 | if (!scsi_transport_fcoe_sw) { | 472 | if (!scsi_transport_fcoe_sw) { |
475 | printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n"); | 473 | printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n"); |
476 | return -ENODEV; | 474 | return -ENODEV; |
477 | } | 475 | } |
476 | |||
477 | mutex_init(&fcoe_sw_transport.devlock); | ||
478 | INIT_LIST_HEAD(&fcoe_sw_transport.devlist); | ||
479 | |||
478 | /* register sw transport */ | 480 | /* register sw transport */ |
479 | fcoe_transport_register(&fcoe_sw_transport); | 481 | fcoe_transport_register(&fcoe_sw_transport); |
480 | return 0; | 482 | return 0; |
481 | } | 483 | } |
482 | 484 | ||
483 | /* | 485 | /** |
484 | * fcoe_sw_exit - unregisters fcoe_sw_transport | 486 | * fcoe_sw_exit() - Unregisters fcoe_sw_transport |
485 | * | 487 | * |
486 | * Returns : 0 on success | 488 | * Returns : 0 on success |
487 | */ | 489 | */ |
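In the fcoe_sw.c hunks above, the devlock/devlist initialization that used to live in fcoe_transport_register() now happens in fcoe_sw_init(): the transport provider prepares its own device list before handing the transport to the core. A hedged sketch of that ordering for some other, purely hypothetical provider (illustrative only, not part of this patch):

/* hypothetical provider module, for illustration only */
static struct fcoe_transport my_transport = {
	.name = "my_fcoe_offload",
};

static int __init my_transport_init(void)
{
	/* the provider owns and initializes its device list and lock ... */
	mutex_init(&my_transport.devlock);
	INIT_LIST_HEAD(&my_transport.devlist);

	/* ... and only then registers the transport with the core */
	return fcoe_transport_register(&my_transport);
}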
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index e419f486cdb3..5548bf3bb58b 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c | |||
@@ -49,6 +49,7 @@ | |||
49 | static int debug_fcoe; | 49 | static int debug_fcoe; |
50 | 50 | ||
51 | #define FCOE_MAX_QUEUE_DEPTH 256 | 51 | #define FCOE_MAX_QUEUE_DEPTH 256 |
52 | #define FCOE_LOW_QUEUE_DEPTH 32 | ||
52 | 53 | ||
53 | /* destination address mode */ | 54 | /* destination address mode */ |
54 | #define FCOE_GW_ADDR_MODE 0x00 | 55 | #define FCOE_GW_ADDR_MODE 0x00 |
@@ -69,8 +70,6 @@ struct fcoe_percpu_s *fcoe_percpu[NR_CPUS]; | |||
69 | 70 | ||
70 | /* Function Prototypes */ | 71 | /* Function Prototypes */ |
71 | static int fcoe_check_wait_queue(struct fc_lport *); | 72 | static int fcoe_check_wait_queue(struct fc_lport *); |
72 | static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *); | ||
73 | static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *); | ||
74 | static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *); | 73 | static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *); |
75 | #ifdef CONFIG_HOTPLUG_CPU | 74 | #ifdef CONFIG_HOTPLUG_CPU |
76 | static int fcoe_cpu_callback(struct notifier_block *, ulong, void *); | 75 | static int fcoe_cpu_callback(struct notifier_block *, ulong, void *); |
@@ -91,13 +90,13 @@ static struct notifier_block fcoe_cpu_notifier = { | |||
91 | }; | 90 | }; |
92 | 91 | ||
93 | /** | 92 | /** |
94 | * fcoe_create_percpu_data - creates the associated cpu data | 93 | * fcoe_create_percpu_data() - creates the associated cpu data |
95 | * @cpu: index for the cpu where fcoe cpu data will be created | 94 | * @cpu: index for the cpu where fcoe cpu data will be created |
96 | * | 95 | * |
97 | * create percpu stats block, from cpu add notifier | 96 | * create percpu stats block, from cpu add notifier |
98 | * | 97 | * |
99 | * Returns: none | 98 | * Returns: none |
100 | **/ | 99 | */ |
101 | static void fcoe_create_percpu_data(int cpu) | 100 | static void fcoe_create_percpu_data(int cpu) |
102 | { | 101 | { |
103 | struct fc_lport *lp; | 102 | struct fc_lport *lp; |
@@ -115,13 +114,13 @@ static void fcoe_create_percpu_data(int cpu) | |||
115 | } | 114 | } |
116 | 115 | ||
117 | /** | 116 | /** |
118 | * fcoe_destroy_percpu_data - destroys the associated cpu data | 117 | * fcoe_destroy_percpu_data() - destroys the associated cpu data |
119 | * @cpu: index for the cpu where fcoe cpu data will be destroyed | 118 | * @cpu: index for the cpu where fcoe cpu data will be destroyed |
120 | * | 119 | * |
121 | * destroy percpu stats block called by cpu add/remove notifier | 120 | * destroy percpu stats block called by cpu add/remove notifier |
122 | * | 121 | * |
123 | * Returns: none | 122 | * Returns: none |
124 | **/ | 123 | */ |
125 | static void fcoe_destroy_percpu_data(int cpu) | 124 | static void fcoe_destroy_percpu_data(int cpu) |
126 | { | 125 | { |
127 | struct fc_lport *lp; | 126 | struct fc_lport *lp; |
@@ -137,7 +136,7 @@ static void fcoe_destroy_percpu_data(int cpu) | |||
137 | } | 136 | } |
138 | 137 | ||
139 | /** | 138 | /** |
140 | * fcoe_cpu_callback - fcoe cpu hotplug event callback | 139 | * fcoe_cpu_callback() - fcoe cpu hotplug event callback |
141 | * @nfb: callback data block | 140 | * @nfb: callback data block |
142 | * @action: event triggering the callback | 141 | * @action: event triggering the callback |
143 | * @hcpu: index for the cpu of this event | 142 | * @hcpu: index for the cpu of this event |
@@ -145,7 +144,7 @@ static void fcoe_destroy_percpu_data(int cpu) | |||
145 | * this creates or destroys per cpu data for fcoe | 144 | * this creates or destroys per cpu data for fcoe |
146 | * | 145 | * |
147 | * Returns NOTIFY_OK always. | 146 | * Returns NOTIFY_OK always. |
148 | **/ | 147 | */ |
149 | static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, | 148 | static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, |
150 | void *hcpu) | 149 | void *hcpu) |
151 | { | 150 | { |
@@ -166,7 +165,7 @@ static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, | |||
166 | #endif /* CONFIG_HOTPLUG_CPU */ | 165 | #endif /* CONFIG_HOTPLUG_CPU */ |
167 | 166 | ||
168 | /** | 167 | /** |
169 | * fcoe_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ | 168 | * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ |
170 | * @skb: the receive skb | 169 | * @skb: the receive skb |
171 | * @dev: associated net device | 170 | * @dev: associated net device |
172 | * @ptype: context | 171 | * @ptype: context |
@@ -175,7 +174,7 @@ static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action, | |||
175 | * this function will receive the packet and build fc frame and pass it up | 174 | * this function will receive the packet and build fc frame and pass it up |
176 | * | 175 | * |
177 | * Returns: 0 for success | 176 | * Returns: 0 for success |
178 | **/ | 177 | */ |
179 | int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, | 178 | int fcoe_rcv(struct sk_buff *skb, struct net_device *dev, |
180 | struct packet_type *ptype, struct net_device *olddev) | 179 | struct packet_type *ptype, struct net_device *olddev) |
181 | { | 180 | { |
@@ -265,11 +264,11 @@ err2: | |||
265 | EXPORT_SYMBOL_GPL(fcoe_rcv); | 264 | EXPORT_SYMBOL_GPL(fcoe_rcv); |
266 | 265 | ||
267 | /** | 266 | /** |
268 | * fcoe_start_io - pass to netdev to start xmit for fcoe | 267 | * fcoe_start_io() - pass to netdev to start xmit for fcoe |
269 | * @skb: the skb to be xmitted | 268 | * @skb: the skb to be xmitted |
270 | * | 269 | * |
271 | * Returns: 0 for success | 270 | * Returns: 0 for success |
272 | **/ | 271 | */ |
273 | static inline int fcoe_start_io(struct sk_buff *skb) | 272 | static inline int fcoe_start_io(struct sk_buff *skb) |
274 | { | 273 | { |
275 | int rc; | 274 | int rc; |
@@ -283,12 +282,12 @@ static inline int fcoe_start_io(struct sk_buff *skb) | |||
283 | } | 282 | } |
284 | 283 | ||
285 | /** | 284 | /** |
286 | * fcoe_get_paged_crc_eof - in case we need to alloc a page for crc_eof | 285 | * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof |
287 | * @skb: the skb to be xmitted | 286 | * @skb: the skb to be xmitted |
288 | * @tlen: total len | 287 | * @tlen: total len |
289 | * | 288 | * |
290 | * Returns: 0 for success | 289 | * Returns: 0 for success |
291 | **/ | 290 | */ |
292 | static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) | 291 | static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) |
293 | { | 292 | { |
294 | struct fcoe_percpu_s *fps; | 293 | struct fcoe_percpu_s *fps; |
@@ -326,13 +325,12 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) | |||
326 | } | 325 | } |
327 | 326 | ||
328 | /** | 327 | /** |
329 | * fcoe_fc_crc - calculates FC CRC in this fcoe skb | 328 | * fcoe_fc_crc() - calculates FC CRC in this fcoe skb |
330 | * @fp: the fc_frame containing data to be checksummed | 329 | * @fp: the fc_frame containing data to be checksummed |
331 | * | 330 | * |
332 | * This uses crc32() to calculate the crc for fc frame | 331 | * This uses crc32() to calculate the crc for fc frame |
333 | * Return : 32 bit crc | 332 | * Return : 32 bit crc |
334 | * | 333 | */ |
335 | **/ | ||
336 | u32 fcoe_fc_crc(struct fc_frame *fp) | 334 | u32 fcoe_fc_crc(struct fc_frame *fp) |
337 | { | 335 | { |
338 | struct sk_buff *skb = fp_skb(fp); | 336 | struct sk_buff *skb = fp_skb(fp); |
@@ -363,13 +361,12 @@ u32 fcoe_fc_crc(struct fc_frame *fp) | |||
363 | EXPORT_SYMBOL_GPL(fcoe_fc_crc); | 361 | EXPORT_SYMBOL_GPL(fcoe_fc_crc); |
364 | 362 | ||
365 | /** | 363 | /** |
366 | * fcoe_xmit - FCoE frame transmit function | 364 | * fcoe_xmit() - FCoE frame transmit function |
367 | * @lp: the associated local port | 365 | * @lp: the associated local port |
368 | * @fp: the fc_frame to be transmitted | 366 | * @fp: the fc_frame to be transmitted |
369 | * | 367 | * |
370 | * Return : 0 for success | 368 | * Return : 0 for success |
371 | * | 369 | */ |
372 | **/ | ||
373 | int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | 370 | int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) |
374 | { | 371 | { |
375 | int wlen, rc = 0; | 372 | int wlen, rc = 0; |
@@ -389,7 +386,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
389 | 386 | ||
390 | WARN_ON((fr_len(fp) % sizeof(u32)) != 0); | 387 | WARN_ON((fr_len(fp) % sizeof(u32)) != 0); |
391 | 388 | ||
392 | fc = fcoe_softc(lp); | 389 | fc = lport_priv(lp); |
393 | /* | 390 | /* |
394 | * if it is a flogi then we need to learn gw-addr | 391 | * if it is a flogi then we need to learn gw-addr |
395 | * and my own fcid | 392 | * and my own fcid |
@@ -439,7 +436,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
439 | if (skb_is_nonlinear(skb)) { | 436 | if (skb_is_nonlinear(skb)) { |
440 | skb_frag_t *frag; | 437 | skb_frag_t *frag; |
441 | if (fcoe_get_paged_crc_eof(skb, tlen)) { | 438 | if (fcoe_get_paged_crc_eof(skb, tlen)) { |
442 | kfree(skb); | 439 | kfree_skb(skb); |
443 | return -ENOMEM; | 440 | return -ENOMEM; |
444 | } | 441 | } |
445 | frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; | 442 | frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; |
@@ -502,21 +499,22 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) | |||
502 | rc = fcoe_start_io(skb); | 499 | rc = fcoe_start_io(skb); |
503 | 500 | ||
504 | if (rc) { | 501 | if (rc) { |
505 | fcoe_insert_wait_queue(lp, skb); | 502 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
503 | __skb_queue_tail(&fc->fcoe_pending_queue, skb); | ||
504 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | ||
506 | if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) | 505 | if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) |
507 | fc_pause(lp); | 506 | lp->qfull = 1; |
508 | } | 507 | } |
509 | 508 | ||
510 | return 0; | 509 | return 0; |
511 | } | 510 | } |
512 | EXPORT_SYMBOL_GPL(fcoe_xmit); | 511 | EXPORT_SYMBOL_GPL(fcoe_xmit); |
513 | 512 | ||
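When fcoe_start_io() fails in the hunk above, the frame is now parked on fcoe_pending_queue under the queue's own spinlock and lp->qfull is raised once the backlog exceeds FCOE_MAX_QUEUE_DEPTH, replacing the old fc_pause() call. A simplified, compilable user-space model of that backpressure decision (toy types, not the kernel code):

#include <stdbool.h>

#define FCOE_MAX_QUEUE_DEPTH 256

/* toy model of the fcoe backlog bookkeeping, not kernel code */
struct port_model {
	int pending_qlen;	/* frames waiting after a failed transmit */
	bool qfull;		/* mirrors lp->qfull; tells libfc to throttle */
};

/* Called when the netdev refused a frame (fcoe_start_io() failure). */
static void queue_failed_frame(struct port_model *p)
{
	p->pending_qlen++;			/* frame parked on the pending queue */
	if (p->pending_qlen > FCOE_MAX_QUEUE_DEPTH)
		p->qfull = true;		/* the old code paused the lport here */
}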
514 | /* | 513 | /** |
515 | * fcoe_percpu_receive_thread - recv thread per cpu | 514 | * fcoe_percpu_receive_thread() - recv thread per cpu |
516 | * @arg: ptr to the fcoe per cpu struct | 515 | * @arg: ptr to the fcoe per cpu struct |
517 | * | 516 | * |
518 | * Return: 0 for success | 517 | * Return: 0 for success |
519 | * | ||
520 | */ | 518 | */ |
521 | int fcoe_percpu_receive_thread(void *arg) | 519 | int fcoe_percpu_receive_thread(void *arg) |
522 | { | 520 | { |
@@ -533,7 +531,7 @@ int fcoe_percpu_receive_thread(void *arg) | |||
533 | struct fcoe_softc *fc; | 531 | struct fcoe_softc *fc; |
534 | struct fcoe_hdr *hp; | 532 | struct fcoe_hdr *hp; |
535 | 533 | ||
536 | set_user_nice(current, 19); | 534 | set_user_nice(current, -20); |
537 | 535 | ||
538 | while (!kthread_should_stop()) { | 536 | while (!kthread_should_stop()) { |
539 | 537 | ||
@@ -658,7 +656,7 @@ int fcoe_percpu_receive_thread(void *arg) | |||
658 | } | 656 | } |
659 | 657 | ||
660 | /** | 658 | /** |
661 | * fcoe_recv_flogi - flogi receive function | 659 | * fcoe_recv_flogi() - flogi receive function |
662 | * @fc: associated fcoe_softc | 660 | * @fc: associated fcoe_softc |
663 | * @fp: the received frame | 661 | * @fp: the received frame |
664 | * @sa: the source address of this flogi | 662 | * @sa: the source address of this flogi |
@@ -667,7 +665,7 @@ int fcoe_percpu_receive_thread(void *arg) | |||
667 | * mac address for the initiator, either OUI based or GW based. | 665 | * mac address for the initiator, either OUI based or GW based. |
668 | * | 666 | * |
669 | * Returns: none | 667 | * Returns: none |
670 | **/ | 668 | */ |
671 | static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) | 669 | static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) |
672 | { | 670 | { |
673 | struct fc_frame_header *fh; | 671 | struct fc_frame_header *fh; |
@@ -715,32 +713,23 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa) | |||
715 | } | 713 | } |
716 | 714 | ||
717 | /** | 715 | /** |
718 | * fcoe_watchdog - fcoe timer callback | 716 | * fcoe_watchdog() - fcoe timer callback |
719 | * @vp: | 717 | * @vp: |
720 | * | 718 | * |
721 | * This checks the pending queue length for fcoe and put fcoe to be paused state | 719 | * This checks the pending queue length for fcoe and sets lport qfull |
722 | * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the | 720 | * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the |
723 | * fcoe_hostlist. | 721 | * fcoe_hostlist. |
724 | * | 722 | * |
725 | * Returns: 0 for success | 723 | * Returns: 0 for success |
726 | **/ | 724 | */ |
727 | void fcoe_watchdog(ulong vp) | 725 | void fcoe_watchdog(ulong vp) |
728 | { | 726 | { |
729 | struct fc_lport *lp; | ||
730 | struct fcoe_softc *fc; | 727 | struct fcoe_softc *fc; |
731 | int paused = 0; | ||
732 | 728 | ||
733 | read_lock(&fcoe_hostlist_lock); | 729 | read_lock(&fcoe_hostlist_lock); |
734 | list_for_each_entry(fc, &fcoe_hostlist, list) { | 730 | list_for_each_entry(fc, &fcoe_hostlist, list) { |
735 | lp = fc->lp; | 731 | if (fc->lp) |
736 | if (lp) { | 732 | fcoe_check_wait_queue(fc->lp); |
737 | if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) | ||
738 | paused = 1; | ||
739 | if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) { | ||
740 | if (paused) | ||
741 | fc_unpause(lp); | ||
742 | } | ||
743 | } | ||
744 | } | 733 | } |
745 | read_unlock(&fcoe_hostlist_lock); | 734 | read_unlock(&fcoe_hostlist_lock); |
746 | 735 | ||
@@ -750,96 +739,64 @@ void fcoe_watchdog(ulong vp) | |||
750 | 739 | ||
751 | 740 | ||
752 | /** | 741 | /** |
753 | * fcoe_check_wait_queue - put the skb into fcoe pending xmit queue | 742 | * fcoe_check_wait_queue() - put the skb into fcoe pending xmit queue |
754 | * @lp: the fc_port for this skb | 743 | * @lp: the fc_port for this skb |
755 | * @skb: the associated skb to be xmitted | 744 | * @skb: the associated skb to be xmitted |
756 | * | 745 | * |
757 | * This empties the wait_queue, dequeuing the head of the wait_queue | 746 | * This empties the wait_queue, dequeuing the head of the wait_queue |
758 | * and calling fcoe_start_io() for each packet; if all skbs have been | 747 | * and calling fcoe_start_io() for each packet; if all skbs have been |
759 | * transmitted, return 0; if an error occurs, restore the wait_queue and | 748 | * transmitted, return qlen or -1; if an error occurs, restore the |
760 | * try again later. | 749 | * wait_queue and try again later. |
761 | * | 750 | * |
762 | * The wait_queue is used when the skb transmit fails. skb will go | 751 | * The wait_queue is used when the skb transmit fails. skb will go |
763 | * in the wait_queue which will be emptied by the time function OR | 752 | * in the wait_queue which will be emptied by the time function OR |
764 | * by the next skb transmit. | 753 | * by the next skb transmit. |
765 | * | 754 | * |
766 | * Returns: 0 for success | 755 | * Returns: 0 for success |
767 | **/ | 756 | */ |
768 | static int fcoe_check_wait_queue(struct fc_lport *lp) | 757 | static int fcoe_check_wait_queue(struct fc_lport *lp) |
769 | { | 758 | { |
770 | int rc, unpause = 0; | 759 | struct fcoe_softc *fc = lport_priv(lp); |
771 | int paused = 0; | ||
772 | struct sk_buff *skb; | 760 | struct sk_buff *skb; |
773 | struct fcoe_softc *fc; | 761 | int rc = -1; |
774 | 762 | ||
775 | fc = fcoe_softc(lp); | ||
776 | spin_lock_bh(&fc->fcoe_pending_queue.lock); | 763 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
764 | if (fc->fcoe_pending_queue_active) | ||
765 | goto out; | ||
766 | fc->fcoe_pending_queue_active = 1; | ||
777 | 767 | ||
778 | /* | 768 | while (fc->fcoe_pending_queue.qlen) { |
779 | * is this interface paused? | 769 | /* keep qlen > 0 until fcoe_start_io succeeds */ |
780 | */ | 770 | fc->fcoe_pending_queue.qlen++; |
781 | if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) | 771 | skb = __skb_dequeue(&fc->fcoe_pending_queue); |
782 | paused = 1; | ||
783 | if (fc->fcoe_pending_queue.qlen) { | ||
784 | while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) { | ||
785 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | ||
786 | rc = fcoe_start_io(skb); | ||
787 | if (rc) { | ||
788 | fcoe_insert_wait_queue_head(lp, skb); | ||
789 | return rc; | ||
790 | } | ||
791 | spin_lock_bh(&fc->fcoe_pending_queue.lock); | ||
792 | } | ||
793 | if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH) | ||
794 | unpause = 1; | ||
795 | } | ||
796 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | ||
797 | if ((unpause) && (paused)) | ||
798 | fc_unpause(lp); | ||
799 | return fc->fcoe_pending_queue.qlen; | ||
800 | } | ||
801 | |||
802 | /** | ||
803 | * fcoe_insert_wait_queue_head - puts skb to fcoe pending queue head | ||
804 | * @lp: the fc_port for this skb | ||
805 | * @skb: the associated skb to be xmitted | ||
806 | * | ||
807 | * Returns: none | ||
808 | **/ | ||
809 | static void fcoe_insert_wait_queue_head(struct fc_lport *lp, | ||
810 | struct sk_buff *skb) | ||
811 | { | ||
812 | struct fcoe_softc *fc; | ||
813 | 772 | ||
814 | fc = fcoe_softc(lp); | 773 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); |
815 | spin_lock_bh(&fc->fcoe_pending_queue.lock); | 774 | rc = fcoe_start_io(skb); |
816 | __skb_queue_head(&fc->fcoe_pending_queue, skb); | 775 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
817 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | ||
818 | } | ||
819 | 776 | ||
820 | /** | 777 | if (rc) { |
821 | * fcoe_insert_wait_queue - put the skb into fcoe pending queue tail | 778 | __skb_queue_head(&fc->fcoe_pending_queue, skb); |
822 | * @lp: the fc_port for this skb | 779 | /* undo temporary increment above */ |
823 | * @skb: the associated skb to be xmitted | 780 | fc->fcoe_pending_queue.qlen--; |
824 | * | 781 | break; |
825 | * Returns: none | 782 | } |
826 | **/ | 783 | /* undo temporary increment above */ |
827 | static void fcoe_insert_wait_queue(struct fc_lport *lp, | 784 | fc->fcoe_pending_queue.qlen--; |
828 | struct sk_buff *skb) | 785 | } |
829 | { | ||
830 | struct fcoe_softc *fc; | ||
831 | 786 | ||
832 | fc = fcoe_softc(lp); | 787 | if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) |
833 | spin_lock_bh(&fc->fcoe_pending_queue.lock); | 788 | lp->qfull = 0; |
834 | __skb_queue_tail(&fc->fcoe_pending_queue, skb); | 789 | fc->fcoe_pending_queue_active = 0; |
790 | rc = fc->fcoe_pending_queue.qlen; | ||
791 | out: | ||
835 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | 792 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); |
793 | return rc; | ||
836 | } | 794 | } |
837 | 795 | ||
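The rewritten fcoe_check_wait_queue() above drains the backlog with a single flusher at a time (fcoe_pending_queue_active), briefly bumps qlen while a dequeued frame is in flight so readers never observe an empty queue mid-transmit, and clears lp->qfull once the backlog falls below FCOE_LOW_QUEUE_DEPTH. A rough user-space model of the drain loop (the temporary qlen bump is noted in comments rather than modeled literally):

#include <stdbool.h>

#define FCOE_LOW_QUEUE_DEPTH 32

/* toy model of the pending-queue drain, not kernel code */
struct drain_model {
	int qlen;	/* pending backlog length */
	bool active;	/* mirrors fc->fcoe_pending_queue_active */
	bool qfull;	/* mirrors lp->qfull */
};

/* xmit_one() stands in for fcoe_start_io(); returns true on success. */
static int drain_pending(struct drain_model *m, bool (*xmit_one)(void))
{
	if (m->active)
		return -1;		/* someone else is already draining */
	m->active = true;

	while (m->qlen) {
		/* the kernel bumps qlen here, dequeues, then undoes the bump */
		m->qlen--;		/* take one frame off the backlog */
		if (!xmit_one()) {
			m->qlen++;	/* transmit failed: put the frame back */
			break;		/* leave the rest for the watchdog */
		}
	}

	if (m->qlen < FCOE_LOW_QUEUE_DEPTH)
		m->qfull = false;
	m->active = false;
	return m->qlen;
}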
838 | /** | 796 | /** |
839 | * fcoe_dev_setup - setup link change notification interface | 797 | * fcoe_dev_setup() - setup link change notification interface |
840 | * | 798 | */ |
841 | **/ | 799 | static void fcoe_dev_setup() |
842 | static void fcoe_dev_setup(void) | ||
843 | { | 800 | { |
844 | /* | 801 | /* |
845 | * here setup a interface specific wd time to | 802 | * here setup a interface specific wd time to |
@@ -849,15 +806,15 @@ static void fcoe_dev_setup(void) | |||
849 | } | 806 | } |
850 | 807 | ||
851 | /** | 808 | /** |
852 | * fcoe_dev_setup - cleanup link change notification interface | 809 | * fcoe_dev_cleanup() - cleanup link change notification interface |
853 | **/ | 810 | */ |
854 | static void fcoe_dev_cleanup(void) | 811 | static void fcoe_dev_cleanup(void) |
855 | { | 812 | { |
856 | unregister_netdevice_notifier(&fcoe_notifier); | 813 | unregister_netdevice_notifier(&fcoe_notifier); |
857 | } | 814 | } |
858 | 815 | ||
859 | /** | 816 | /** |
860 | * fcoe_device_notification - netdev event notification callback | 817 | * fcoe_device_notification() - netdev event notification callback |
861 | * @notifier: context of the notification | 818 | * @notifier: context of the notification |
862 | * @event: type of event | 819 | * @event: type of event |
863 | * @ptr: ptr to the affected net_device | 820 | * @ptr: ptr to the affected net_device |
@@ -865,7 +822,7 @@ static void fcoe_dev_cleanup(void) | |||
865 | * This function is called by the ethernet driver in case of a link change event | 822 | * This function is called by the ethernet driver in case of a link change event |
866 | * | 823 | * |
867 | * Returns: 0 for success | 824 | * Returns: 0 for success |
868 | **/ | 825 | */ |
869 | static int fcoe_device_notification(struct notifier_block *notifier, | 826 | static int fcoe_device_notification(struct notifier_block *notifier, |
870 | ulong event, void *ptr) | 827 | ulong event, void *ptr) |
871 | { | 828 | { |
@@ -873,7 +830,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
873 | struct net_device *real_dev = ptr; | 830 | struct net_device *real_dev = ptr; |
874 | struct fcoe_softc *fc; | 831 | struct fcoe_softc *fc; |
875 | struct fcoe_dev_stats *stats; | 832 | struct fcoe_dev_stats *stats; |
876 | u16 new_status; | 833 | u32 new_link_up; |
877 | u32 mfs; | 834 | u32 mfs; |
878 | int rc = NOTIFY_OK; | 835 | int rc = NOTIFY_OK; |
879 | 836 | ||
@@ -890,17 +847,15 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
890 | goto out; | 847 | goto out; |
891 | } | 848 | } |
892 | 849 | ||
893 | new_status = lp->link_status; | 850 | new_link_up = lp->link_up; |
894 | switch (event) { | 851 | switch (event) { |
895 | case NETDEV_DOWN: | 852 | case NETDEV_DOWN: |
896 | case NETDEV_GOING_DOWN: | 853 | case NETDEV_GOING_DOWN: |
897 | new_status &= ~FC_LINK_UP; | 854 | new_link_up = 0; |
898 | break; | 855 | break; |
899 | case NETDEV_UP: | 856 | case NETDEV_UP: |
900 | case NETDEV_CHANGE: | 857 | case NETDEV_CHANGE: |
901 | new_status &= ~FC_LINK_UP; | 858 | new_link_up = !fcoe_link_ok(lp); |
902 | if (!fcoe_link_ok(lp)) | ||
903 | new_status |= FC_LINK_UP; | ||
904 | break; | 859 | break; |
905 | case NETDEV_CHANGEMTU: | 860 | case NETDEV_CHANGEMTU: |
906 | mfs = fc->real_dev->mtu - | 861 | mfs = fc->real_dev->mtu - |
@@ -908,17 +863,15 @@ static int fcoe_device_notification(struct notifier_block *notifier, | |||
908 | sizeof(struct fcoe_crc_eof)); | 863 | sizeof(struct fcoe_crc_eof)); |
909 | if (mfs >= FC_MIN_MAX_FRAME) | 864 | if (mfs >= FC_MIN_MAX_FRAME) |
910 | fc_set_mfs(lp, mfs); | 865 | fc_set_mfs(lp, mfs); |
911 | new_status &= ~FC_LINK_UP; | 866 | new_link_up = !fcoe_link_ok(lp); |
912 | if (!fcoe_link_ok(lp)) | ||
913 | new_status |= FC_LINK_UP; | ||
914 | break; | 867 | break; |
915 | case NETDEV_REGISTER: | 868 | case NETDEV_REGISTER: |
916 | break; | 869 | break; |
917 | default: | 870 | default: |
918 | FC_DBG("unknown event %ld call", event); | 871 | FC_DBG("unknown event %ld call", event); |
919 | } | 872 | } |
920 | if (lp->link_status != new_status) { | 873 | if (lp->link_up != new_link_up) { |
921 | if ((new_status & FC_LINK_UP) == FC_LINK_UP) | 874 | if (new_link_up) |
922 | fc_linkup(lp); | 875 | fc_linkup(lp); |
923 | else { | 876 | else { |
924 | stats = lp->dev_stats[smp_processor_id()]; | 877 | stats = lp->dev_stats[smp_processor_id()]; |
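With the lp->link_status bitmask gone, the notifier above recomputes a plain boolean from the netdev event and only acts when it differs from the stored lp->link_up. A toy model of that mapping (link_is_up() stands in for the !fcoe_link_ok() probe; the event names are simplified, not the real NETDEV_* constants):

#include <stdbool.h>

/* toy model of the notifier's link_up computation, not kernel code */
enum nd_event { ND_DOWN, ND_GOING_DOWN, ND_UP, ND_CHANGE, ND_CHANGEMTU, ND_OTHER };

static bool compute_link_up(enum nd_event ev, bool cur, bool (*link_is_up)(void))
{
	switch (ev) {
	case ND_DOWN:
	case ND_GOING_DOWN:
		return false;		/* interface is going away */
	case ND_UP:
	case ND_CHANGE:
	case ND_CHANGEMTU:
		return link_is_up();	/* re-probe the carrier state */
	default:
		return cur;		/* unknown event: keep the old state */
	}
}

Only when the computed value differs from lp->link_up does the notifier call fc_linkup() or take the link-down path.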
@@ -933,12 +886,12 @@ out: | |||
933 | } | 886 | } |
934 | 887 | ||
935 | /** | 888 | /** |
936 | * fcoe_if_to_netdev - parse a name buffer to get netdev | 889 | * fcoe_if_to_netdev() - parse a name buffer to get netdev |
937 | * @ifname: fixed array for output parsed ifname | 890 | * @ifname: fixed array for output parsed ifname |
938 | * @buffer: incoming buffer to be copied | 891 | * @buffer: incoming buffer to be copied |
939 | * | 892 | * |
940 | * Returns: NULL or ptr to the netdev | 893 | * Returns: NULL or ptr to the netdev |
941 | **/ | 894 | */ |
942 | static struct net_device *fcoe_if_to_netdev(const char *buffer) | 895 | static struct net_device *fcoe_if_to_netdev(const char *buffer) |
943 | { | 896 | { |
944 | char *cp; | 897 | char *cp; |
@@ -955,13 +908,13 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) | |||
955 | } | 908 | } |
956 | 909 | ||
957 | /** | 910 | /** |
958 | * fcoe_netdev_to_module_owner - finds out the nic driver module of the netdev | 911 | * fcoe_netdev_to_module_owner() - finds out the nic driver module of the netdev |
959 | * @netdev: the target netdev | 912 | * @netdev: the target netdev |
960 | * | 913 | * |
961 | * Returns: ptr to the struct module, NULL for failure | 914 | * Returns: ptr to the struct module, NULL for failure |
962 | **/ | 915 | */ |
963 | static struct module *fcoe_netdev_to_module_owner( | 916 | static struct module * |
964 | const struct net_device *netdev) | 917 | fcoe_netdev_to_module_owner(const struct net_device *netdev) |
965 | { | 918 | { |
966 | struct device *dev; | 919 | struct device *dev; |
967 | 920 | ||
@@ -979,12 +932,14 @@ static struct module *fcoe_netdev_to_module_owner( | |||
979 | } | 932 | } |
980 | 933 | ||
981 | /** | 934 | /** |
982 | * fcoe_ethdrv_get - holds the nic driver module by try_module_get() for | 935 | * fcoe_ethdrv_get() - Hold the Ethernet driver |
983 | * the corresponding netdev. | ||
984 | * @netdev: the target netdev | 936 | * @netdev: the target netdev |
985 | * | 937 | * |
938 | * Holds the Ethernet driver module by try_module_get() for | ||
939 | * the corresponding netdev. | ||
940 | * | ||
986 | * Returns: 0 for success | 941 | * Returns: 0 for success |
987 | **/ | 942 | */ |
988 | static int fcoe_ethdrv_get(const struct net_device *netdev) | 943 | static int fcoe_ethdrv_get(const struct net_device *netdev) |
989 | { | 944 | { |
990 | struct module *owner; | 945 | struct module *owner; |
@@ -999,12 +954,14 @@ static int fcoe_ethdrv_get(const struct net_device *netdev) | |||
999 | } | 954 | } |
1000 | 955 | ||
1001 | /** | 956 | /** |
1002 | * fcoe_ethdrv_get - releases the nic driver module by module_put for | 957 | * fcoe_ethdrv_put() - Release the Ethernet driver |
1003 | * the corresponding netdev. | ||
1004 | * @netdev: the target netdev | 958 | * @netdev: the target netdev |
1005 | * | 959 | * |
960 | * Releases the Ethernet driver module by module_put for | ||
961 | * the corresponding netdev. | ||
962 | * | ||
1006 | * Returns: 0 for success | 963 | * Returns: 0 for success |
1007 | **/ | 964 | */ |
1008 | static int fcoe_ethdrv_put(const struct net_device *netdev) | 965 | static int fcoe_ethdrv_put(const struct net_device *netdev) |
1009 | { | 966 | { |
1010 | struct module *owner; | 967 | struct module *owner; |
@@ -1020,12 +977,12 @@ static int fcoe_ethdrv_put(const struct net_device *netdev) | |||
1020 | } | 977 | } |
1021 | 978 | ||
1022 | /** | 979 | /** |
1023 | * fcoe_destroy- handles the destroy from sysfs | 980 | * fcoe_destroy() - handles the destroy from sysfs |
1024 | * @buffer: expected to be an eth if name | 981 | * @buffer: expected to be an eth if name |
1025 | * @kp: associated kernel param | 982 | * @kp: associated kernel param |
1026 | * | 983 | * |
1027 | * Returns: 0 for success | 984 | * Returns: 0 for success |
1028 | **/ | 985 | */ |
1029 | static int fcoe_destroy(const char *buffer, struct kernel_param *kp) | 986 | static int fcoe_destroy(const char *buffer, struct kernel_param *kp) |
1030 | { | 987 | { |
1031 | int rc; | 988 | int rc; |
@@ -1058,12 +1015,12 @@ out_nodev: | |||
1058 | } | 1015 | } |
1059 | 1016 | ||
1060 | /** | 1017 | /** |
1061 | * fcoe_create - handles the create call from sysfs | 1018 | * fcoe_create() - Handles the create call from sysfs |
1062 | * @buffer: expected to be an eth if name | 1019 | * @buffer: expected to be an eth if name |
1063 | * @kp: associated kernel param | 1020 | * @kp: associated kernel param |
1064 | * | 1021 | * |
1065 | * Returns: 0 for success | 1022 | * Returns: 0 for success |
1066 | **/ | 1023 | */ |
1067 | static int fcoe_create(const char *buffer, struct kernel_param *kp) | 1024 | static int fcoe_create(const char *buffer, struct kernel_param *kp) |
1068 | { | 1025 | { |
1069 | int rc; | 1026 | int rc; |
@@ -1104,8 +1061,8 @@ module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); | |||
1104 | __MODULE_PARM_TYPE(destroy, "string"); | 1061 | __MODULE_PARM_TYPE(destroy, "string"); |
1105 | MODULE_PARM_DESC(destroy, "Destroy fcoe port"); | 1062 | MODULE_PARM_DESC(destroy, "Destroy fcoe port"); |
1106 | 1063 | ||
1107 | /* | 1064 | /** |
1108 | * fcoe_link_ok - check if link is ok for the fc_lport | 1065 | * fcoe_link_ok() - Check if link is ok for the fc_lport |
1109 | * @lp: ptr to the fc_lport | 1066 | * @lp: ptr to the fc_lport |
1110 | * | 1067 | * |
1111 | * Any permanently-disqualifying conditions have been previously checked. | 1068 | * Any permanently-disqualifying conditions have been previously checked. |
@@ -1120,7 +1077,7 @@ MODULE_PARM_DESC(destroy, "Destroy fcoe port"); | |||
1120 | */ | 1077 | */ |
1121 | int fcoe_link_ok(struct fc_lport *lp) | 1078 | int fcoe_link_ok(struct fc_lport *lp) |
1122 | { | 1079 | { |
1123 | struct fcoe_softc *fc = fcoe_softc(lp); | 1080 | struct fcoe_softc *fc = lport_priv(lp); |
1124 | struct net_device *dev = fc->real_dev; | 1081 | struct net_device *dev = fc->real_dev; |
1125 | struct ethtool_cmd ecmd = { ETHTOOL_GSET }; | 1082 | struct ethtool_cmd ecmd = { ETHTOOL_GSET }; |
1126 | int rc = 0; | 1083 | int rc = 0; |
@@ -1149,9 +1106,8 @@ int fcoe_link_ok(struct fc_lport *lp) | |||
1149 | } | 1106 | } |
1150 | EXPORT_SYMBOL_GPL(fcoe_link_ok); | 1107 | EXPORT_SYMBOL_GPL(fcoe_link_ok); |
1151 | 1108 | ||
1152 | /* | 1109 | /** |
1153 | * fcoe_percpu_clean - frees skb of the corresponding lport from the per | 1110 | * fcoe_percpu_clean() - Clear the pending skbs for an lport |
1154 | * cpu queue. | ||
1155 | * @lp: the fc_lport | 1111 | * @lp: the fc_lport |
1156 | */ | 1112 | */ |
1157 | void fcoe_percpu_clean(struct fc_lport *lp) | 1113 | void fcoe_percpu_clean(struct fc_lport *lp) |
@@ -1185,11 +1141,11 @@ void fcoe_percpu_clean(struct fc_lport *lp) | |||
1185 | EXPORT_SYMBOL_GPL(fcoe_percpu_clean); | 1141 | EXPORT_SYMBOL_GPL(fcoe_percpu_clean); |
1186 | 1142 | ||
1187 | /** | 1143 | /** |
1188 | * fcoe_clean_pending_queue - dequeue skb and free it | 1144 | * fcoe_clean_pending_queue() - Dequeue a skb and free it |
1189 | * @lp: the corresponding fc_lport | 1145 | * @lp: the corresponding fc_lport |
1190 | * | 1146 | * |
1191 | * Returns: none | 1147 | * Returns: none |
1192 | **/ | 1148 | */ |
1193 | void fcoe_clean_pending_queue(struct fc_lport *lp) | 1149 | void fcoe_clean_pending_queue(struct fc_lport *lp) |
1194 | { | 1150 | { |
1195 | struct fcoe_softc *fc = lport_priv(lp); | 1151 | struct fcoe_softc *fc = lport_priv(lp); |
@@ -1206,21 +1162,21 @@ void fcoe_clean_pending_queue(struct fc_lport *lp) | |||
1206 | EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); | 1162 | EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); |
1207 | 1163 | ||
1208 | /** | 1164 | /** |
1209 | * libfc_host_alloc - allocate a Scsi_Host with room for the fc_lport | 1165 | * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport |
1210 | * @sht: ptr to the scsi host templ | 1166 | * @sht: ptr to the scsi host templ |
1211 | * @priv_size: size of private data after fc_lport | 1167 | * @priv_size: size of private data after fc_lport |
1212 | * | 1168 | * |
1213 | * Returns: ptr to Scsi_Host | 1169 | * Returns: ptr to Scsi_Host |
1214 | * TODO - to libfc? | 1170 | * TODO: to libfc? |
1215 | */ | 1171 | */ |
1216 | static inline struct Scsi_Host *libfc_host_alloc( | 1172 | static inline struct Scsi_Host * |
1217 | struct scsi_host_template *sht, int priv_size) | 1173 | libfc_host_alloc(struct scsi_host_template *sht, int priv_size) |
1218 | { | 1174 | { |
1219 | return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size); | 1175 | return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size); |
1220 | } | 1176 | } |
1221 | 1177 | ||
1222 | /** | 1178 | /** |
1223 | * fcoe_host_alloc - allocate a Scsi_Host with room for the fcoe_softc | 1179 | * fcoe_host_alloc() - Allocate a Scsi_Host with room for the fcoe_softc |
1224 | * @sht: ptr to the scsi host templ | 1180 | * @sht: ptr to the scsi host templ |
1225 | * @priv_size: size of private data after fc_lport | 1181 | * @priv_size: size of private data after fc_lport |
1226 | * | 1182 | * |
@@ -1232,8 +1188,8 @@ struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size) | |||
1232 | } | 1188 | } |
1233 | EXPORT_SYMBOL_GPL(fcoe_host_alloc); | 1189 | EXPORT_SYMBOL_GPL(fcoe_host_alloc); |
1234 | 1190 | ||
1235 | /* | 1191 | /** |
1236 | * fcoe_reset - resets the fcoe | 1192 | * fcoe_reset() - Resets the fcoe |
1237 | * @shost: shost the reset is from | 1193 | * @shost: shost the reset is from |
1238 | * | 1194 | * |
1239 | * Returns: always 0 | 1195 | * Returns: always 0 |
@@ -1246,8 +1202,8 @@ int fcoe_reset(struct Scsi_Host *shost) | |||
1246 | } | 1202 | } |
1247 | EXPORT_SYMBOL_GPL(fcoe_reset); | 1203 | EXPORT_SYMBOL_GPL(fcoe_reset); |
1248 | 1204 | ||
1249 | /* | 1205 | /** |
1250 | * fcoe_wwn_from_mac - converts 48-bit IEEE MAC address to 64-bit FC WWN. | 1206 | * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN. |
1251 | * @mac: mac address | 1207 | * @mac: mac address |
1252 | * @scheme: check port | 1208 | * @scheme: check port |
1253 | * @port: port indicator for converting | 1209 | * @port: port indicator for converting |
@@ -1286,14 +1242,15 @@ u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], | |||
1286 | return wwn; | 1242 | return wwn; |
1287 | } | 1243 | } |
1288 | EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); | 1244 | EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); |
1289 | /* | 1245 | |
1290 | * fcoe_hostlist_lookup_softc - find the corresponding lport by a given device | 1246 | /** |
1247 | * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device | ||
1291 | * @device: this is currently ptr to net_device | 1248 | * @device: this is currently ptr to net_device |
1292 | * | 1249 | * |
1293 | * Returns: NULL or the located fcoe_softc | 1250 | * Returns: NULL or the located fcoe_softc |
1294 | */ | 1251 | */ |
1295 | static struct fcoe_softc *fcoe_hostlist_lookup_softc( | 1252 | static struct fcoe_softc * |
1296 | const struct net_device *dev) | 1253 | fcoe_hostlist_lookup_softc(const struct net_device *dev) |
1297 | { | 1254 | { |
1298 | struct fcoe_softc *fc; | 1255 | struct fcoe_softc *fc; |
1299 | 1256 | ||
@@ -1308,8 +1265,8 @@ static struct fcoe_softc *fcoe_hostlist_lookup_softc( | |||
1308 | return NULL; | 1265 | return NULL; |
1309 | } | 1266 | } |
1310 | 1267 | ||
1311 | /* | 1268 | /** |
1312 | * fcoe_hostlist_lookup - find the corresponding lport by netdev | 1269 | * fcoe_hostlist_lookup() - Find the corresponding lport by netdev |
1313 | * @netdev: ptr to net_device | 1270 | * @netdev: ptr to net_device |
1314 | * | 1271 | * |
1315 | * Returns: 0 for success | 1272 | * Returns: 0 for success |
@@ -1324,8 +1281,8 @@ struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) | |||
1324 | } | 1281 | } |
1325 | EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup); | 1282 | EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup); |
1326 | 1283 | ||
1327 | /* | 1284 | /** |
1328 | * fcoe_hostlist_add - add a lport to lports list | 1285 | * fcoe_hostlist_add() - Add a lport to lports list |
1329 | * @lp: ptr to the fc_lport to be added | 1286 | * @lp: ptr to the fc_lport to be added |
1330 | * | 1287 | * |
1331 | * Returns: 0 for success | 1288 | * Returns: 0 for success |
@@ -1336,7 +1293,7 @@ int fcoe_hostlist_add(const struct fc_lport *lp) | |||
1336 | 1293 | ||
1337 | fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); | 1294 | fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); |
1338 | if (!fc) { | 1295 | if (!fc) { |
1339 | fc = fcoe_softc(lp); | 1296 | fc = lport_priv(lp); |
1340 | write_lock_bh(&fcoe_hostlist_lock); | 1297 | write_lock_bh(&fcoe_hostlist_lock); |
1341 | list_add_tail(&fc->list, &fcoe_hostlist); | 1298 | list_add_tail(&fc->list, &fcoe_hostlist); |
1342 | write_unlock_bh(&fcoe_hostlist_lock); | 1299 | write_unlock_bh(&fcoe_hostlist_lock); |
@@ -1345,8 +1302,8 @@ int fcoe_hostlist_add(const struct fc_lport *lp) | |||
1345 | } | 1302 | } |
1346 | EXPORT_SYMBOL_GPL(fcoe_hostlist_add); | 1303 | EXPORT_SYMBOL_GPL(fcoe_hostlist_add); |
1347 | 1304 | ||
1348 | /* | 1305 | /** |
1349 | * fcoe_hostlist_remove - remove a lport from lports list | 1306 | * fcoe_hostlist_remove() - remove a lport from lports list |
1350 | * @lp: ptr to the fc_lport to be removed | 1307 | * @lp: ptr to the fc_lport to be removed |
1351 | * | 1308 | * |
1352 | * Returns: 0 for success | 1309 | * Returns: 0 for success |
@@ -1366,12 +1323,12 @@ int fcoe_hostlist_remove(const struct fc_lport *lp) | |||
1366 | EXPORT_SYMBOL_GPL(fcoe_hostlist_remove); | 1323 | EXPORT_SYMBOL_GPL(fcoe_hostlist_remove); |
1367 | 1324 | ||
1368 | /** | 1325 | /** |
1369 | * fcoe_libfc_config - sets up libfc related properties for lport | 1326 | * fcoe_libfc_config() - sets up libfc related properties for lport |
1370 | * @lp: ptr to the fc_lport | 1327 | * @lp: ptr to the fc_lport |
1371 | * @tt: libfc function template | 1328 | * @tt: libfc function template |
1372 | * | 1329 | * |
1373 | * Returns : 0 for success | 1330 | * Returns : 0 for success |
1374 | **/ | 1331 | */ |
1375 | int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt) | 1332 | int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt) |
1376 | { | 1333 | { |
1377 | /* Set the function pointers set by the LLDD */ | 1334 | /* Set the function pointers set by the LLDD */ |
@@ -1389,14 +1346,14 @@ int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt) | |||
1389 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); | 1346 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); |
1390 | 1347 | ||
1391 | /** | 1348 | /** |
1392 | * fcoe_init - fcoe module loading initialization | 1349 | * fcoe_init() - fcoe module loading initialization |
1393 | * | 1350 | * |
1394 | * Initialization routine | 1351 | * Initialization routine |
1395 | * 1. Will create fc transport software structure | 1352 | * 1. Will create fc transport software structure |
1396 | * 2. initialize the link list of port information structure | 1353 | * 2. initialize the link list of port information structure |
1397 | * | 1354 | * |
1398 | * Returns 0 on success, negative on failure | 1355 | * Returns 0 on success, negative on failure |
1399 | **/ | 1356 | */ |
1400 | static int __init fcoe_init(void) | 1357 | static int __init fcoe_init(void) |
1401 | { | 1358 | { |
1402 | int cpu; | 1359 | int cpu; |
@@ -1433,7 +1390,6 @@ static int __init fcoe_init(void) | |||
1433 | } else { | 1390 | } else { |
1434 | fcoe_percpu[cpu] = NULL; | 1391 | fcoe_percpu[cpu] = NULL; |
1435 | kfree(p); | 1392 | kfree(p); |
1436 | |||
1437 | } | 1393 | } |
1438 | } | 1394 | } |
1439 | } | 1395 | } |
@@ -1443,11 +1399,9 @@ static int __init fcoe_init(void) | |||
1443 | */ | 1399 | */ |
1444 | fcoe_dev_setup(); | 1400 | fcoe_dev_setup(); |
1445 | 1401 | ||
1446 | init_timer(&fcoe_timer); | 1402 | setup_timer(&fcoe_timer, fcoe_watchdog, 0); |
1447 | fcoe_timer.data = 0; | 1403 | |
1448 | fcoe_timer.function = fcoe_watchdog; | 1404 | mod_timer(&fcoe_timer, jiffies + (10 * HZ)); |
1449 | fcoe_timer.expires = (jiffies + (10 * HZ)); | ||
1450 | add_timer(&fcoe_timer); | ||
1451 | 1405 | ||
1452 | /* initialize the fcoe transport */ | 1406 | /* initialize the fcoe transport */ |
1453 | fcoe_transport_init(); | 1407 | fcoe_transport_init(); |
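The open-coded timer initialization in fcoe_init() collapses into setup_timer() plus mod_timer(); for the timer API of this kernel generation the two forms are equivalent, as the sketch below restates (taken from the hunk above, no new behaviour):

/* before: init_timer() plus hand-filled fields */
init_timer(&fcoe_timer);
fcoe_timer.data     = 0;
fcoe_timer.function = fcoe_watchdog;
fcoe_timer.expires  = jiffies + (10 * HZ);
add_timer(&fcoe_timer);

/* after: setup_timer() fills function/data, mod_timer() arms it */
setup_timer(&fcoe_timer, fcoe_watchdog, 0);
mod_timer(&fcoe_timer, jiffies + (10 * HZ));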
@@ -1459,10 +1413,10 @@ static int __init fcoe_init(void) | |||
1459 | module_init(fcoe_init); | 1413 | module_init(fcoe_init); |
1460 | 1414 | ||
1461 | /** | 1415 | /** |
1462 | * fcoe_exit - fcoe module unloading cleanup | 1416 | * fcoe_exit() - fcoe module unloading cleanup |
1463 | * | 1417 | * |
1464 | * Returns 0 on success, negative on failure | 1418 | * Returns 0 on success, negative on failure |
1465 | **/ | 1419 | */ |
1466 | static void __exit fcoe_exit(void) | 1420 | static void __exit fcoe_exit(void) |
1467 | { | 1421 | { |
1468 | u32 idx; | 1422 | u32 idx; |
@@ -1483,7 +1437,7 @@ static void __exit fcoe_exit(void) | |||
1483 | */ | 1437 | */ |
1484 | del_timer_sync(&fcoe_timer); | 1438 | del_timer_sync(&fcoe_timer); |
1485 | 1439 | ||
1486 | /* releases the assocaited fcoe transport for each lport */ | 1440 | /* releases the associated fcoe transport for each lport */ |
1487 | list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) | 1441 | list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) |
1488 | fcoe_transport_release(fc->real_dev); | 1442 | fcoe_transport_release(fc->real_dev); |
1489 | 1443 | ||
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index a48e4990fe12..34be88d7afa5 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c | |||
@@ -1251,6 +1251,7 @@ static struct pci_device_id hptiop_id_table[] = { | |||
1251 | { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops }, | 1251 | { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops }, |
1252 | { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops }, | 1252 | { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops }, |
1253 | { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops }, | 1253 | { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops }, |
1254 | { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops }, | ||
1254 | { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops }, | 1255 | { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops }, |
1255 | { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops }, | 1256 | { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops }, |
1256 | { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops }, | 1257 | { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops }, |
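
The hptiop change above is a single new PCI_VDEVICE() entry for device 0x4321, slotted into the existing hptiop_id_table named in the hunk header. For context, a hedged sketch of how such an ID table is typically declared and exported to the module loader; demo_id_table and demo_itl_ops are illustrative, while PCI_VDEVICE() and MODULE_DEVICE_TABLE() are the real macros:

    #include <linux/pci.h>

    struct demo_ops { int dummy; };
    static struct demo_ops demo_itl_ops;

    /* driver_data carries a pointer to the per-family ops structure */
    static struct pci_device_id demo_id_table[] = {
        { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&demo_itl_ops },
        { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&demo_itl_ops }, /* newly added ID */
        { },    /* zeroed sentinel terminates the table */
    };
    MODULE_DEVICE_TABLE(pci, demo_id_table);

Adding an entry like this is enough for the PCI core to probe the driver for the new device; no other driver changes are needed when the hardware is register-compatible with an existing family (here, the ITL ops).
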
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index a1a511bdec8c..ed1e728763a2 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -1573,9 +1573,6 @@ static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, | |||
1573 | vfc_cmd->resp_len = sizeof(vfc_cmd->rsp); | 1573 | vfc_cmd->resp_len = sizeof(vfc_cmd->rsp); |
1574 | vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata; | 1574 | vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata; |
1575 | vfc_cmd->tgt_scsi_id = rport->port_id; | 1575 | vfc_cmd->tgt_scsi_id = rport->port_id; |
1576 | if ((rport->supported_classes & FC_COS_CLASS3) && | ||
1577 | (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3)) | ||
1578 | vfc_cmd->flags = IBMVFC_CLASS_3_ERR; | ||
1579 | vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd); | 1576 | vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd); |
1580 | int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); | 1577 | int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); |
1581 | memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); | 1578 | memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); |
@@ -3266,6 +3263,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) | |||
3266 | return -ENOMEM; | 3263 | return -ENOMEM; |
3267 | } | 3264 | } |
3268 | 3265 | ||
3266 | memset(tgt, 0, sizeof(*tgt)); | ||
3269 | tgt->scsi_id = scsi_id; | 3267 | tgt->scsi_id = scsi_id; |
3270 | tgt->new_scsi_id = scsi_id; | 3268 | tgt->new_scsi_id = scsi_id; |
3271 | tgt->vhost = vhost; | 3269 | tgt->vhost = vhost; |
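
The one-line ibmvfc change adds an explicit memset() right after the target allocation succeeds. The target object comes from a pool allocator (a mempool in this driver, as far as I recall), and pool allocations are not zeroed, so a recycled buffer can carry stale state from a previous target. A minimal sketch of the pattern with illustrative names:

    #include <linux/mempool.h>
    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct demo_target {
        u64 scsi_id;
        struct list_head queue;
        /* ... more per-target state ... */
    };

    static struct demo_target *demo_alloc_target(mempool_t *pool, u64 scsi_id)
    {
        struct demo_target *tgt = mempool_alloc(pool, GFP_NOIO);

        if (!tgt)
            return NULL;

        /* mempool_alloc() may hand back a recycled, dirty object: clear it */
        memset(tgt, 0, sizeof(*tgt));
        tgt->scsi_id = scsi_id;
        return tgt;
    }
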
@@ -3576,9 +3574,18 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) | |||
3576 | static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) | 3574 | static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) |
3577 | { | 3575 | { |
3578 | struct ibmvfc_host *vhost = tgt->vhost; | 3576 | struct ibmvfc_host *vhost = tgt->vhost; |
3579 | struct fc_rport *rport; | 3577 | struct fc_rport *rport = tgt->rport; |
3580 | unsigned long flags; | 3578 | unsigned long flags; |
3581 | 3579 | ||
3580 | if (rport) { | ||
3581 | tgt_dbg(tgt, "Setting rport roles\n"); | ||
3582 | fc_remote_port_rolechg(rport, tgt->ids.roles); | ||
3583 | spin_lock_irqsave(vhost->host->host_lock, flags); | ||
3584 | ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); | ||
3585 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | ||
3586 | return; | ||
3587 | } | ||
3588 | |||
3582 | tgt_dbg(tgt, "Adding rport\n"); | 3589 | tgt_dbg(tgt, "Adding rport\n"); |
3583 | rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); | 3590 | rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); |
3584 | spin_lock_irqsave(vhost->host->host_lock, flags); | 3591 | spin_lock_irqsave(vhost->host->host_lock, flags); |
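
The ibmvfc_tgt_add_rport() change makes the function tolerate a target that already has a registered remote port: instead of registering a second one, it only refreshes the roles via fc_remote_port_rolechg() and clears the pending action under the host lock. A condensed sketch of the add-or-update pattern against the FC transport class; the transport calls are real, the demo_* wrapper is illustrative:

    #include <linux/kernel.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_fc.h>

    static void demo_add_or_update_rport(struct Scsi_Host *shost,
                                         struct fc_rport *rport,
                                         struct fc_rport_identifiers *ids)
    {
        if (rport) {
            /* already known to the transport: just update its roles */
            fc_remote_port_rolechg(rport, ids->roles);
            return;
        }

        rport = fc_remote_port_add(shost, 0, ids);
        if (!rport)
            pr_err("demo: failed to add FC remote port\n");
    }

Re-adding an rport that is already registered would create a duplicate SCSI target entry, which is what the early-return branch in the hunk avoids.
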
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 87dafd0f8d44..b21e071b9862 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #define IBMVFC_DRIVER_VERSION "1.0.4" | 32 | #define IBMVFC_DRIVER_VERSION "1.0.4" |
33 | #define IBMVFC_DRIVER_DATE "(November 14, 2008)" | 33 | #define IBMVFC_DRIVER_DATE "(November 14, 2008)" |
34 | 34 | ||
35 | #define IBMVFC_DEFAULT_TIMEOUT 15 | 35 | #define IBMVFC_DEFAULT_TIMEOUT 60 |
36 | #define IBMVFC_INIT_TIMEOUT 120 | 36 | #define IBMVFC_INIT_TIMEOUT 120 |
37 | #define IBMVFC_MAX_REQUESTS_DEFAULT 100 | 37 | #define IBMVFC_MAX_REQUESTS_DEFAULT 100 |
38 | 38 | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 74d07d137dae..c9aa7611e408 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -432,6 +432,7 @@ static int map_sg_data(struct scsi_cmnd *cmd, | |||
432 | sdev_printk(KERN_ERR, cmd->device, | 432 | sdev_printk(KERN_ERR, cmd->device, |
433 | "Can't allocate memory " | 433 | "Can't allocate memory " |
434 | "for indirect table\n"); | 434 | "for indirect table\n"); |
435 | scsi_dma_unmap(cmd); | ||
435 | return 0; | 436 | return 0; |
436 | } | 437 | } |
437 | } | 438 | } |
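
The ibmvscsi fix adds scsi_dma_unmap() to the failure path of map_sg_data(): the scatterlist has already been DMA-mapped by the time the indirect-descriptor allocation can fail, so bailing out without unmapping leaked the mapping. A minimal sketch of the map/undo pairing; the allocation is a stand-in for however the driver builds its indirect table, and the return convention here is illustrative:

    #include <scsi/scsi_cmnd.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    static int demo_map_sg(struct scsi_cmnd *cmd)
    {
        int nseg = scsi_dma_map(cmd);
        void *table;

        if (nseg <= 0)
            return nseg;        /* nothing mapped, or mapping failed */

        table = kmalloc(nseg * 16, GFP_ATOMIC);    /* indirect descriptor space */
        if (!table) {
            /* undo the mapping before failing -- the missing step the patch adds */
            scsi_dma_unmap(cmd);
            return -ENOMEM;
        }

        /* ... fill one descriptor per mapped segment ... */
        kfree(table);
        return nseg;
    }
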
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c index 4a4e6954ec79..f23c4ca9a2ee 100644 --- a/drivers/scsi/lasi700.c +++ b/drivers/scsi/lasi700.c | |||
@@ -103,7 +103,7 @@ lasi700_probe(struct parisc_device *dev) | |||
103 | 103 | ||
104 | hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); | 104 | hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); |
105 | if (!hostdata) { | 105 | if (!hostdata) { |
106 | dev_printk(KERN_ERR, dev, "Failed to allocate host data\n"); | 106 | dev_printk(KERN_ERR, &dev->dev, "Failed to allocate host data\n"); |
107 | return -ENOMEM; | 107 | return -ENOMEM; |
108 | } | 108 | } |
109 | 109 | ||
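
The lasi700 fix passes &dev->dev instead of dev to dev_printk(): the function wants the generic struct device, while lasi700_probe() holds the bus-specific parisc_device that merely embeds one. A generic sketch of the distinction with an illustrative wrapper type:

    #include <linux/device.h>

    /* stands in for a bus wrapper such as struct parisc_device */
    struct demo_bus_device {
        struct device dev;      /* embedded generic device */
        unsigned long hpa;      /* bus-specific bits */
    };

    static void demo_report_alloc_failure(struct demo_bus_device *bdev)
    {
        /* dev_printk() takes a struct device *, so pass the embedded member */
        dev_printk(KERN_ERR, &bdev->dev, "Failed to allocate host data\n");
    }
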
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index dd1564c9e04a..e57556ea5b48 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c | |||
@@ -64,7 +64,7 @@ static void fc_disc_single(struct fc_disc *, struct fc_disc_port *); | |||
64 | static void fc_disc_restart(struct fc_disc *); | 64 | static void fc_disc_restart(struct fc_disc *); |
65 | 65 | ||
66 | /** | 66 | /** |
67 | * fc_disc_lookup_rport - lookup a remote port by port_id | 67 | * fc_disc_lookup_rport() - lookup a remote port by port_id |
68 | * @lport: Fibre Channel host port instance | 68 | * @lport: Fibre Channel host port instance |
69 | * @port_id: remote port port_id to match | 69 | * @port_id: remote port port_id to match |
70 | */ | 70 | */ |
@@ -92,7 +92,7 @@ struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport, | |||
92 | } | 92 | } |
93 | 93 | ||
94 | /** | 94 | /** |
95 | * fc_disc_stop_rports - delete all the remote ports associated with the lport | 95 | * fc_disc_stop_rports() - delete all the remote ports associated with the lport |
96 | * @disc: The discovery job to stop rports on | 96 | * @disc: The discovery job to stop rports on |
97 | * | 97 | * |
98 | * Locking Note: This function expects that the lport mutex is locked before | 98 | * Locking Note: This function expects that the lport mutex is locked before |
@@ -117,7 +117,7 @@ void fc_disc_stop_rports(struct fc_disc *disc) | |||
117 | } | 117 | } |
118 | 118 | ||
119 | /** | 119 | /** |
120 | * fc_disc_rport_callback - Event handler for rport events | 120 | * fc_disc_rport_callback() - Event handler for rport events |
121 | * @lport: The lport which is receiving the event | 121 | * @lport: The lport which is receiving the event |
122 | * @rport: The rport which the event has occured on | 122 | * @rport: The rport which the event has occured on |
123 | * @event: The event that occured | 123 | * @event: The event that occured |
@@ -151,7 +151,7 @@ static void fc_disc_rport_callback(struct fc_lport *lport, | |||
151 | } | 151 | } |
152 | 152 | ||
153 | /** | 153 | /** |
154 | * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN) | 154 | * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) |
155 | * @sp: Current sequence of the RSCN exchange | 155 | * @sp: Current sequence of the RSCN exchange |
156 | * @fp: RSCN Frame | 156 | * @fp: RSCN Frame |
157 | * @lport: Fibre Channel host port instance | 157 | * @lport: Fibre Channel host port instance |
@@ -246,7 +246,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp, | |||
246 | list_del(&dp->peers); | 246 | list_del(&dp->peers); |
247 | rport = lport->tt.rport_lookup(lport, dp->ids.port_id); | 247 | rport = lport->tt.rport_lookup(lport, dp->ids.port_id); |
248 | if (rport) { | 248 | if (rport) { |
249 | rdata = RPORT_TO_PRIV(rport); | 249 | rdata = rport->dd_data; |
250 | list_del(&rdata->peers); | 250 | list_del(&rdata->peers); |
251 | lport->tt.rport_logoff(rport); | 251 | lport->tt.rport_logoff(rport); |
252 | } | 252 | } |
@@ -265,7 +265,7 @@ reject: | |||
265 | } | 265 | } |
266 | 266 | ||
267 | /** | 267 | /** |
268 | * fc_disc_recv_req - Handle incoming requests | 268 | * fc_disc_recv_req() - Handle incoming requests |
269 | * @sp: Current sequence of the request exchange | 269 | * @sp: Current sequence of the request exchange |
270 | * @fp: The frame | 270 | * @fp: The frame |
271 | * @lport: The FC local port | 271 | * @lport: The FC local port |
@@ -294,7 +294,7 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
294 | } | 294 | } |
295 | 295 | ||
296 | /** | 296 | /** |
297 | * fc_disc_restart - Restart discovery | 297 | * fc_disc_restart() - Restart discovery |
298 | * @lport: FC discovery context | 298 | * @lport: FC discovery context |
299 | * | 299 | * |
300 | * Locking Note: This function expects that the disc mutex | 300 | * Locking Note: This function expects that the disc mutex |
@@ -322,7 +322,7 @@ static void fc_disc_restart(struct fc_disc *disc) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | /** | 324 | /** |
325 | * fc_disc_start - Fibre Channel Target discovery | 325 | * fc_disc_start() - Fibre Channel Target discovery |
326 | * @lport: FC local port | 326 | * @lport: FC local port |
327 | * | 327 | * |
328 | * Returns non-zero if discovery cannot be started. | 328 | * Returns non-zero if discovery cannot be started. |
@@ -383,7 +383,7 @@ static struct fc_rport_operations fc_disc_rport_ops = { | |||
383 | }; | 383 | }; |
384 | 384 | ||
385 | /** | 385 | /** |
386 | * fc_disc_new_target - Handle new target found by discovery | 386 | * fc_disc_new_target() - Handle new target found by discovery |
387 | * @lport: FC local port | 387 | * @lport: FC local port |
388 | * @rport: The previous FC remote port (NULL if new remote port) | 388 | * @rport: The previous FC remote port (NULL if new remote port) |
389 | * @ids: Identifiers for the new FC remote port | 389 | * @ids: Identifiers for the new FC remote port |
@@ -396,7 +396,7 @@ static int fc_disc_new_target(struct fc_disc *disc, | |||
396 | struct fc_rport_identifiers *ids) | 396 | struct fc_rport_identifiers *ids) |
397 | { | 397 | { |
398 | struct fc_lport *lport = disc->lport; | 398 | struct fc_lport *lport = disc->lport; |
399 | struct fc_rport_libfc_priv *rp; | 399 | struct fc_rport_libfc_priv *rdata; |
400 | int error = 0; | 400 | int error = 0; |
401 | 401 | ||
402 | if (rport && ids->port_name) { | 402 | if (rport && ids->port_name) { |
@@ -430,15 +430,15 @@ static int fc_disc_new_target(struct fc_disc *disc, | |||
430 | dp.ids.port_name = ids->port_name; | 430 | dp.ids.port_name = ids->port_name; |
431 | dp.ids.node_name = ids->node_name; | 431 | dp.ids.node_name = ids->node_name; |
432 | dp.ids.roles = ids->roles; | 432 | dp.ids.roles = ids->roles; |
433 | rport = fc_rport_rogue_create(&dp); | 433 | rport = lport->tt.rport_create(&dp); |
434 | } | 434 | } |
435 | if (!rport) | 435 | if (!rport) |
436 | error = -ENOMEM; | 436 | error = -ENOMEM; |
437 | } | 437 | } |
438 | if (rport) { | 438 | if (rport) { |
439 | rp = rport->dd_data; | 439 | rdata = rport->dd_data; |
440 | rp->ops = &fc_disc_rport_ops; | 440 | rdata->ops = &fc_disc_rport_ops; |
441 | rp->rp_state = RPORT_ST_INIT; | 441 | rdata->rp_state = RPORT_ST_INIT; |
442 | lport->tt.rport_login(rport); | 442 | lport->tt.rport_login(rport); |
443 | } | 443 | } |
444 | } | 444 | } |
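
Throughout fc_disc.c this patch stops calling fc_rport_rogue_create() directly and goes through the libfc function template instead (lport->tt.rport_create), so a lower-level driver can substitute its own rport constructor. A condensed sketch of the call pattern the discovery code now uses; the helper name is illustrative, while the template fields and RPORT_ST_INIT come from libfc of this era:

    #include <scsi/libfc.h>
    #include <linux/errno.h>

    static int demo_start_rport(struct fc_lport *lport, struct fc_disc_port *dp)
    {
        struct fc_rport *rport = lport->tt.rport_create(dp);
        struct fc_rport_libfc_priv *rdata;

        if (!rport)
            return -ENOMEM;

        rdata = rport->dd_data;         /* libfc private data behind the rport */
        rdata->rp_state = RPORT_ST_INIT;
        lport->tt.rport_login(rport);   /* kick off login via the template */
        return 0;
    }

The same indirection shows up in the GPN_FT parser and in fc_disc_single() further down, and the RPORT_TO_PRIV() accessor is dropped in favour of plain rport->dd_data.
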
@@ -446,20 +446,20 @@ static int fc_disc_new_target(struct fc_disc *disc, | |||
446 | } | 446 | } |
447 | 447 | ||
448 | /** | 448 | /** |
449 | * fc_disc_del_target - Delete a target | 449 | * fc_disc_del_target() - Delete a target |
450 | * @disc: FC discovery context | 450 | * @disc: FC discovery context |
451 | * @rport: The remote port to be removed | 451 | * @rport: The remote port to be removed |
452 | */ | 452 | */ |
453 | static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport) | 453 | static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport) |
454 | { | 454 | { |
455 | struct fc_lport *lport = disc->lport; | 455 | struct fc_lport *lport = disc->lport; |
456 | struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport); | 456 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
457 | list_del(&rdata->peers); | 457 | list_del(&rdata->peers); |
458 | lport->tt.rport_logoff(rport); | 458 | lport->tt.rport_logoff(rport); |
459 | } | 459 | } |
460 | 460 | ||
461 | /** | 461 | /** |
462 | * fc_disc_done - Discovery has been completed | 462 | * fc_disc_done() - Discovery has been completed |
463 | * @disc: FC discovery context | 463 | * @disc: FC discovery context |
464 | */ | 464 | */ |
465 | static void fc_disc_done(struct fc_disc *disc) | 465 | static void fc_disc_done(struct fc_disc *disc) |
@@ -479,7 +479,7 @@ static void fc_disc_done(struct fc_disc *disc) | |||
479 | } | 479 | } |
480 | 480 | ||
481 | /** | 481 | /** |
482 | * fc_disc_error - Handle error on dNS request | 482 | * fc_disc_error() - Handle error on dNS request |
483 | * @disc: FC discovery context | 483 | * @disc: FC discovery context |
484 | * @fp: The frame pointer | 484 | * @fp: The frame pointer |
485 | */ | 485 | */ |
@@ -519,7 +519,7 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) | |||
519 | } | 519 | } |
520 | 520 | ||
521 | /** | 521 | /** |
522 | * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request | 522 | * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request |
523 | * @lport: FC discovery context | 523 | * @lport: FC discovery context |
524 | * | 524 | * |
525 | * Locking Note: This function expects that the disc_mutex is locked | 525 | * Locking Note: This function expects that the disc_mutex is locked |
@@ -553,7 +553,7 @@ err: | |||
553 | } | 553 | } |
554 | 554 | ||
555 | /** | 555 | /** |
556 | * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request | 556 | * fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request |
557 | * @lport: Fibre Channel host port instance | 557 | * @lport: Fibre Channel host port instance |
558 | * @buf: GPN_FT response buffer | 558 | * @buf: GPN_FT response buffer |
559 | * @len: size of response buffer | 559 | * @len: size of response buffer |
@@ -617,7 +617,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) | |||
617 | 617 | ||
618 | if ((dp.ids.port_id != fc_host_port_id(lport->host)) && | 618 | if ((dp.ids.port_id != fc_host_port_id(lport->host)) && |
619 | (dp.ids.port_name != lport->wwpn)) { | 619 | (dp.ids.port_name != lport->wwpn)) { |
620 | rport = fc_rport_rogue_create(&dp); | 620 | rport = lport->tt.rport_create(&dp); |
621 | if (rport) { | 621 | if (rport) { |
622 | rdata = rport->dd_data; | 622 | rdata = rport->dd_data; |
623 | rdata->ops = &fc_disc_rport_ops; | 623 | rdata->ops = &fc_disc_rport_ops; |
@@ -658,7 +658,10 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len) | |||
658 | return error; | 658 | return error; |
659 | } | 659 | } |
660 | 660 | ||
661 | /* | 661 | /** |
662 | * fc_disc_timeout() - Retry handler for the disc component | ||
663 | * @work: Structure holding disc obj that needs retry discovery | ||
664 | * | ||
662 | * Handle retry of memory allocation for remote ports. | 665 | * Handle retry of memory allocation for remote ports. |
663 | */ | 666 | */ |
664 | static void fc_disc_timeout(struct work_struct *work) | 667 | static void fc_disc_timeout(struct work_struct *work) |
@@ -673,7 +676,7 @@ static void fc_disc_timeout(struct work_struct *work) | |||
673 | } | 676 | } |
674 | 677 | ||
675 | /** | 678 | /** |
676 | * fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT) | 679 | * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT) |
677 | * @sp: Current sequence of GPN_FT exchange | 680 | * @sp: Current sequence of GPN_FT exchange |
678 | * @fp: response frame | 681 | * @fp: response frame |
679 | * @lp_arg: Fibre Channel host port instance | 682 | * @lp_arg: Fibre Channel host port instance |
@@ -712,9 +715,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
712 | fr_len(fp)); | 715 | fr_len(fp)); |
713 | } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { | 716 | } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) { |
714 | 717 | ||
715 | /* | 718 | /* Accepted, parse the response. */ |
716 | * Accepted. Parse response. | ||
717 | */ | ||
718 | buf = cp + 1; | 719 | buf = cp + 1; |
719 | len -= sizeof(*cp); | 720 | len -= sizeof(*cp); |
720 | } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { | 721 | } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) { |
@@ -746,7 +747,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
746 | } | 747 | } |
747 | 748 | ||
748 | /** | 749 | /** |
749 | * fc_disc_single - Discover the directory information for a single target | 750 | * fc_disc_single() - Discover the directory information for a single target |
750 | * @lport: FC local port | 751 | * @lport: FC local port |
751 | * @dp: The port to rediscover | 752 | * @dp: The port to rediscover |
752 | * | 753 | * |
@@ -769,7 +770,7 @@ static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp) | |||
769 | if (rport) | 770 | if (rport) |
770 | fc_disc_del_target(disc, rport); | 771 | fc_disc_del_target(disc, rport); |
771 | 772 | ||
772 | new_rport = fc_rport_rogue_create(dp); | 773 | new_rport = lport->tt.rport_create(dp); |
773 | if (new_rport) { | 774 | if (new_rport) { |
774 | rdata = new_rport->dd_data; | 775 | rdata = new_rport->dd_data; |
775 | rdata->ops = &fc_disc_rport_ops; | 776 | rdata->ops = &fc_disc_rport_ops; |
@@ -782,7 +783,7 @@ out: | |||
782 | } | 783 | } |
783 | 784 | ||
784 | /** | 785 | /** |
785 | * fc_disc_stop - Stop discovery for a given lport | 786 | * fc_disc_stop() - Stop discovery for a given lport |
786 | * @lport: The lport that discovery should stop for | 787 | * @lport: The lport that discovery should stop for |
787 | */ | 788 | */ |
788 | void fc_disc_stop(struct fc_lport *lport) | 789 | void fc_disc_stop(struct fc_lport *lport) |
@@ -796,7 +797,7 @@ void fc_disc_stop(struct fc_lport *lport) | |||
796 | } | 797 | } |
797 | 798 | ||
798 | /** | 799 | /** |
799 | * fc_disc_stop_final - Stop discovery for a given lport | 800 | * fc_disc_stop_final() - Stop discovery for a given lport |
800 | * @lport: The lport that discovery should stop for | 801 | * @lport: The lport that discovery should stop for |
801 | * | 802 | * |
802 | * This function will block until discovery has been | 803 | * This function will block until discovery has been |
@@ -809,7 +810,7 @@ void fc_disc_stop_final(struct fc_lport *lport) | |||
809 | } | 810 | } |
810 | 811 | ||
811 | /** | 812 | /** |
812 | * fc_disc_init - Initialize the discovery block | 813 | * fc_disc_init() - Initialize the discovery block |
813 | * @lport: FC local port | 814 | * @lport: FC local port |
814 | */ | 815 | */ |
815 | int fc_disc_init(struct fc_lport *lport) | 816 | int fc_disc_init(struct fc_lport *lport) |
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 66db08a5f27f..505825b6124d 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -32,8 +32,6 @@ | |||
32 | #include <scsi/libfc.h> | 32 | #include <scsi/libfc.h> |
33 | #include <scsi/fc_encode.h> | 33 | #include <scsi/fc_encode.h> |
34 | 34 | ||
35 | #define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */ | ||
36 | |||
37 | /* | 35 | /* |
38 | * fc_exch_debug can be set in debugger or at compile time to get more logs. | 36 | * fc_exch_debug can be set in debugger or at compile time to get more logs. |
39 | */ | 37 | */ |
@@ -627,7 +625,6 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
627 | { | 625 | { |
628 | struct fc_exch *ep; | 626 | struct fc_exch *ep; |
629 | struct fc_frame_header *fh; | 627 | struct fc_frame_header *fh; |
630 | u16 rxid; | ||
631 | 628 | ||
632 | ep = mp->lp->tt.exch_get(mp->lp, fp); | 629 | ep = mp->lp->tt.exch_get(mp->lp, fp); |
633 | if (ep) { | 630 | if (ep) { |
@@ -654,18 +651,6 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
654 | if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0) | 651 | if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0) |
655 | ep->esb_stat &= ~ESB_ST_SEQ_INIT; | 652 | ep->esb_stat &= ~ESB_ST_SEQ_INIT; |
656 | 653 | ||
657 | /* | ||
658 | * Set the responder ID in the frame header. | ||
659 | * The old one should've been 0xffff. | ||
660 | * If it isn't, don't assign one. | ||
661 | * Incoming basic link service frames may specify | ||
662 | * a referenced RX_ID. | ||
663 | */ | ||
664 | if (fh->fh_type != FC_TYPE_BLS) { | ||
665 | rxid = ntohs(fh->fh_rx_id); | ||
666 | WARN_ON(rxid != FC_XID_UNKNOWN); | ||
667 | fh->fh_rx_id = htons(ep->rxid); | ||
668 | } | ||
669 | fc_exch_hold(ep); /* hold for caller */ | 654 | fc_exch_hold(ep); /* hold for caller */ |
670 | spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */ | 655 | spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */ |
671 | } | 656 | } |
@@ -677,8 +662,8 @@ static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
677 | * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold | 662 | * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold |
678 | * on the ep that should be released by the caller. | 663 | * on the ep that should be released by the caller. |
679 | */ | 664 | */ |
680 | static enum fc_pf_rjt_reason | 665 | static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp, |
681 | fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp) | 666 | struct fc_frame *fp) |
682 | { | 667 | { |
683 | struct fc_frame_header *fh = fc_frame_header_get(fp); | 668 | struct fc_frame_header *fh = fc_frame_header_get(fp); |
684 | struct fc_exch *ep = NULL; | 669 | struct fc_exch *ep = NULL; |
@@ -996,9 +981,9 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp) | |||
996 | * Send BLS Reject. | 981 | * Send BLS Reject. |
997 | * This is for rejecting BA_ABTS only. | 982 | * This is for rejecting BA_ABTS only. |
998 | */ | 983 | */ |
999 | static void | 984 | static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp, |
1000 | fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason, | 985 | enum fc_ba_rjt_reason reason, |
1001 | enum fc_ba_rjt_explan explan) | 986 | enum fc_ba_rjt_explan explan) |
1002 | { | 987 | { |
1003 | struct fc_frame *fp; | 988 | struct fc_frame *fp; |
1004 | struct fc_frame_header *rx_fh; | 989 | struct fc_frame_header *rx_fh; |
@@ -1096,7 +1081,7 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) | |||
1096 | ap->ba_high_seq_cnt = fh->fh_seq_cnt; | 1081 | ap->ba_high_seq_cnt = fh->fh_seq_cnt; |
1097 | ap->ba_low_seq_cnt = htons(sp->cnt); | 1082 | ap->ba_low_seq_cnt = htons(sp->cnt); |
1098 | } | 1083 | } |
1099 | sp = fc_seq_start_next(sp); | 1084 | sp = fc_seq_start_next_locked(sp); |
1100 | spin_unlock_bh(&ep->ex_lock); | 1085 | spin_unlock_bh(&ep->ex_lock); |
1101 | fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); | 1086 | fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); |
1102 | fc_frame_free(rx_fp); | 1087 | fc_frame_free(rx_fp); |
@@ -1480,10 +1465,11 @@ static void fc_exch_reset(struct fc_exch *ep) | |||
1480 | * If sid is non-zero, reset only exchanges we source from that FID. | 1465 | * If sid is non-zero, reset only exchanges we source from that FID. |
1481 | * If did is non-zero, reset only exchanges destined to that FID. | 1466 | * If did is non-zero, reset only exchanges destined to that FID. |
1482 | */ | 1467 | */ |
1483 | void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did) | 1468 | void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) |
1484 | { | 1469 | { |
1485 | struct fc_exch *ep; | 1470 | struct fc_exch *ep; |
1486 | struct fc_exch *next; | 1471 | struct fc_exch *next; |
1472 | struct fc_exch_mgr *mp = lp->emp; | ||
1487 | 1473 | ||
1488 | spin_lock_bh(&mp->em_lock); | 1474 | spin_lock_bh(&mp->em_lock); |
1489 | restart: | 1475 | restart: |
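
fc_exch_mgr_reset() changes its first argument from the exchange manager to the lport; the manager is now looked up internally via lp->emp, and the template callers are updated in fc_lport.c accordingly (see the fc_lport_destroy and fc_lport_enter_reset hunks below). A short sketch of the before/after call and of the new lookup; the demo_* functions are illustrative:

    #include <scsi/libfc.h>

    /* sketch: reset all exchanges for an lport through the template */
    static void demo_reset_all(struct fc_lport *lport)
    {
        /* before this patch: lport->tt.exch_mgr_reset(lport->emp, 0, 0); */
        lport->tt.exch_mgr_reset(lport, 0, 0);
    }

    /* sketch of the reworked entry point */
    static void demo_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
    {
        struct fc_exch_mgr *mp = lp->emp;

        (void)mp;   /* ... walk the manager's exchanges, resetting sid/did matches ... */
    }

Keeping struct fc_exch_mgr behind the lport makes the exchange manager an internal detail that can be reworked later without touching every caller.
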
@@ -1607,7 +1593,7 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) | |||
1607 | if (IS_ERR(fp)) { | 1593 | if (IS_ERR(fp)) { |
1608 | int err = PTR_ERR(fp); | 1594 | int err = PTR_ERR(fp); |
1609 | 1595 | ||
1610 | if (err == -FC_EX_CLOSED) | 1596 | if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT) |
1611 | goto cleanup; | 1597 | goto cleanup; |
1612 | FC_DBG("Cannot process RRQ, because of frame error %d\n", err); | 1598 | FC_DBG("Cannot process RRQ, because of frame error %d\n", err); |
1613 | return; | 1599 | return; |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 404e63ff46b8..2a631d7dbcec 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -161,7 +161,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp) | |||
161 | } | 161 | } |
162 | 162 | ||
163 | /** | 163 | /** |
164 | * fc_fcp_pkt_release - release hold on scsi_pkt packet | 164 | * fc_fcp_pkt_release() - release hold on scsi_pkt packet |
165 | * @fsp: fcp packet struct | 165 | * @fsp: fcp packet struct |
166 | * | 166 | * |
167 | * This is used by upper layer scsi driver. | 167 | * This is used by upper layer scsi driver. |
@@ -183,8 +183,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) | |||
183 | } | 183 | } |
184 | 184 | ||
185 | /** | 185 | /** |
186 | * fc_fcp_pkt_destroy - release hold on scsi_pkt packet | 186 | * fc_fcp_pkt_destroy() - release hold on scsi_pkt packet |
187 | * | ||
188 | * @seq: exchange sequence | 187 | * @seq: exchange sequence |
189 | * @fsp: fcp packet struct | 188 | * @fsp: fcp packet struct |
190 | * | 189 | * |
@@ -199,7 +198,7 @@ static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp) | |||
199 | } | 198 | } |
200 | 199 | ||
201 | /** | 200 | /** |
202 | * fc_fcp_lock_pkt - lock a packet and get a ref to it. | 201 | * fc_fcp_lock_pkt() - lock a packet and get a ref to it. |
203 | * @fsp: fcp packet | 202 | * @fsp: fcp packet |
204 | * | 203 | * |
205 | * We should only return error if we return a command to scsi-ml before | 204 | * We should only return error if we return a command to scsi-ml before |
@@ -291,9 +290,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
291 | buf = fc_frame_payload_get(fp, 0); | 290 | buf = fc_frame_payload_get(fp, 0); |
292 | 291 | ||
293 | if (offset + len > fsp->data_len) { | 292 | if (offset + len > fsp->data_len) { |
294 | /* | 293 | /* this should never happen */ |
295 | * this should never happen | ||
296 | */ | ||
297 | if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && | 294 | if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && |
298 | fc_frame_crc_check(fp)) | 295 | fc_frame_crc_check(fp)) |
299 | goto crc_err; | 296 | goto crc_err; |
@@ -387,8 +384,8 @@ crc_err: | |||
387 | fc_fcp_complete_locked(fsp); | 384 | fc_fcp_complete_locked(fsp); |
388 | } | 385 | } |
389 | 386 | ||
390 | /* | 387 | /** |
391 | * fc_fcp_send_data - Send SCSI data to target. | 388 | * fc_fcp_send_data() - Send SCSI data to target. |
392 | * @fsp: ptr to fc_fcp_pkt | 389 | * @fsp: ptr to fc_fcp_pkt |
393 | * @sp: ptr to this sequence | 390 | * @sp: ptr to this sequence |
394 | * @offset: starting offset for this data request | 391 | * @offset: starting offset for this data request |
@@ -610,8 +607,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
610 | } | 607 | } |
611 | } | 608 | } |
612 | 609 | ||
613 | /* | 610 | /** |
614 | * fc_fcp_reduce_can_queue - drop can_queue | 611 | * fc_fcp_reduce_can_queue() - drop can_queue |
615 | * @lp: lport to drop queueing for | 612 | * @lp: lport to drop queueing for |
616 | * | 613 | * |
617 | * If we are getting memory allocation failures, then we may | 614 | * If we are getting memory allocation failures, then we may |
@@ -642,9 +639,11 @@ done: | |||
642 | spin_unlock_irqrestore(lp->host->host_lock, flags); | 639 | spin_unlock_irqrestore(lp->host->host_lock, flags); |
643 | } | 640 | } |
644 | 641 | ||
645 | /* | 642 | /** |
646 | * exch mgr calls this routine to process scsi | 643 | * fc_fcp_recv() - Receive FCP frames |
647 | * exchanges. | 644 | * @seq: The sequence the frame is on |
645 | * @fp: The FC frame | ||
646 | * @arg: The related FCP packet | ||
648 | * | 647 | * |
649 | * Return : None | 648 | * Return : None |
650 | * Context : called from Soft IRQ context | 649 | * Context : called from Soft IRQ context |
@@ -832,7 +831,7 @@ err: | |||
832 | } | 831 | } |
833 | 832 | ||
834 | /** | 833 | /** |
835 | * fc_fcp_complete_locked - complete processing of a fcp packet | 834 | * fc_fcp_complete_locked() - complete processing of a fcp packet |
836 | * @fsp: fcp packet | 835 | * @fsp: fcp packet |
837 | * | 836 | * |
838 | * This function may sleep if a timer is pending. The packet lock must be | 837 | * This function may sleep if a timer is pending. The packet lock must be |
@@ -900,7 +899,7 @@ static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error) | |||
900 | } | 899 | } |
901 | 900 | ||
902 | /** | 901 | /** |
903 | * fc_fcp_cleanup_each_cmd - run fn on each active command | 902 | * fc_fcp_cleanup_each_cmd() - Cleanup active commands |
904 | * @lp: logical port | 903 | * @lp: logical port |
905 | * @id: target id | 904 | * @id: target id |
906 | * @lun: lun | 905 | * @lun: lun |
@@ -952,7 +951,7 @@ static void fc_fcp_abort_io(struct fc_lport *lp) | |||
952 | } | 951 | } |
953 | 952 | ||
954 | /** | 953 | /** |
955 | * fc_fcp_pkt_send - send a fcp packet to the lower level. | 954 | * fc_fcp_pkt_send() - send a fcp packet to the lower level. |
956 | * @lp: fc lport | 955 | * @lp: fc lport |
957 | * @fsp: fc packet. | 956 | * @fsp: fc packet. |
958 | * | 957 | * |
@@ -1621,7 +1620,7 @@ out: | |||
1621 | static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) | 1620 | static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp) |
1622 | { | 1621 | { |
1623 | /* lock ? */ | 1622 | /* lock ? */ |
1624 | return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP); | 1623 | return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull; |
1625 | } | 1624 | } |
1626 | 1625 | ||
1627 | /** | 1626 | /** |
@@ -1727,7 +1726,7 @@ out: | |||
1727 | EXPORT_SYMBOL(fc_queuecommand); | 1726 | EXPORT_SYMBOL(fc_queuecommand); |
1728 | 1727 | ||
1729 | /** | 1728 | /** |
1730 | * fc_io_compl - Handle responses for completed commands | 1729 | * fc_io_compl() - Handle responses for completed commands |
1731 | * @fsp: scsi packet | 1730 | * @fsp: scsi packet |
1732 | * | 1731 | * |
1733 | * Translates a error to a Linux SCSI error. | 1732 | * Translates a error to a Linux SCSI error. |
@@ -1810,12 +1809,12 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1810 | sc_cmd->result = DID_ERROR << 16; | 1809 | sc_cmd->result = DID_ERROR << 16; |
1811 | break; | 1810 | break; |
1812 | case FC_DATA_UNDRUN: | 1811 | case FC_DATA_UNDRUN: |
1813 | if (fsp->cdb_status == 0) { | 1812 | if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) { |
1814 | /* | 1813 | /* |
1815 | * scsi status is good but transport level | 1814 | * scsi status is good but transport level |
1816 | * underrun. for read it should be an error?? | 1815 | * underrun. |
1817 | */ | 1816 | */ |
1818 | sc_cmd->result = (DID_OK << 16) | fsp->cdb_status; | 1817 | sc_cmd->result = DID_OK << 16; |
1819 | } else { | 1818 | } else { |
1820 | /* | 1819 | /* |
1821 | * scsi got underrun, this is an error | 1820 | * scsi got underrun, this is an error |
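
The FC_DATA_UNDRUN leg of fc_io_compl() is tightened here: a transport-level underrun is now reported as success only when the SCSI status is good and the command was not a read, answering the old "for read it should be an error??" comment. A condensed, illustrative sketch of the resulting branch; the field names and the FC_SRB_READ flag are from fc_fcp.c, and the error leg is abridged:

    #include <scsi/libfc.h>
    #include <scsi/scsi_cmnd.h>

    static void demo_map_underrun(struct fc_fcp_pkt *fsp, struct scsi_cmnd *sc_cmd)
    {
        if (fsp->cdb_status == 0 && !(fsp->req_flags & FC_SRB_READ)) {
            /* good SCSI status on a non-read: treat the underrun as benign */
            sc_cmd->result = DID_OK << 16;
        } else {
            /* a short read (or bad SCSI status) really is an error */
            sc_cmd->result = DID_ERROR << 16;
        }
    }
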
@@ -1857,7 +1856,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1857 | } | 1856 | } |
1858 | 1857 | ||
1859 | /** | 1858 | /** |
1860 | * fc_fcp_complete - complete processing of a fcp packet | 1859 | * fc_fcp_complete() - complete processing of a fcp packet |
1861 | * @fsp: fcp packet | 1860 | * @fsp: fcp packet |
1862 | * | 1861 | * |
1863 | * This function may sleep if a fsp timer is pending. | 1862 | * This function may sleep if a fsp timer is pending. |
@@ -1874,9 +1873,10 @@ void fc_fcp_complete(struct fc_fcp_pkt *fsp) | |||
1874 | EXPORT_SYMBOL(fc_fcp_complete); | 1873 | EXPORT_SYMBOL(fc_fcp_complete); |
1875 | 1874 | ||
1876 | /** | 1875 | /** |
1877 | * fc_eh_abort - Abort a command...from scsi host template | 1876 | * fc_eh_abort() - Abort a command |
1878 | * @sc_cmd: scsi command to abort | 1877 | * @sc_cmd: scsi command to abort |
1879 | * | 1878 | * |
1879 | * From scsi host template. | ||
1880 | * send ABTS to the target device and wait for the response | 1880 | * send ABTS to the target device and wait for the response |
1881 | * sc_cmd is the pointer to the command to be aborted. | 1881 | * sc_cmd is the pointer to the command to be aborted. |
1882 | */ | 1882 | */ |
@@ -1890,7 +1890,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) | |||
1890 | lp = shost_priv(sc_cmd->device->host); | 1890 | lp = shost_priv(sc_cmd->device->host); |
1891 | if (lp->state != LPORT_ST_READY) | 1891 | if (lp->state != LPORT_ST_READY) |
1892 | return rc; | 1892 | return rc; |
1893 | else if (!(lp->link_status & FC_LINK_UP)) | 1893 | else if (!lp->link_up) |
1894 | return rc; | 1894 | return rc; |
1895 | 1895 | ||
1896 | spin_lock_irqsave(lp->host->host_lock, flags); | 1896 | spin_lock_irqsave(lp->host->host_lock, flags); |
@@ -1920,7 +1920,7 @@ release_pkt: | |||
1920 | EXPORT_SYMBOL(fc_eh_abort); | 1920 | EXPORT_SYMBOL(fc_eh_abort); |
1921 | 1921 | ||
1922 | /** | 1922 | /** |
1923 | * fc_eh_device_reset: Reset a single LUN | 1923 | * fc_eh_device_reset() - Reset a single LUN |
1924 | * @sc_cmd: scsi command | 1924 | * @sc_cmd: scsi command |
1925 | * | 1925 | * |
1926 | * Set from scsi host template to send tm cmd to the target and wait for the | 1926 | * Set from scsi host template to send tm cmd to the target and wait for the |
@@ -1973,7 +1973,7 @@ out: | |||
1973 | EXPORT_SYMBOL(fc_eh_device_reset); | 1973 | EXPORT_SYMBOL(fc_eh_device_reset); |
1974 | 1974 | ||
1975 | /** | 1975 | /** |
1976 | * fc_eh_host_reset - The reset function will reset the ports on the host. | 1976 | * fc_eh_host_reset() - The reset function will reset the ports on the host. |
1977 | * @sc_cmd: scsi command | 1977 | * @sc_cmd: scsi command |
1978 | */ | 1978 | */ |
1979 | int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) | 1979 | int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) |
@@ -1999,7 +1999,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd) | |||
1999 | EXPORT_SYMBOL(fc_eh_host_reset); | 1999 | EXPORT_SYMBOL(fc_eh_host_reset); |
2000 | 2000 | ||
2001 | /** | 2001 | /** |
2002 | * fc_slave_alloc - configure queue depth | 2002 | * fc_slave_alloc() - configure queue depth |
2003 | * @sdev: scsi device | 2003 | * @sdev: scsi device |
2004 | * | 2004 | * |
2005 | * Configures queue depth based on host's cmd_per_len. If not set | 2005 | * Configures queue depth based on host's cmd_per_len. If not set |
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 0b9bdb1fb807..2ae50a1188e6 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -139,7 +139,7 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp) | |||
139 | } | 139 | } |
140 | 140 | ||
141 | /** | 141 | /** |
142 | * fc_lport_rport_callback - Event handler for rport events | 142 | * fc_lport_rport_callback() - Event handler for rport events |
143 | * @lport: The lport which is receiving the event | 143 | * @lport: The lport which is receiving the event |
144 | * @rport: The rport which the event has occured on | 144 | * @rport: The rport which the event has occured on |
145 | * @event: The event that occured | 145 | * @event: The event that occured |
@@ -195,7 +195,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport, | |||
195 | } | 195 | } |
196 | 196 | ||
197 | /** | 197 | /** |
198 | * fc_lport_state - Return a string which represents the lport's state | 198 | * fc_lport_state() - Return a string which represents the lport's state |
199 | * @lport: The lport whose state is to converted to a string | 199 | * @lport: The lport whose state is to converted to a string |
200 | */ | 200 | */ |
201 | static const char *fc_lport_state(struct fc_lport *lport) | 201 | static const char *fc_lport_state(struct fc_lport *lport) |
@@ -209,7 +209,7 @@ static const char *fc_lport_state(struct fc_lport *lport) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | /** | 211 | /** |
212 | * fc_lport_ptp_setup - Create an rport for point-to-point mode | 212 | * fc_lport_ptp_setup() - Create an rport for point-to-point mode |
213 | * @lport: The lport to attach the ptp rport to | 213 | * @lport: The lport to attach the ptp rport to |
214 | * @fid: The FID of the ptp rport | 214 | * @fid: The FID of the ptp rport |
215 | * @remote_wwpn: The WWPN of the ptp rport | 215 | * @remote_wwpn: The WWPN of the ptp rport |
@@ -232,7 +232,7 @@ static void fc_lport_ptp_setup(struct fc_lport *lport, | |||
232 | lport->ptp_rp = NULL; | 232 | lport->ptp_rp = NULL; |
233 | } | 233 | } |
234 | 234 | ||
235 | lport->ptp_rp = fc_rport_rogue_create(&dp); | 235 | lport->ptp_rp = lport->tt.rport_create(&dp); |
236 | 236 | ||
237 | lport->tt.rport_login(lport->ptp_rp); | 237 | lport->tt.rport_login(lport->ptp_rp); |
238 | 238 | ||
@@ -250,7 +250,7 @@ void fc_get_host_port_state(struct Scsi_Host *shost) | |||
250 | { | 250 | { |
251 | struct fc_lport *lp = shost_priv(shost); | 251 | struct fc_lport *lp = shost_priv(shost); |
252 | 252 | ||
253 | if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP) | 253 | if (lp->link_up) |
254 | fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; | 254 | fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; |
255 | else | 255 | else |
256 | fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; | 256 | fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; |
@@ -351,7 +351,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) | |||
351 | } | 351 | } |
352 | 352 | ||
353 | /** | 353 | /** |
354 | * fc_lport_recv_rlir_req - Handle received Registered Link Incident Report. | 354 | * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. |
355 | * @lport: Fibre Channel local port recieving the RLIR | 355 | * @lport: Fibre Channel local port recieving the RLIR |
356 | * @sp: current sequence in the RLIR exchange | 356 | * @sp: current sequence in the RLIR exchange |
357 | * @fp: RLIR request frame | 357 | * @fp: RLIR request frame |
@@ -370,7 +370,7 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, | |||
370 | } | 370 | } |
371 | 371 | ||
372 | /** | 372 | /** |
373 | * fc_lport_recv_echo_req - Handle received ECHO request | 373 | * fc_lport_recv_echo_req() - Handle received ECHO request |
374 | * @lport: Fibre Channel local port recieving the ECHO | 374 | * @lport: Fibre Channel local port recieving the ECHO |
375 | * @sp: current sequence in the ECHO exchange | 375 | * @sp: current sequence in the ECHO exchange |
376 | * @fp: ECHO request frame | 376 | * @fp: ECHO request frame |
@@ -412,7 +412,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
412 | } | 412 | } |
413 | 413 | ||
414 | /** | 414 | /** |
415 | * fc_lport_recv_echo_req - Handle received Request Node ID data request | 415 | * fc_lport_recv_echo_req() - Handle received Request Node ID data request |
416 | * @lport: Fibre Channel local port recieving the RNID | 416 | * @lport: Fibre Channel local port recieving the RNID |
417 | * @sp: current sequence in the RNID exchange | 417 | * @sp: current sequence in the RNID exchange |
418 | * @fp: RNID request frame | 418 | * @fp: RNID request frame |
@@ -479,7 +479,7 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
479 | } | 479 | } |
480 | 480 | ||
481 | /** | 481 | /** |
482 | * fc_lport_recv_adisc_req - Handle received Address Discovery Request | 482 | * fc_lport_recv_adisc_req() - Handle received Address Discovery Request |
483 | * @lport: Fibre Channel local port recieving the ADISC | 483 | * @lport: Fibre Channel local port recieving the ADISC |
484 | * @sp: current sequence in the ADISC exchange | 484 | * @sp: current sequence in the ADISC exchange |
485 | * @fp: ADISC request frame | 485 | * @fp: ADISC request frame |
@@ -529,7 +529,7 @@ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp, | |||
529 | } | 529 | } |
530 | 530 | ||
531 | /** | 531 | /** |
532 | * fc_lport_recv_logo_req - Handle received fabric LOGO request | 532 | * fc_lport_recv_logo_req() - Handle received fabric LOGO request |
533 | * @lport: Fibre Channel local port recieving the LOGO | 533 | * @lport: Fibre Channel local port recieving the LOGO |
534 | * @sp: current sequence in the LOGO exchange | 534 | * @sp: current sequence in the LOGO exchange |
535 | * @fp: LOGO request frame | 535 | * @fp: LOGO request frame |
@@ -546,7 +546,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp, | |||
546 | } | 546 | } |
547 | 547 | ||
548 | /** | 548 | /** |
549 | * fc_fabric_login - Start the lport state machine | 549 | * fc_fabric_login() - Start the lport state machine |
550 | * @lport: The lport that should log into the fabric | 550 | * @lport: The lport that should log into the fabric |
551 | * | 551 | * |
552 | * Locking Note: This function should not be called | 552 | * Locking Note: This function should not be called |
@@ -568,7 +568,7 @@ int fc_fabric_login(struct fc_lport *lport) | |||
568 | EXPORT_SYMBOL(fc_fabric_login); | 568 | EXPORT_SYMBOL(fc_fabric_login); |
569 | 569 | ||
570 | /** | 570 | /** |
571 | * fc_linkup - Handler for transport linkup events | 571 | * fc_linkup() - Handler for transport linkup events |
572 | * @lport: The lport whose link is up | 572 | * @lport: The lport whose link is up |
573 | */ | 573 | */ |
574 | void fc_linkup(struct fc_lport *lport) | 574 | void fc_linkup(struct fc_lport *lport) |
@@ -577,8 +577,8 @@ void fc_linkup(struct fc_lport *lport) | |||
577 | fc_host_port_id(lport->host)); | 577 | fc_host_port_id(lport->host)); |
578 | 578 | ||
579 | mutex_lock(&lport->lp_mutex); | 579 | mutex_lock(&lport->lp_mutex); |
580 | if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) { | 580 | if (!lport->link_up) { |
581 | lport->link_status |= FC_LINK_UP; | 581 | lport->link_up = 1; |
582 | 582 | ||
583 | if (lport->state == LPORT_ST_RESET) | 583 | if (lport->state == LPORT_ST_RESET) |
584 | fc_lport_enter_flogi(lport); | 584 | fc_lport_enter_flogi(lport); |
@@ -588,7 +588,7 @@ void fc_linkup(struct fc_lport *lport) | |||
588 | EXPORT_SYMBOL(fc_linkup); | 588 | EXPORT_SYMBOL(fc_linkup); |
589 | 589 | ||
590 | /** | 590 | /** |
591 | * fc_linkdown - Handler for transport linkdown events | 591 | * fc_linkdown() - Handler for transport linkdown events |
592 | * @lport: The lport whose link is down | 592 | * @lport: The lport whose link is down |
593 | */ | 593 | */ |
594 | void fc_linkdown(struct fc_lport *lport) | 594 | void fc_linkdown(struct fc_lport *lport) |
@@ -597,8 +597,8 @@ void fc_linkdown(struct fc_lport *lport) | |||
597 | FC_DEBUG_LPORT("Link is down for port (%6x)\n", | 597 | FC_DEBUG_LPORT("Link is down for port (%6x)\n", |
598 | fc_host_port_id(lport->host)); | 598 | fc_host_port_id(lport->host)); |
599 | 599 | ||
600 | if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) { | 600 | if (lport->link_up) { |
601 | lport->link_status &= ~(FC_LINK_UP); | 601 | lport->link_up = 0; |
602 | fc_lport_enter_reset(lport); | 602 | fc_lport_enter_reset(lport); |
603 | lport->tt.fcp_cleanup(lport); | 603 | lport->tt.fcp_cleanup(lport); |
604 | } | 604 | } |
@@ -607,48 +607,25 @@ void fc_linkdown(struct fc_lport *lport) | |||
607 | EXPORT_SYMBOL(fc_linkdown); | 607 | EXPORT_SYMBOL(fc_linkdown); |
608 | 608 | ||
609 | /** | 609 | /** |
610 | * fc_pause - Pause the flow of frames | 610 | * fc_fabric_logoff() - Logout of the fabric |
611 | * @lport: The lport to be paused | ||
612 | */ | ||
613 | void fc_pause(struct fc_lport *lport) | ||
614 | { | ||
615 | mutex_lock(&lport->lp_mutex); | ||
616 | lport->link_status |= FC_PAUSE; | ||
617 | mutex_unlock(&lport->lp_mutex); | ||
618 | } | ||
619 | EXPORT_SYMBOL(fc_pause); | ||
620 | |||
621 | /** | ||
622 | * fc_unpause - Unpause the flow of frames | ||
623 | * @lport: The lport to be unpaused | ||
624 | */ | ||
625 | void fc_unpause(struct fc_lport *lport) | ||
626 | { | ||
627 | mutex_lock(&lport->lp_mutex); | ||
628 | lport->link_status &= ~(FC_PAUSE); | ||
629 | mutex_unlock(&lport->lp_mutex); | ||
630 | } | ||
631 | EXPORT_SYMBOL(fc_unpause); | ||
632 | |||
633 | /** | ||
634 | * fc_fabric_logoff - Logout of the fabric | ||
635 | * @lport: fc_lport pointer to logoff the fabric | 611 | * @lport: fc_lport pointer to logoff the fabric |
636 | * | 612 | * |
637 | * Return value: | 613 | * Return value: |
638 | * 0 for success, -1 for failure | 614 | * 0 for success, -1 for failure |
639 | **/ | 615 | */ |
640 | int fc_fabric_logoff(struct fc_lport *lport) | 616 | int fc_fabric_logoff(struct fc_lport *lport) |
641 | { | 617 | { |
642 | lport->tt.disc_stop_final(lport); | 618 | lport->tt.disc_stop_final(lport); |
643 | mutex_lock(&lport->lp_mutex); | 619 | mutex_lock(&lport->lp_mutex); |
644 | fc_lport_enter_logo(lport); | 620 | fc_lport_enter_logo(lport); |
645 | mutex_unlock(&lport->lp_mutex); | 621 | mutex_unlock(&lport->lp_mutex); |
622 | cancel_delayed_work_sync(&lport->retry_work); | ||
646 | return 0; | 623 | return 0; |
647 | } | 624 | } |
648 | EXPORT_SYMBOL(fc_fabric_logoff); | 625 | EXPORT_SYMBOL(fc_fabric_logoff); |
649 | 626 | ||
650 | /** | 627 | /** |
651 | * fc_lport_destroy - unregister a fc_lport | 628 | * fc_lport_destroy() - unregister a fc_lport |
652 | * @lport: fc_lport pointer to unregister | 629 | * @lport: fc_lport pointer to unregister |
653 | * | 630 | * |
654 | * Return value: | 631 | * Return value: |
@@ -658,26 +635,25 @@ EXPORT_SYMBOL(fc_fabric_logoff); | |||
658 | * clean-up all the allocated memory | 635 | * clean-up all the allocated memory |
659 | * and free up other system resources. | 636 | * and free up other system resources. |
660 | * | 637 | * |
661 | **/ | 638 | */ |
662 | int fc_lport_destroy(struct fc_lport *lport) | 639 | int fc_lport_destroy(struct fc_lport *lport) |
663 | { | 640 | { |
664 | lport->tt.frame_send = fc_frame_drop; | 641 | lport->tt.frame_send = fc_frame_drop; |
665 | lport->tt.fcp_abort_io(lport); | 642 | lport->tt.fcp_abort_io(lport); |
666 | lport->tt.exch_mgr_reset(lport->emp, 0, 0); | 643 | lport->tt.exch_mgr_reset(lport, 0, 0); |
667 | return 0; | 644 | return 0; |
668 | } | 645 | } |
669 | EXPORT_SYMBOL(fc_lport_destroy); | 646 | EXPORT_SYMBOL(fc_lport_destroy); |
670 | 647 | ||
671 | /** | 648 | /** |
672 | * fc_set_mfs - sets up the mfs for the corresponding fc_lport | 649 | * fc_set_mfs() - sets up the mfs for the corresponding fc_lport |
673 | * @lport: fc_lport pointer to unregister | 650 | * @lport: fc_lport pointer to unregister |
674 | * @mfs: the new mfs for fc_lport | 651 | * @mfs: the new mfs for fc_lport |
675 | * | 652 | * |
676 | * Set mfs for the given fc_lport to the new mfs. | 653 | * Set mfs for the given fc_lport to the new mfs. |
677 | * | 654 | * |
678 | * Return: 0 for success | 655 | * Return: 0 for success |
679 | * | 656 | */ |
680 | **/ | ||
681 | int fc_set_mfs(struct fc_lport *lport, u32 mfs) | 657 | int fc_set_mfs(struct fc_lport *lport, u32 mfs) |
682 | { | 658 | { |
683 | unsigned int old_mfs; | 659 | unsigned int old_mfs; |
@@ -706,7 +682,7 @@ int fc_set_mfs(struct fc_lport *lport, u32 mfs) | |||
706 | EXPORT_SYMBOL(fc_set_mfs); | 682 | EXPORT_SYMBOL(fc_set_mfs); |
707 | 683 | ||
708 | /** | 684 | /** |
709 | * fc_lport_disc_callback - Callback for discovery events | 685 | * fc_lport_disc_callback() - Callback for discovery events |
710 | * @lport: FC local port | 686 | * @lport: FC local port |
711 | * @event: The discovery event | 687 | * @event: The discovery event |
712 | */ | 688 | */ |
@@ -731,7 +707,7 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) | |||
731 | } | 707 | } |
732 | 708 | ||
733 | /** | 709 | /** |
734 | * fc_rport_enter_ready - Enter the ready state and start discovery | 710 | * fc_rport_enter_ready() - Enter the ready state and start discovery |
735 | * @lport: Fibre Channel local port that is ready | 711 | * @lport: Fibre Channel local port that is ready |
736 | * | 712 | * |
737 | * Locking Note: The lport lock is expected to be held before calling | 713 | * Locking Note: The lport lock is expected to be held before calling |
@@ -748,7 +724,7 @@ static void fc_lport_enter_ready(struct fc_lport *lport) | |||
748 | } | 724 | } |
749 | 725 | ||
750 | /** | 726 | /** |
751 | * fc_lport_recv_flogi_req - Receive a FLOGI request | 727 | * fc_lport_recv_flogi_req() - Receive a FLOGI request |
752 | * @sp_in: The sequence the FLOGI is on | 728 | * @sp_in: The sequence the FLOGI is on |
753 | * @rx_fp: The frame the FLOGI is in | 729 | * @rx_fp: The frame the FLOGI is in |
754 | * @lport: The lport that recieved the request | 730 | * @lport: The lport that recieved the request |
@@ -838,7 +814,7 @@ out: | |||
838 | } | 814 | } |
839 | 815 | ||
840 | /** | 816 | /** |
841 | * fc_lport_recv_req - The generic lport request handler | 817 | * fc_lport_recv_req() - The generic lport request handler |
842 | * @lport: The lport that received the request | 818 | * @lport: The lport that received the request |
843 | * @sp: The sequence the request is on | 819 | * @sp: The sequence the request is on |
844 | * @fp: The frame the request is in | 820 | * @fp: The frame the request is in |
@@ -934,7 +910,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, | |||
934 | } | 910 | } |
935 | 911 | ||
936 | /** | 912 | /** |
937 | * fc_lport_reset - Reset an lport | 913 | * fc_lport_reset() - Reset an lport |
938 | * @lport: The lport which should be reset | 914 | * @lport: The lport which should be reset |
939 | * | 915 | * |
940 | * Locking Note: This functions should not be called with the | 916 | * Locking Note: This functions should not be called with the |
@@ -942,6 +918,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, | |||
942 | */ | 918 | */ |
943 | int fc_lport_reset(struct fc_lport *lport) | 919 | int fc_lport_reset(struct fc_lport *lport) |
944 | { | 920 | { |
921 | cancel_delayed_work_sync(&lport->retry_work); | ||
945 | mutex_lock(&lport->lp_mutex); | 922 | mutex_lock(&lport->lp_mutex); |
946 | fc_lport_enter_reset(lport); | 923 | fc_lport_enter_reset(lport); |
947 | mutex_unlock(&lport->lp_mutex); | 924 | mutex_unlock(&lport->lp_mutex); |
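
fc_lport_reset() now cancels the lport's pending retry work before entering the reset path (fc_fabric_logoff() gains the same call earlier in this file), so a delayed retry queued by an earlier state cannot fire into a port that is being reset or logged off. A minimal sketch of the idiom with an illustrative port structure:

    #include <linux/workqueue.h>
    #include <linux/mutex.h>

    struct demo_port {
        struct delayed_work retry_work;
        struct mutex lock;
    };

    static void demo_port_reset(struct demo_port *port)
    {
        /* wait for any queued retry to run to completion or be cancelled */
        cancel_delayed_work_sync(&port->retry_work);

        mutex_lock(&port->lock);
        /* ... drop logins, reset exchanges, clear the assigned port ID ... */
        mutex_unlock(&port->lock);
    }

Note the cancel happens outside the mutex: cancel_delayed_work_sync() may sleep waiting for the work item, and the work item itself typically takes the same lock.
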
@@ -950,7 +927,7 @@ int fc_lport_reset(struct fc_lport *lport) | |||
950 | EXPORT_SYMBOL(fc_lport_reset); | 927 | EXPORT_SYMBOL(fc_lport_reset); |
951 | 928 | ||
952 | /** | 929 | /** |
953 | * fc_rport_enter_reset - Reset the local port | 930 | * fc_rport_enter_reset() - Reset the local port |
954 | * @lport: Fibre Channel local port to be reset | 931 | * @lport: Fibre Channel local port to be reset |
955 | * | 932 | * |
956 | * Locking Note: The lport lock is expected to be held before calling | 933 | * Locking Note: The lport lock is expected to be held before calling |
@@ -973,16 +950,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport) | |||
973 | 950 | ||
974 | lport->tt.disc_stop(lport); | 951 | lport->tt.disc_stop(lport); |
975 | 952 | ||
976 | lport->tt.exch_mgr_reset(lport->emp, 0, 0); | 953 | lport->tt.exch_mgr_reset(lport, 0, 0); |
977 | fc_host_fabric_name(lport->host) = 0; | 954 | fc_host_fabric_name(lport->host) = 0; |
978 | fc_host_port_id(lport->host) = 0; | 955 | fc_host_port_id(lport->host) = 0; |
979 | 956 | ||
980 | if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) | 957 | if (lport->link_up) |
981 | fc_lport_enter_flogi(lport); | 958 | fc_lport_enter_flogi(lport); |
982 | } | 959 | } |
983 | 960 | ||
984 | /** | 961 | /** |
985 | * fc_lport_error - Handler for any errors | 962 | * fc_lport_error() - Handler for any errors |
986 | * @lport: The fc_lport object | 963 | * @lport: The fc_lport object |
987 | * @fp: The frame pointer | 964 | * @fp: The frame pointer |
988 | * | 965 | * |
@@ -1029,8 +1006,8 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) | |||
1029 | } | 1006 | } |
1030 | 1007 | ||
1031 | /** | 1008 | /** |
1032 | * fc_lport_rft_id_resp - Handle response to Register Fibre | 1009 | * fc_lport_rft_id_resp() - Handle response to Register Fibre |
1033 | * Channel Types by ID (RPN_ID) request | 1010 | * Channel Types by ID (RPN_ID) request |
1034 | * @sp: current sequence in RPN_ID exchange | 1011 | * @sp: current sequence in RPN_ID exchange |
1035 | * @fp: response frame | 1012 | * @fp: response frame |
1036 | * @lp_arg: Fibre Channel host port instance | 1013 | * @lp_arg: Fibre Channel host port instance |
@@ -1053,17 +1030,17 @@ static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1053 | 1030 | ||
1054 | FC_DEBUG_LPORT("Received a RFT_ID response\n"); | 1031 | FC_DEBUG_LPORT("Received a RFT_ID response\n"); |
1055 | 1032 | ||
1033 | if (IS_ERR(fp)) { | ||
1034 | fc_lport_error(lport, fp); | ||
1035 | goto err; | ||
1036 | } | ||
1037 | |||
1056 | if (lport->state != LPORT_ST_RFT_ID) { | 1038 | if (lport->state != LPORT_ST_RFT_ID) { |
1057 | FC_DBG("Received a RFT_ID response, but in state %s\n", | 1039 | FC_DBG("Received a RFT_ID response, but in state %s\n", |
1058 | fc_lport_state(lport)); | 1040 | fc_lport_state(lport)); |
1059 | goto out; | 1041 | goto out; |
1060 | } | 1042 | } |
1061 | 1043 | ||
1062 | if (IS_ERR(fp)) { | ||
1063 | fc_lport_error(lport, fp); | ||
1064 | goto err; | ||
1065 | } | ||
1066 | |||
1067 | fh = fc_frame_header_get(fp); | 1044 | fh = fc_frame_header_get(fp); |
1068 | ct = fc_frame_payload_get(fp, sizeof(*ct)); | 1045 | ct = fc_frame_payload_get(fp, sizeof(*ct)); |
1069 | 1046 | ||
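
In fc_lport_rft_id_resp() the IS_ERR(fp) test is moved ahead of the state check; the RPN_ID and SCR response handlers below get the same treatment. An error "frame" is an ERR_PTR cookie (e.g. -FC_EX_CLOSED), not a real frame, so it must be routed to fc_lport_error() before the handler does anything that assumes a parsable frame, and it must not be freed like one. A condensed, illustrative sketch of the reordered handler shape; the labels mirror the out/err structure used in this file:

    #include <linux/err.h>
    #include <scsi/libfc.h>

    static void demo_ns_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg)
    {
        struct fc_lport *lport = lp_arg;

        mutex_lock(&lport->lp_mutex);

        /* error pseudo-frames first: nothing below may touch fp's contents */
        if (IS_ERR(fp)) {
            fc_lport_error(lport, fp);
            goto err;
        }

        if (lport->state != LPORT_ST_RFT_ID)
            goto out;           /* stale response; just drop the frame */

        /* ... parse the CT payload and advance the lport state machine ... */
    out:
        fc_frame_free(fp);
    err:
        mutex_unlock(&lport->lp_mutex);
    }
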
@@ -1081,8 +1058,8 @@ err: | |||
1081 | } | 1058 | } |
1082 | 1059 | ||
1083 | /** | 1060 | /** |
1084 | * fc_lport_rpn_id_resp - Handle response to Register Port | 1061 | * fc_lport_rpn_id_resp() - Handle response to Register Port |
1085 | * Name by ID (RPN_ID) request | 1062 | * Name by ID (RPN_ID) request |
1086 | * @sp: current sequence in RPN_ID exchange | 1063 | * @sp: current sequence in RPN_ID exchange |
1087 | * @fp: response frame | 1064 | * @fp: response frame |
1088 | * @lp_arg: Fibre Channel host port instance | 1065 | * @lp_arg: Fibre Channel host port instance |
@@ -1105,17 +1082,17 @@ static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1105 | 1082 | ||
1106 | FC_DEBUG_LPORT("Received a RPN_ID response\n"); | 1083 | FC_DEBUG_LPORT("Received a RPN_ID response\n"); |
1107 | 1084 | ||
1085 | if (IS_ERR(fp)) { | ||
1086 | fc_lport_error(lport, fp); | ||
1087 | goto err; | ||
1088 | } | ||
1089 | |||
1108 | if (lport->state != LPORT_ST_RPN_ID) { | 1090 | if (lport->state != LPORT_ST_RPN_ID) { |
1109 | FC_DBG("Received a RPN_ID response, but in state %s\n", | 1091 | FC_DBG("Received a RPN_ID response, but in state %s\n", |
1110 | fc_lport_state(lport)); | 1092 | fc_lport_state(lport)); |
1111 | goto out; | 1093 | goto out; |
1112 | } | 1094 | } |
1113 | 1095 | ||
1114 | if (IS_ERR(fp)) { | ||
1115 | fc_lport_error(lport, fp); | ||
1116 | goto err; | ||
1117 | } | ||
1118 | |||
1119 | fh = fc_frame_header_get(fp); | 1096 | fh = fc_frame_header_get(fp); |
1120 | ct = fc_frame_payload_get(fp, sizeof(*ct)); | 1097 | ct = fc_frame_payload_get(fp, sizeof(*ct)); |
1121 | if (fh && ct && fh->fh_type == FC_TYPE_CT && | 1098 | if (fh && ct && fh->fh_type == FC_TYPE_CT && |
@@ -1133,7 +1110,7 @@ err: | |||
1133 | } | 1110 | } |
1134 | 1111 | ||
1135 | /** | 1112 | /** |
1136 | * fc_lport_scr_resp - Handle response to State Change Register (SCR) request | 1113 | * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request |
1137 | * @sp: current sequence in SCR exchange | 1114 | * @sp: current sequence in SCR exchange |
1138 | * @fp: response frame | 1115 | * @fp: response frame |
1139 | * @lp_arg: Fibre Channel lport port instance that sent the registration request | 1116 | * @lp_arg: Fibre Channel lport port instance that sent the registration request |
@@ -1155,17 +1132,17 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1155 | 1132 | ||
1156 | FC_DEBUG_LPORT("Received a SCR response\n"); | 1133 | FC_DEBUG_LPORT("Received a SCR response\n"); |
1157 | 1134 | ||
1135 | if (IS_ERR(fp)) { | ||
1136 | fc_lport_error(lport, fp); | ||
1137 | goto err; | ||
1138 | } | ||
1139 | |||
1158 | if (lport->state != LPORT_ST_SCR) { | 1140 | if (lport->state != LPORT_ST_SCR) { |
1159 | FC_DBG("Received a SCR response, but in state %s\n", | 1141 | FC_DBG("Received a SCR response, but in state %s\n", |
1160 | fc_lport_state(lport)); | 1142 | fc_lport_state(lport)); |
1161 | goto out; | 1143 | goto out; |
1162 | } | 1144 | } |
1163 | 1145 | ||
1164 | if (IS_ERR(fp)) { | ||
1165 | fc_lport_error(lport, fp); | ||
1166 | goto err; | ||
1167 | } | ||
1168 | |||
1169 | op = fc_frame_payload_op(fp); | 1146 | op = fc_frame_payload_op(fp); |
1170 | if (op == ELS_LS_ACC) | 1147 | if (op == ELS_LS_ACC) |
1171 | fc_lport_enter_ready(lport); | 1148 | fc_lport_enter_ready(lport); |
@@ -1179,7 +1156,7 @@ err: | |||
1179 | } | 1156 | } |
1180 | 1157 | ||
1181 | /** | 1158 | /** |
1182 | * fc_lport_enter_scr - Send a State Change Register (SCR) request | 1159 | * fc_lport_enter_scr() - Send a State Change Register (SCR) request |
1183 | * @lport: Fibre Channel local port to register for state changes | 1160 | * @lport: Fibre Channel local port to register for state changes |
1184 | * | 1161 | * |
1185 | * Locking Note: The lport lock is expected to be held before calling | 1162 | * Locking Note: The lport lock is expected to be held before calling |
@@ -1206,7 +1183,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport) | |||
1206 | } | 1183 | } |
1207 | 1184 | ||
1208 | /** | 1185 | /** |
1209 | * fc_lport_enter_rft_id - Register FC4-types with the name server | 1186 | * fc_lport_enter_rft_id() - Register FC4-types with the name server |
1210 | * @lport: Fibre Channel local port to register | 1187 | * @lport: Fibre Channel local port to register |
1211 | * | 1188 | * |
1212 | * Locking Note: The lport lock is expected to be held before calling | 1189 | * Locking Note: The lport lock is expected to be held before calling |
@@ -1248,7 +1225,7 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport) | |||
1248 | } | 1225 | } |
1249 | 1226 | ||
1250 | /** | 1227 | /** |
1251 | * fc_rport_enter_rft_id - Register port name with the name server | 1228 | * fc_rport_enter_rft_id() - Register port name with the name server |
1252 | * @lport: Fibre Channel local port to register | 1229 | * @lport: Fibre Channel local port to register |
1253 | * | 1230 | * |
1254 | * Locking Note: The lport lock is expected to be held before calling | 1231 | * Locking Note: The lport lock is expected to be held before calling |
@@ -1281,7 +1258,7 @@ static struct fc_rport_operations fc_lport_rport_ops = { | |||
1281 | }; | 1258 | }; |
1282 | 1259 | ||
1283 | /** | 1260 | /** |
1284 | * fc_rport_enter_dns - Create a rport to the name server | 1261 | * fc_rport_enter_dns() - Create a rport to the name server |
1285 | * @lport: Fibre Channel local port requesting a rport for the name server | 1262 | * @lport: Fibre Channel local port requesting a rport for the name server |
1286 | * | 1263 | * |
1287 | * Locking Note: The lport lock is expected to be held before calling | 1264 | * Locking Note: The lport lock is expected to be held before calling |
@@ -1304,7 +1281,7 @@ static void fc_lport_enter_dns(struct fc_lport *lport) | |||
1304 | 1281 | ||
1305 | fc_lport_state_enter(lport, LPORT_ST_DNS); | 1282 | fc_lport_state_enter(lport, LPORT_ST_DNS); |
1306 | 1283 | ||
1307 | rport = fc_rport_rogue_create(&dp); | 1284 | rport = lport->tt.rport_create(&dp); |
1308 | if (!rport) | 1285 | if (!rport) |
1309 | goto err; | 1286 | goto err; |
1310 | 1287 | ||
@@ -1318,7 +1295,7 @@ err: | |||
1318 | } | 1295 | } |
1319 | 1296 | ||
1320 | /** | 1297 | /** |
1321 | * fc_lport_timeout - Handler for the retry_work timer. | 1298 | * fc_lport_timeout() - Handler for the retry_work timer. |
1322 | * @work: The work struct of the fc_lport | 1299 | * @work: The work struct of the fc_lport |
1323 | */ | 1300 | */ |
1324 | static void fc_lport_timeout(struct work_struct *work) | 1301 | static void fc_lport_timeout(struct work_struct *work) |
@@ -1359,7 +1336,7 @@ static void fc_lport_timeout(struct work_struct *work) | |||
1359 | } | 1336 | } |
1360 | 1337 | ||
1361 | /** | 1338 | /** |
1362 | * fc_lport_logo_resp - Handle response to LOGO request | 1339 | * fc_lport_logo_resp() - Handle response to LOGO request |
1363 | * @sp: current sequence in LOGO exchange | 1340 | * @sp: current sequence in LOGO exchange |
1364 | * @fp: response frame | 1341 | * @fp: response frame |
1365 | * @lp_arg: Fibre Channel lport port instance that sent the LOGO request | 1342 | * @lp_arg: Fibre Channel lport port instance that sent the LOGO request |
@@ -1381,17 +1358,17 @@ static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1381 | 1358 | ||
1382 | FC_DEBUG_LPORT("Received a LOGO response\n"); | 1359 | FC_DEBUG_LPORT("Received a LOGO response\n"); |
1383 | 1360 | ||
1361 | if (IS_ERR(fp)) { | ||
1362 | fc_lport_error(lport, fp); | ||
1363 | goto err; | ||
1364 | } | ||
1365 | |||
1384 | if (lport->state != LPORT_ST_LOGO) { | 1366 | if (lport->state != LPORT_ST_LOGO) { |
1385 | FC_DBG("Received a LOGO response, but in state %s\n", | 1367 | FC_DBG("Received a LOGO response, but in state %s\n", |
1386 | fc_lport_state(lport)); | 1368 | fc_lport_state(lport)); |
1387 | goto out; | 1369 | goto out; |
1388 | } | 1370 | } |
1389 | 1371 | ||
1390 | if (IS_ERR(fp)) { | ||
1391 | fc_lport_error(lport, fp); | ||
1392 | goto err; | ||
1393 | } | ||
1394 | |||
1395 | op = fc_frame_payload_op(fp); | 1372 | op = fc_frame_payload_op(fp); |
1396 | if (op == ELS_LS_ACC) | 1373 | if (op == ELS_LS_ACC) |
1397 | fc_lport_enter_reset(lport); | 1374 | fc_lport_enter_reset(lport); |
@@ -1405,7 +1382,7 @@ err: | |||
1405 | } | 1382 | } |
1406 | 1383 | ||
1407 | /** | 1384 | /** |
1408 | * fc_rport_enter_logo - Logout of the fabric | 1385 | * fc_rport_enter_logo() - Logout of the fabric |
1409 | * @lport: Fibre Channel local port to be logged out | 1386 | * @lport: Fibre Channel local port to be logged out |
1410 | * | 1387 | * |
1411 | * Locking Note: The lport lock is expected to be held before calling | 1388 | * Locking Note: The lport lock is expected to be held before calling |
@@ -1437,7 +1414,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) | |||
1437 | } | 1414 | } |
1438 | 1415 | ||
1439 | /** | 1416 | /** |
1440 | * fc_lport_flogi_resp - Handle response to FLOGI request | 1417 | * fc_lport_flogi_resp() - Handle response to FLOGI request |
1441 | * @sp: current sequence in FLOGI exchange | 1418 | * @sp: current sequence in FLOGI exchange |
1442 | * @fp: response frame | 1419 | * @fp: response frame |
1443 | * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request | 1420 | * @lp_arg: Fibre Channel lport port instance that sent the FLOGI request |
@@ -1465,17 +1442,17 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1465 | 1442 | ||
1466 | FC_DEBUG_LPORT("Received a FLOGI response\n"); | 1443 | FC_DEBUG_LPORT("Received a FLOGI response\n"); |
1467 | 1444 | ||
1445 | if (IS_ERR(fp)) { | ||
1446 | fc_lport_error(lport, fp); | ||
1447 | goto err; | ||
1448 | } | ||
1449 | |||
1468 | if (lport->state != LPORT_ST_FLOGI) { | 1450 | if (lport->state != LPORT_ST_FLOGI) { |
1469 | FC_DBG("Received a FLOGI response, but in state %s\n", | 1451 | FC_DBG("Received a FLOGI response, but in state %s\n", |
1470 | fc_lport_state(lport)); | 1452 | fc_lport_state(lport)); |
1471 | goto out; | 1453 | goto out; |
1472 | } | 1454 | } |
1473 | 1455 | ||
1474 | if (IS_ERR(fp)) { | ||
1475 | fc_lport_error(lport, fp); | ||
1476 | goto err; | ||
1477 | } | ||
1478 | |||
1479 | fh = fc_frame_header_get(fp); | 1456 | fh = fc_frame_header_get(fp); |
1480 | did = ntoh24(fh->fh_d_id); | 1457 | did = ntoh24(fh->fh_d_id); |
1481 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { | 1458 | if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) { |
@@ -1532,7 +1509,7 @@ err: | |||
1532 | } | 1509 | } |
1533 | 1510 | ||
1534 | /** | 1511 | /** |
1535 | * fc_rport_enter_flogi - Send a FLOGI request to the fabric manager | 1512 | * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager |
1536 | * @lport: Fibre Channel local port to be logged in to the fabric | 1513 | * @lport: Fibre Channel local port to be logged in to the fabric |
1537 | * | 1514 | * |
1538 | * Locking Note: The lport lock is expected to be held before calling | 1515 | * Locking Note: The lport lock is expected to be held before calling |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index e780d8caf70e..dae65133a833 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -81,6 +81,7 @@ static void fc_rport_recv_logo_req(struct fc_rport *, | |||
81 | struct fc_seq *, struct fc_frame *); | 81 | struct fc_seq *, struct fc_frame *); |
82 | static void fc_rport_timeout(struct work_struct *); | 82 | static void fc_rport_timeout(struct work_struct *); |
83 | static void fc_rport_error(struct fc_rport *, struct fc_frame *); | 83 | static void fc_rport_error(struct fc_rport *, struct fc_frame *); |
84 | static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *); | ||
84 | static void fc_rport_work(struct work_struct *); | 85 | static void fc_rport_work(struct work_struct *); |
85 | 86 | ||
86 | static const char *fc_rport_state_names[] = { | 87 | static const char *fc_rport_state_names[] = { |
@@ -145,7 +146,7 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) | |||
145 | } | 146 | } |
146 | 147 | ||
147 | /** | 148 | /** |
148 | * fc_rport_state - return a string for the state the rport is in | 149 | * fc_rport_state() - return a string for the state the rport is in |
149 | * @rport: The rport whose state we want to get a string for | 150 | * @rport: The rport whose state we want to get a string for |
150 | */ | 151 | */ |
151 | static const char *fc_rport_state(struct fc_rport *rport) | 152 | static const char *fc_rport_state(struct fc_rport *rport) |
@@ -160,7 +161,7 @@ static const char *fc_rport_state(struct fc_rport *rport) | |||
160 | } | 161 | } |
161 | 162 | ||
162 | /** | 163 | /** |
163 | * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds. | 164 | * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds. |
164 | * @rport: Pointer to Fibre Channel remote port structure | 165 | * @rport: Pointer to Fibre Channel remote port structure |
165 | * @timeout: timeout in seconds | 166 | * @timeout: timeout in seconds |
166 | */ | 167 | */ |
@@ -174,12 +175,12 @@ void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) | |||
174 | EXPORT_SYMBOL(fc_set_rport_loss_tmo); | 175 | EXPORT_SYMBOL(fc_set_rport_loss_tmo); |
175 | 176 | ||
176 | /** | 177 | /** |
177 | * fc_plogi_get_maxframe - Get max payload from the common service parameters | 178 | * fc_plogi_get_maxframe() - Get max payload from the common service parameters |
178 | * @flp: FLOGI payload structure | 179 | * @flp: FLOGI payload structure |
179 | * @maxval: upper limit, may be less than what is in the service parameters | 180 | * @maxval: upper limit, may be less than what is in the service parameters |
180 | */ | 181 | */ |
181 | static unsigned int | 182 | static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, |
182 | fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval) | 183 | unsigned int maxval) |
183 | { | 184 | { |
184 | unsigned int mfs; | 185 | unsigned int mfs; |
185 | 186 | ||
@@ -197,7 +198,7 @@ fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval) | |||
197 | } | 198 | } |
198 | 199 | ||
199 | /** | 200 | /** |
200 | * fc_rport_state_enter - Change the rport's state | 201 | * fc_rport_state_enter() - Change the rport's state |
201 | * @rport: The rport whose state should change | 202 | * @rport: The rport whose state should change |
202 | * @new: The new state of the rport | 203 | * @new: The new state of the rport |
203 | * | 204 | * |
@@ -214,6 +215,7 @@ static void fc_rport_state_enter(struct fc_rport *rport, | |||
214 | 215 | ||
215 | static void fc_rport_work(struct work_struct *work) | 216 | static void fc_rport_work(struct work_struct *work) |
216 | { | 217 | { |
218 | u32 port_id; | ||
217 | struct fc_rport_libfc_priv *rdata = | 219 | struct fc_rport_libfc_priv *rdata = |
218 | container_of(work, struct fc_rport_libfc_priv, event_work); | 220 | container_of(work, struct fc_rport_libfc_priv, event_work); |
219 | enum fc_rport_event event; | 221 | enum fc_rport_event event; |
@@ -279,14 +281,18 @@ static void fc_rport_work(struct work_struct *work) | |||
279 | rport_ops->event_callback(lport, rport, event); | 281 | rport_ops->event_callback(lport, rport, event); |
280 | if (trans_state == FC_PORTSTATE_ROGUE) | 282 | if (trans_state == FC_PORTSTATE_ROGUE) |
281 | put_device(&rport->dev); | 283 | put_device(&rport->dev); |
282 | else | 284 | else { |
285 | port_id = rport->port_id; | ||
283 | fc_remote_port_delete(rport); | 286 | fc_remote_port_delete(rport); |
287 | lport->tt.exch_mgr_reset(lport, 0, port_id); | ||
288 | lport->tt.exch_mgr_reset(lport, port_id, 0); | ||
289 | } | ||
284 | } else | 290 | } else |
285 | mutex_unlock(&rdata->rp_mutex); | 291 | mutex_unlock(&rdata->rp_mutex); |
286 | } | 292 | } |
287 | 293 | ||
288 | /** | 294 | /** |
289 | * fc_rport_login - Start the remote port login state machine | 295 | * fc_rport_login() - Start the remote port login state machine |
290 | * @rport: Fibre Channel remote port | 296 | * @rport: Fibre Channel remote port |
291 | * | 297 | * |
292 | * Locking Note: Called without the rport lock held. This | 298 | * Locking Note: Called without the rport lock held. This |
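
The fc_rport_work() hunk above caches rport->port_id in a local before calling fc_remote_port_delete(), presumably because the rport may be torn down once the transport delete runs, and then uses the cached ID to reset any exchanges in which the departed port appears as either destination or source. In outline (mirrors the added lines, not a complete function):

		u32 port_id = rport->port_id;	/* cache before the rport can go away */

		fc_remote_port_delete(rport);

		/* abort exchanges with the port as destination, then as source */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);
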
@@ -309,7 +315,7 @@ int fc_rport_login(struct fc_rport *rport) | |||
309 | } | 315 | } |
310 | 316 | ||
311 | /** | 317 | /** |
312 | * fc_rport_logoff - Logoff and remove an rport | 318 | * fc_rport_logoff() - Logoff and remove an rport |
313 | * @rport: Fibre Channel remote port to be removed | 319 | * @rport: Fibre Channel remote port to be removed |
314 | * | 320 | * |
315 | * Locking Note: Called without the rport lock held. This | 321 | * Locking Note: Called without the rport lock held. This |
@@ -347,7 +353,7 @@ int fc_rport_logoff(struct fc_rport *rport) | |||
347 | } | 353 | } |
348 | 354 | ||
349 | /** | 355 | /** |
350 | * fc_rport_enter_ready - The rport is ready | 356 | * fc_rport_enter_ready() - The rport is ready |
351 | * @rport: Fibre Channel remote port that is ready | 357 | * @rport: Fibre Channel remote port that is ready |
352 | * | 358 | * |
353 | * Locking Note: The rport lock is expected to be held before calling | 359 | * Locking Note: The rport lock is expected to be held before calling |
@@ -366,7 +372,7 @@ static void fc_rport_enter_ready(struct fc_rport *rport) | |||
366 | } | 372 | } |
367 | 373 | ||
368 | /** | 374 | /** |
369 | * fc_rport_timeout - Handler for the retry_work timer. | 375 | * fc_rport_timeout() - Handler for the retry_work timer. |
370 | * @work: The work struct of the fc_rport_libfc_priv | 376 | * @work: The work struct of the fc_rport_libfc_priv |
371 | * | 377 | * |
372 | * Locking Note: Called without the rport lock held. This | 378 | * Locking Note: Called without the rport lock held. This |
@@ -405,59 +411,75 @@ static void fc_rport_timeout(struct work_struct *work) | |||
405 | } | 411 | } |
406 | 412 | ||
407 | /** | 413 | /** |
408 | * fc_rport_error - Handler for any errors | 414 | * fc_rport_error() - Error handler, called once retries have been exhausted |
409 | * @rport: The fc_rport object | 415 | * @rport: The fc_rport object |
410 | * @fp: The frame pointer | 416 | * @fp: The frame pointer |
411 | * | 417 | * |
412 | * If the error was caused by a resource allocation failure | ||
413 | * then wait for half a second and retry, otherwise retry | ||
414 | * immediately. | ||
415 | * | ||
416 | * Locking Note: The rport lock is expected to be held before | 418 | * Locking Note: The rport lock is expected to be held before |
417 | * calling this routine | 419 | * calling this routine |
418 | */ | 420 | */ |
419 | static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) | 421 | static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) |
420 | { | 422 | { |
421 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 423 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
422 | unsigned long delay = 0; | ||
423 | 424 | ||
424 | FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n", | 425 | FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n", |
425 | PTR_ERR(fp), fc_rport_state(rport), rdata->retries); | 426 | PTR_ERR(fp), fc_rport_state(rport), rdata->retries); |
426 | 427 | ||
427 | if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) { | 428 | switch (rdata->rp_state) { |
428 | /* | 429 | case RPORT_ST_PLOGI: |
429 | * Memory allocation failure, or the exchange timed out. | 430 | case RPORT_ST_PRLI: |
430 | * Retry after delay | 431 | case RPORT_ST_LOGO: |
431 | */ | 432 | rdata->event = RPORT_EV_FAILED; |
432 | if (rdata->retries < rdata->local_port->max_retry_count) { | 433 | queue_work(rport_event_queue, |
433 | rdata->retries++; | 434 | &rdata->event_work); |
434 | if (!fp) | 435 | break; |
435 | delay = msecs_to_jiffies(500); | 436 | case RPORT_ST_RTV: |
436 | get_device(&rport->dev); | 437 | fc_rport_enter_ready(rport); |
437 | schedule_delayed_work(&rdata->retry_work, delay); | 438 | break; |
438 | } else { | 439 | case RPORT_ST_NONE: |
439 | switch (rdata->rp_state) { | 440 | case RPORT_ST_READY: |
440 | case RPORT_ST_PLOGI: | 441 | case RPORT_ST_INIT: |
441 | case RPORT_ST_PRLI: | 442 | break; |
442 | case RPORT_ST_LOGO: | ||
443 | rdata->event = RPORT_EV_FAILED; | ||
444 | queue_work(rport_event_queue, | ||
445 | &rdata->event_work); | ||
446 | break; | ||
447 | case RPORT_ST_RTV: | ||
448 | fc_rport_enter_ready(rport); | ||
449 | break; | ||
450 | case RPORT_ST_NONE: | ||
451 | case RPORT_ST_READY: | ||
452 | case RPORT_ST_INIT: | ||
453 | break; | ||
454 | } | ||
455 | } | ||
456 | } | 443 | } |
457 | } | 444 | } |
458 | 445 | ||
459 | /** | 446 | /** |
460 | * fc_rport_plogi_recv_resp - Handle incoming ELS PLOGI response | 447 | * fc_rport_error_retry() - Error handler when retries are desired |
448 | * @rport: The fc_rport object | ||
449 | * @fp: The frame pointer | ||
450 | * | ||
451 | * If the error was an exchange timeout retry immediately, | ||
452 | * otherwise wait for E_D_TOV. | ||
453 | * | ||
454 | * Locking Note: The rport lock is expected to be held before | ||
455 | * calling this routine | ||
456 | */ | ||
457 | static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) | ||
458 | { | ||
459 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | ||
460 | unsigned long delay = FC_DEF_E_D_TOV; | ||
461 | |||
462 | /* make sure this isn't an FC_EX_CLOSED error, never retry those */ | ||
463 | if (PTR_ERR(fp) == -FC_EX_CLOSED) | ||
464 | return fc_rport_error(rport, fp); | ||
465 | |||
466 | if (rdata->retries < rdata->local_port->max_retry_count) { | ||
467 | FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", | ||
468 | PTR_ERR(fp), fc_rport_state(rport)); | ||
469 | rdata->retries++; | ||
470 | /* no additional delay on exchange timeouts */ | ||
471 | if (PTR_ERR(fp) == -FC_EX_TIMEOUT) | ||
472 | delay = 0; | ||
473 | get_device(&rport->dev); | ||
474 | schedule_delayed_work(&rdata->retry_work, delay); | ||
475 | return; | ||
476 | } | ||
477 | |||
478 | return fc_rport_error(rport, fp); | ||
479 | } | ||
480 | |||
481 | /** | ||
482 | * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response | ||
461 | * @sp: current sequence in the PLOGI exchange | 483 | * @sp: current sequence in the PLOGI exchange |
462 | * @fp: response frame | 484 | * @fp: response frame |
463 | * @rp_arg: Fibre Channel remote port | 485 | * @rp_arg: Fibre Channel remote port |
@@ -483,17 +505,17 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
483 | FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n", | 505 | FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n", |
484 | rport->port_id); | 506 | rport->port_id); |
485 | 507 | ||
508 | if (IS_ERR(fp)) { | ||
509 | fc_rport_error_retry(rport, fp); | ||
510 | goto err; | ||
511 | } | ||
512 | |||
486 | if (rdata->rp_state != RPORT_ST_PLOGI) { | 513 | if (rdata->rp_state != RPORT_ST_PLOGI) { |
487 | FC_DBG("Received a PLOGI response, but in state %s\n", | 514 | FC_DBG("Received a PLOGI response, but in state %s\n", |
488 | fc_rport_state(rport)); | 515 | fc_rport_state(rport)); |
489 | goto out; | 516 | goto out; |
490 | } | 517 | } |
491 | 518 | ||
492 | if (IS_ERR(fp)) { | ||
493 | fc_rport_error(rport, fp); | ||
494 | goto err; | ||
495 | } | ||
496 | |||
497 | op = fc_frame_payload_op(fp); | 519 | op = fc_frame_payload_op(fp); |
498 | if (op == ELS_LS_ACC && | 520 | if (op == ELS_LS_ACC && |
499 | (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) { | 521 | (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) { |
@@ -522,7 +544,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
522 | else | 544 | else |
523 | fc_rport_enter_prli(rport); | 545 | fc_rport_enter_prli(rport); |
524 | } else | 546 | } else |
525 | fc_rport_error(rport, fp); | 547 | fc_rport_error_retry(rport, fp); |
526 | 548 | ||
527 | out: | 549 | out: |
528 | fc_frame_free(fp); | 550 | fc_frame_free(fp); |
@@ -532,7 +554,7 @@ err: | |||
532 | } | 554 | } |
533 | 555 | ||
534 | /** | 556 | /** |
535 | * fc_rport_enter_plogi - Send Port Login (PLOGI) request to peer | 557 | * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer |
536 | * @rport: Fibre Channel remote port to send PLOGI to | 558 | * @rport: Fibre Channel remote port to send PLOGI to |
537 | * | 559 | * |
538 | * Locking Note: The rport lock is expected to be held before calling | 560 | * Locking Note: The rport lock is expected to be held before calling |
@@ -552,20 +574,20 @@ static void fc_rport_enter_plogi(struct fc_rport *rport) | |||
552 | rport->maxframe_size = FC_MIN_MAX_PAYLOAD; | 574 | rport->maxframe_size = FC_MIN_MAX_PAYLOAD; |
553 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); | 575 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); |
554 | if (!fp) { | 576 | if (!fp) { |
555 | fc_rport_error(rport, fp); | 577 | fc_rport_error_retry(rport, fp); |
556 | return; | 578 | return; |
557 | } | 579 | } |
558 | rdata->e_d_tov = lport->e_d_tov; | 580 | rdata->e_d_tov = lport->e_d_tov; |
559 | 581 | ||
560 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI, | 582 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI, |
561 | fc_rport_plogi_resp, rport, lport->e_d_tov)) | 583 | fc_rport_plogi_resp, rport, lport->e_d_tov)) |
562 | fc_rport_error(rport, fp); | 584 | fc_rport_error_retry(rport, fp); |
563 | else | 585 | else |
564 | get_device(&rport->dev); | 586 | get_device(&rport->dev); |
565 | } | 587 | } |
566 | 588 | ||
567 | /** | 589 | /** |
568 | * fc_rport_prli_resp - Process Login (PRLI) response handler | 590 | * fc_rport_prli_resp() - Process Login (PRLI) response handler |
569 | * @sp: current sequence in the PRLI exchange | 591 | * @sp: current sequence in the PRLI exchange |
570 | * @fp: response frame | 592 | * @fp: response frame |
571 | * @rp_arg: Fibre Channel remote port | 593 | * @rp_arg: Fibre Channel remote port |
@@ -592,17 +614,17 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
592 | FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n", | 614 | FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n", |
593 | rport->port_id); | 615 | rport->port_id); |
594 | 616 | ||
617 | if (IS_ERR(fp)) { | ||
618 | fc_rport_error_retry(rport, fp); | ||
619 | goto err; | ||
620 | } | ||
621 | |||
595 | if (rdata->rp_state != RPORT_ST_PRLI) { | 622 | if (rdata->rp_state != RPORT_ST_PRLI) { |
596 | FC_DBG("Received a PRLI response, but in state %s\n", | 623 | FC_DBG("Received a PRLI response, but in state %s\n", |
597 | fc_rport_state(rport)); | 624 | fc_rport_state(rport)); |
598 | goto out; | 625 | goto out; |
599 | } | 626 | } |
600 | 627 | ||
601 | if (IS_ERR(fp)) { | ||
602 | fc_rport_error(rport, fp); | ||
603 | goto err; | ||
604 | } | ||
605 | |||
606 | op = fc_frame_payload_op(fp); | 628 | op = fc_frame_payload_op(fp); |
607 | if (op == ELS_LS_ACC) { | 629 | if (op == ELS_LS_ACC) { |
608 | pp = fc_frame_payload_get(fp, sizeof(*pp)); | 630 | pp = fc_frame_payload_get(fp, sizeof(*pp)); |
@@ -635,7 +657,7 @@ err: | |||
635 | } | 657 | } |
636 | 658 | ||
637 | /** | 659 | /** |
638 | * fc_rport_logo_resp - Logout (LOGO) response handler | 660 | * fc_rport_logo_resp() - Logout (LOGO) response handler |
639 | * @sp: current sequence in the LOGO exchange | 661 | * @sp: current sequence in the LOGO exchange |
640 | * @fp: response frame | 662 | * @fp: response frame |
641 | * @rp_arg: Fibre Channel remote port | 663 | * @rp_arg: Fibre Channel remote port |
@@ -657,7 +679,7 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
657 | rport->port_id); | 679 | rport->port_id); |
658 | 680 | ||
659 | if (IS_ERR(fp)) { | 681 | if (IS_ERR(fp)) { |
660 | fc_rport_error(rport, fp); | 682 | fc_rport_error_retry(rport, fp); |
661 | goto err; | 683 | goto err; |
662 | } | 684 | } |
663 | 685 | ||
@@ -684,7 +706,7 @@ err: | |||
684 | } | 706 | } |
685 | 707 | ||
686 | /** | 708 | /** |
687 | * fc_rport_enter_prli - Send Process Login (PRLI) request to peer | 709 | * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer |
688 | * @rport: Fibre Channel remote port to send PRLI to | 710 | * @rport: Fibre Channel remote port to send PRLI to |
689 | * | 711 | * |
690 | * Locking Note: The rport lock is expected to be held before calling | 712 | * Locking Note: The rport lock is expected to be held before calling |
@@ -707,19 +729,19 @@ static void fc_rport_enter_prli(struct fc_rport *rport) | |||
707 | 729 | ||
708 | fp = fc_frame_alloc(lport, sizeof(*pp)); | 730 | fp = fc_frame_alloc(lport, sizeof(*pp)); |
709 | if (!fp) { | 731 | if (!fp) { |
710 | fc_rport_error(rport, fp); | 732 | fc_rport_error_retry(rport, fp); |
711 | return; | 733 | return; |
712 | } | 734 | } |
713 | 735 | ||
714 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI, | 736 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI, |
715 | fc_rport_prli_resp, rport, lport->e_d_tov)) | 737 | fc_rport_prli_resp, rport, lport->e_d_tov)) |
716 | fc_rport_error(rport, fp); | 738 | fc_rport_error_retry(rport, fp); |
717 | else | 739 | else |
718 | get_device(&rport->dev); | 740 | get_device(&rport->dev); |
719 | } | 741 | } |
720 | 742 | ||
721 | /** | 743 | /** |
722 | * fc_rport_els_rtv_resp - Request Timeout Value response handler | 744 | * fc_rport_els_rtv_resp() - Request Timeout Value response handler |
723 | * @sp: current sequence in the RTV exchange | 745 | * @sp: current sequence in the RTV exchange |
724 | * @fp: response frame | 746 | * @fp: response frame |
725 | * @rp_arg: Fibre Channel remote port | 747 | * @rp_arg: Fibre Channel remote port |
@@ -742,17 +764,17 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
742 | FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n", | 764 | FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n", |
743 | rport->port_id); | 765 | rport->port_id); |
744 | 766 | ||
767 | if (IS_ERR(fp)) { | ||
768 | fc_rport_error(rport, fp); | ||
769 | goto err; | ||
770 | } | ||
771 | |||
745 | if (rdata->rp_state != RPORT_ST_RTV) { | 772 | if (rdata->rp_state != RPORT_ST_RTV) { |
746 | FC_DBG("Received a RTV response, but in state %s\n", | 773 | FC_DBG("Received a RTV response, but in state %s\n", |
747 | fc_rport_state(rport)); | 774 | fc_rport_state(rport)); |
748 | goto out; | 775 | goto out; |
749 | } | 776 | } |
750 | 777 | ||
751 | if (IS_ERR(fp)) { | ||
752 | fc_rport_error(rport, fp); | ||
753 | goto err; | ||
754 | } | ||
755 | |||
756 | op = fc_frame_payload_op(fp); | 778 | op = fc_frame_payload_op(fp); |
757 | if (op == ELS_LS_ACC) { | 779 | if (op == ELS_LS_ACC) { |
758 | struct fc_els_rtv_acc *rtv; | 780 | struct fc_els_rtv_acc *rtv; |
@@ -785,7 +807,7 @@ err: | |||
785 | } | 807 | } |
786 | 808 | ||
787 | /** | 809 | /** |
788 | * fc_rport_enter_rtv - Send Request Timeout Value (RTV) request to peer | 810 | * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer |
789 | * @rport: Fibre Channel remote port to send RTV to | 811 | * @rport: Fibre Channel remote port to send RTV to |
790 | * | 812 | * |
791 | * Locking Note: The rport lock is expected to be held before calling | 813 | * Locking Note: The rport lock is expected to be held before calling |
@@ -804,19 +826,19 @@ static void fc_rport_enter_rtv(struct fc_rport *rport) | |||
804 | 826 | ||
805 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); | 827 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); |
806 | if (!fp) { | 828 | if (!fp) { |
807 | fc_rport_error(rport, fp); | 829 | fc_rport_error_retry(rport, fp); |
808 | return; | 830 | return; |
809 | } | 831 | } |
810 | 832 | ||
811 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV, | 833 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV, |
812 | fc_rport_rtv_resp, rport, lport->e_d_tov)) | 834 | fc_rport_rtv_resp, rport, lport->e_d_tov)) |
813 | fc_rport_error(rport, fp); | 835 | fc_rport_error_retry(rport, fp); |
814 | else | 836 | else |
815 | get_device(&rport->dev); | 837 | get_device(&rport->dev); |
816 | } | 838 | } |
817 | 839 | ||
818 | /** | 840 | /** |
819 | * fc_rport_enter_logo - Send Logout (LOGO) request to peer | 841 | * fc_rport_enter_logo() - Send Logout (LOGO) request to peer |
820 | * @rport: Fibre Channel remote port to send LOGO to | 842 | * @rport: Fibre Channel remote port to send LOGO to |
821 | * | 843 | * |
822 | * Locking Note: The rport lock is expected to be held before calling | 844 | * Locking Note: The rport lock is expected to be held before calling |
@@ -835,20 +857,20 @@ static void fc_rport_enter_logo(struct fc_rport *rport) | |||
835 | 857 | ||
836 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); | 858 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); |
837 | if (!fp) { | 859 | if (!fp) { |
838 | fc_rport_error(rport, fp); | 860 | fc_rport_error_retry(rport, fp); |
839 | return; | 861 | return; |
840 | } | 862 | } |
841 | 863 | ||
842 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO, | 864 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO, |
843 | fc_rport_logo_resp, rport, lport->e_d_tov)) | 865 | fc_rport_logo_resp, rport, lport->e_d_tov)) |
844 | fc_rport_error(rport, fp); | 866 | fc_rport_error_retry(rport, fp); |
845 | else | 867 | else |
846 | get_device(&rport->dev); | 868 | get_device(&rport->dev); |
847 | } | 869 | } |
848 | 870 | ||
849 | 871 | ||
850 | /** | 872 | /** |
851 | * fc_rport_recv_req - Receive a request from a rport | 873 | * fc_rport_recv_req() - Receive a request from a rport |
852 | * @sp: current sequence in the PLOGI exchange | 874 | * @sp: current sequence in the PLOGI exchange |
853 | * @fp: response frame | 875 | * @fp: response frame |
854 | * @rp_arg: Fibre Channel remote port | 876 | * @rp_arg: Fibre Channel remote port |
@@ -909,7 +931,7 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |||
909 | } | 931 | } |
910 | 932 | ||
911 | /** | 933 | /** |
912 | * fc_rport_recv_plogi_req - Handle incoming Port Login (PLOGI) request | 934 | * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request |
913 | * @rport: Fibre Channel remote port that initiated PLOGI | 935 | * @rport: Fibre Channel remote port that initiated PLOGI |
914 | * @sp: current sequence in the PLOGI exchange | 936 | * @sp: current sequence in the PLOGI exchange |
915 | * @fp: PLOGI request frame | 937 | * @fp: PLOGI request frame |
@@ -1031,7 +1053,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |||
1031 | } | 1053 | } |
1032 | 1054 | ||
1033 | /** | 1055 | /** |
1034 | * fc_rport_recv_prli_req - Handle incoming Process Login (PRLI) request | 1056 | * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request |
1035 | * @rport: Fibre Channel remote port that initiated PRLI | 1057 | * @rport: Fibre Channel remote port that initiated PRLI |
1036 | * @sp: current sequence in the PRLI exchange | 1058 | * @sp: current sequence in the PRLI exchange |
1037 | * @fp: PRLI request frame | 1059 | * @fp: PRLI request frame |
@@ -1182,7 +1204,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport, | |||
1182 | } | 1204 | } |
1183 | 1205 | ||
1184 | /** | 1206 | /** |
1185 | * fc_rport_recv_prlo_req - Handle incoming Process Logout (PRLO) request | 1207 | * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request |
1186 | * @rport: Fibre Channel remote port that initiated PRLO | 1208 | * @rport: Fibre Channel remote port that initiated PRLO |
1187 | * @sp: current sequence in the PRLO exchange | 1209 | * @sp: current sequence in the PRLO exchange |
1188 | * @fp: PRLO request frame | 1210 | * @fp: PRLO request frame |
@@ -1213,7 +1235,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, | |||
1213 | } | 1235 | } |
1214 | 1236 | ||
1215 | /** | 1237 | /** |
1216 | * fc_rport_recv_logo_req - Handle incoming Logout (LOGO) request | 1238 | * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request |
1217 | * @rport: Fibre Channel remote port that initiated LOGO | 1239 | * @rport: Fibre Channel remote port that initiated LOGO |
1218 | * @sp: current sequence in the LOGO exchange | 1240 | * @sp: current sequence in the LOGO exchange |
1219 | * @fp: LOGO request frame | 1241 | * @fp: LOGO request frame |
@@ -1249,6 +1271,9 @@ static void fc_rport_flush_queue(void) | |||
1249 | 1271 | ||
1250 | int fc_rport_init(struct fc_lport *lport) | 1272 | int fc_rport_init(struct fc_lport *lport) |
1251 | { | 1273 | { |
1274 | if (!lport->tt.rport_create) | ||
1275 | lport->tt.rport_create = fc_rport_rogue_create; | ||
1276 | |||
1252 | if (!lport->tt.rport_login) | 1277 | if (!lport->tt.rport_login) |
1253 | lport->tt.rport_login = fc_rport_login; | 1278 | lport->tt.rport_login = fc_rport_login; |
1254 | 1279 | ||
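
fc_rport_init() uses libfc's usual transport-template convention: any hook the LLD leaves NULL is filled in with the library default. Registering fc_rport_rogue_create here is what lets fc_lport_enter_dns() (in the fc_lport.c hunk above) call lport->tt.rport_create() instead of the rogue-create function directly, while still allowing a driver to substitute its own implementation. In outline (not the full initializer):

	int fc_rport_init(struct fc_lport *lport)
	{
		/* install defaults only where the LLD left a hook unset */
		if (!lport->tt.rport_create)
			lport->tt.rport_create = fc_rport_rogue_create;

		if (!lport->tt.rport_login)
			lport->tt.rport_login = fc_rport_login;

		/* ... remaining rport hooks are filled in the same way ... */
		return 0;
	}
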
@@ -1285,7 +1310,7 @@ void fc_rport_terminate_io(struct fc_rport *rport) | |||
1285 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 1310 | struct fc_rport_libfc_priv *rdata = rport->dd_data; |
1286 | struct fc_lport *lport = rdata->local_port; | 1311 | struct fc_lport *lport = rdata->local_port; |
1287 | 1312 | ||
1288 | lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id); | 1313 | lport->tt.exch_mgr_reset(lport, 0, rport->port_id); |
1289 | lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0); | 1314 | lport->tt.exch_mgr_reset(lport, rport->port_id, 0); |
1290 | } | 1315 | } |
1291 | EXPORT_SYMBOL(fc_rport_terminate_io); | 1316 | EXPORT_SYMBOL(fc_rport_terminate_io); |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 257c24115de9..809d32d95c76 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1998,6 +1998,8 @@ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) | |||
1998 | if (!shost->can_queue) | 1998 | if (!shost->can_queue) |
1999 | shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; | 1999 | shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; |
2000 | 2000 | ||
2001 | if (!shost->transportt->eh_timed_out) | ||
2002 | shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; | ||
2001 | return scsi_add_host(shost, pdev); | 2003 | return scsi_add_host(shost, pdev); |
2002 | } | 2004 | } |
2003 | EXPORT_SYMBOL_GPL(iscsi_host_add); | 2005 | EXPORT_SYMBOL_GPL(iscsi_host_add); |
@@ -2020,7 +2022,6 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, | |||
2020 | shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); | 2022 | shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); |
2021 | if (!shost) | 2023 | if (!shost) |
2022 | return NULL; | 2024 | return NULL; |
2023 | shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; | ||
2024 | 2025 | ||
2025 | if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { | 2026 | if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { |
2026 | if (qdepth != 0) | 2027 | if (qdepth != 0) |
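
Moving the eh_timed_out assignment from host allocation to iscsi_host_add(), and guarding it with a NULL check, means the shared scsi_transport_template is only patched when no handler is present, so a timeout handler already installed by the transport class or the LLD is no longer overwritten on every host allocation. The guarded-default idiom being introduced (field names as in the hunk):

	/* only install the library default if nothing is registered yet */
	if (!shost->transportt->eh_timed_out)
		shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;

	return scsi_add_host(shost, pdev);
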
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index a8f30bdaff69..a7302480bc4a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -5258,6 +5258,7 @@ lpfc_send_els_event(struct lpfc_vport *vport, | |||
5258 | sizeof(struct lpfc_name)); | 5258 | sizeof(struct lpfc_name)); |
5259 | break; | 5259 | break; |
5260 | default: | 5260 | default: |
5261 | kfree(els_data); | ||
5261 | return; | 5262 | return; |
5262 | } | 5263 | } |
5263 | memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); | 5264 | memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); |
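
The single added kfree() closes a leak: els_data is allocated near the top of lpfc_send_els_event(), and the default: arm of the switch previously returned without releasing it. The shape of the fix, with the allocation call and case labels abbreviated (placeholders, not the exact lpfc code):

	data = kmalloc(sizeof(*data), GFP_ATOMIC);	/* allocated before the switch */
	if (!data)
		return;

	switch (cmd) {
	case HANDLED_CMD:
		/* ... populate and queue the event ... */
		break;
	default:
		kfree(data);	/* unhandled command: free before the early return */
		return;
	}
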
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 33a3c13fd893..ee9d40152430 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -244,12 +244,6 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, | |||
244 | if (ha->optrom_state != QLA_SWAITING) | 244 | if (ha->optrom_state != QLA_SWAITING) |
245 | break; | 245 | break; |
246 | 246 | ||
247 | if (start & 0xfff) { | ||
248 | qla_printk(KERN_WARNING, ha, | ||
249 | "Invalid start region 0x%x/0x%x.\n", start, size); | ||
250 | return -EINVAL; | ||
251 | } | ||
252 | |||
253 | ha->optrom_region_start = start; | 247 | ha->optrom_region_start = start; |
254 | ha->optrom_region_size = start + size > ha->optrom_size ? | 248 | ha->optrom_region_size = start + size > ha->optrom_size ? |
255 | ha->optrom_size - start : size; | 249 | ha->optrom_size - start : size; |
@@ -303,8 +297,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, | |||
303 | else if (start == (ha->flt_region_boot * 4) || | 297 | else if (start == (ha->flt_region_boot * 4) || |
304 | start == (ha->flt_region_fw * 4)) | 298 | start == (ha->flt_region_fw * 4)) |
305 | valid = 1; | 299 | valid = 1; |
306 | else if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && | 300 | else if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) |
307 | start == (ha->flt_region_vpd_nvram * 4)) | ||
308 | valid = 1; | 301 | valid = 1; |
309 | if (!valid) { | 302 | if (!valid) { |
310 | qla_printk(KERN_WARNING, ha, | 303 | qla_printk(KERN_WARNING, ha, |
@@ -1265,13 +1258,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1265 | test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) | 1258 | test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) |
1266 | msleep(1000); | 1259 | msleep(1000); |
1267 | 1260 | ||
1268 | if (ha->mqenable) { | ||
1269 | if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) | ||
1270 | qla_printk(KERN_WARNING, ha, | ||
1271 | "Queue delete failed.\n"); | ||
1272 | vha->req_ques[0] = ha->req_q_map[0]->id; | ||
1273 | } | ||
1274 | |||
1275 | qla24xx_disable_vp(vha); | 1261 | qla24xx_disable_vp(vha); |
1276 | 1262 | ||
1277 | fc_remove_host(vha->host); | 1263 | fc_remove_host(vha->host); |
@@ -1293,6 +1279,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1293 | vha->host_no, vha->vp_idx, vha)); | 1279 | vha->host_no, vha->vp_idx, vha)); |
1294 | } | 1280 | } |
1295 | 1281 | ||
1282 | if (ha->mqenable) { | ||
1283 | if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) | ||
1284 | qla_printk(KERN_WARNING, ha, | ||
1285 | "Queue delete failed.\n"); | ||
1286 | } | ||
1287 | |||
1296 | scsi_host_put(vha->host); | 1288 | scsi_host_put(vha->host); |
1297 | qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); | 1289 | qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); |
1298 | return 0; | 1290 | return 0; |
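
Deferring the ha->mqenable block until after the host teardown presumably ensures nothing can still submit work to the per-vport queues when qla25xx_delete_queues() runs; the old placement also carried a write to vha->req_ques[0] that is simply dropped. The resulting order, in outline (steps between the calls elided):

	qla24xx_disable_vp(vha);

	fc_remove_host(vha->host);
	/* ... SCSI host removal, request-queue bookkeeping ... */

	if (ha->mqenable) {
		/* queues are deleted only once nothing can submit to them */
		if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
			qla_printk(KERN_WARNING, ha, "Queue delete failed.\n");
	}

	scsi_host_put(vha->host);
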
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 023ee77fb027..e0c5bb54b258 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2135,6 +2135,7 @@ struct qla_msix_entry { | |||
2135 | /* Work events. */ | 2135 | /* Work events. */ |
2136 | enum qla_work_type { | 2136 | enum qla_work_type { |
2137 | QLA_EVT_AEN, | 2137 | QLA_EVT_AEN, |
2138 | QLA_EVT_IDC_ACK, | ||
2138 | }; | 2139 | }; |
2139 | 2140 | ||
2140 | 2141 | ||
@@ -2149,6 +2150,10 @@ struct qla_work_evt { | |||
2149 | enum fc_host_event_code code; | 2150 | enum fc_host_event_code code; |
2150 | u32 data; | 2151 | u32 data; |
2151 | } aen; | 2152 | } aen; |
2153 | struct { | ||
2154 | #define QLA_IDC_ACK_REGS 7 | ||
2155 | uint16_t mb[QLA_IDC_ACK_REGS]; | ||
2156 | } idc_ack; | ||
2152 | } u; | 2157 | } u; |
2153 | }; | 2158 | }; |
2154 | 2159 | ||
diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h index d78d35e681ab..d6ea69df7c5c 100644 --- a/drivers/scsi/qla2xxx/qla_devtbl.h +++ b/drivers/scsi/qla2xxx/qla_devtbl.h | |||
@@ -72,7 +72,7 @@ static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = { | |||
72 | "QLA2462", "Sun PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x141 */ | 72 | "QLA2462", "Sun PCI-X 2.0 to 4Gb FC, Dual Channel", /* 0x141 */ |
73 | "QLE2460", "Sun PCI-Express to 2Gb FC, Single Channel", /* 0x142 */ | 73 | "QLE2460", "Sun PCI-Express to 2Gb FC, Single Channel", /* 0x142 */ |
74 | "QLE2462", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x143 */ | 74 | "QLE2462", "Sun PCI-Express to 4Gb FC, Single Channel", /* 0x143 */ |
75 | "QEM2462" "Server I/O Module 4Gb FC, Dual Channel", /* 0x144 */ | 75 | "QEM2462", "Server I/O Module 4Gb FC, Dual Channel", /* 0x144 */ |
76 | "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */ | 76 | "QLE2440", "PCI-Express to 4Gb FC, Single Channel", /* 0x145 */ |
77 | "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */ | 77 | "QLE2464", "PCI-Express to 4Gb FC, Quad Channel", /* 0x146 */ |
78 | "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */ | 78 | "QLA2440", "PCI-X 2.0 to 4Gb FC, Single Channel", /* 0x147 */ |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 7abb045a0410..ffff42554087 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
@@ -1402,6 +1402,8 @@ struct access_chip_rsp_84xx { | |||
1402 | #define MBA_IDC_NOTIFY 0x8101 | 1402 | #define MBA_IDC_NOTIFY 0x8101 |
1403 | #define MBA_IDC_TIME_EXT 0x8102 | 1403 | #define MBA_IDC_TIME_EXT 0x8102 |
1404 | 1404 | ||
1405 | #define MBC_IDC_ACK 0x101 | ||
1406 | |||
1405 | struct nvram_81xx { | 1407 | struct nvram_81xx { |
1406 | /* NVRAM header. */ | 1408 | /* NVRAM header. */ |
1407 | uint8_t id[4]; | 1409 | uint8_t id[4]; |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index a336b4bc81a7..6de283f8f111 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -72,6 +72,7 @@ extern int qla2x00_loop_reset(scsi_qla_host_t *); | |||
72 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); | 72 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); |
73 | extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum | 73 | extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum |
74 | fc_host_event_code, u32); | 74 | fc_host_event_code, u32); |
75 | extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *); | ||
75 | 76 | ||
76 | extern void qla2x00_abort_fcport_cmds(fc_port_t *); | 77 | extern void qla2x00_abort_fcport_cmds(fc_port_t *); |
77 | extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, | 78 | extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, |
@@ -266,6 +267,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); | |||
266 | 267 | ||
267 | extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); | 268 | extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); |
268 | 269 | ||
270 | extern int qla81xx_idc_ack(scsi_qla_host_t *, uint16_t *); | ||
271 | |||
269 | /* | 272 | /* |
270 | * Global Function Prototypes in qla_isr.c source file. | 273 | * Global Function Prototypes in qla_isr.c source file. |
271 | */ | 274 | */ |
@@ -376,10 +379,8 @@ extern int qla2x00_dfs_remove(scsi_qla_host_t *); | |||
376 | 379 | ||
377 | /* Global function prototypes for multi-q */ | 380 | /* Global function prototypes for multi-q */ |
378 | extern int qla25xx_request_irq(struct rsp_que *); | 381 | extern int qla25xx_request_irq(struct rsp_que *); |
379 | extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *, | 382 | extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); |
380 | uint8_t); | 383 | extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); |
381 | extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *, | ||
382 | uint8_t); | ||
383 | extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, | 384 | extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, |
384 | uint16_t, uint8_t, uint8_t); | 385 | uint16_t, uint8_t, uint8_t); |
385 | extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, | 386 | extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f6368a1d3021..87f9abc71460 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1226,9 +1226,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
1226 | icb->firmware_options_2 |= | 1226 | icb->firmware_options_2 |= |
1227 | __constant_cpu_to_le32(BIT_18); | 1227 | __constant_cpu_to_le32(BIT_18); |
1228 | 1228 | ||
1229 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); | 1229 | icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22); |
1230 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); | 1230 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); |
1231 | ha->rsp_q_map[0]->options = icb->firmware_options_2; | ||
1232 | 1231 | ||
1233 | WRT_REG_DWORD(®->isp25mq.req_q_in, 0); | 1232 | WRT_REG_DWORD(®->isp25mq.req_q_in, 0); |
1234 | WRT_REG_DWORD(®->isp25mq.req_q_out, 0); | 1233 | WRT_REG_DWORD(®->isp25mq.req_q_out, 0); |
@@ -1309,8 +1308,12 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1309 | 1308 | ||
1310 | DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no)); | 1309 | DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no)); |
1311 | 1310 | ||
1312 | if (ha->flags.npiv_supported) | 1311 | if (ha->flags.npiv_supported) { |
1312 | if (ha->operating_mode == LOOP) | ||
1313 | ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; | ||
1313 | mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); | 1314 | mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); |
1315 | } | ||
1316 | |||
1314 | 1317 | ||
1315 | mid_init_cb->options = __constant_cpu_to_le16(BIT_1); | 1318 | mid_init_cb->options = __constant_cpu_to_le16(BIT_1); |
1316 | 1319 | ||
@@ -2611,6 +2614,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
2611 | port_id_t wrap, nxt_d_id; | 2614 | port_id_t wrap, nxt_d_id; |
2612 | struct qla_hw_data *ha = vha->hw; | 2615 | struct qla_hw_data *ha = vha->hw; |
2613 | struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); | 2616 | struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); |
2617 | struct scsi_qla_host *tvp; | ||
2614 | 2618 | ||
2615 | rval = QLA_SUCCESS; | 2619 | rval = QLA_SUCCESS; |
2616 | 2620 | ||
@@ -2710,7 +2714,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, | |||
2710 | /* Bypass virtual ports of the same host. */ | 2714 | /* Bypass virtual ports of the same host. */ |
2711 | found = 0; | 2715 | found = 0; |
2712 | if (ha->num_vhosts) { | 2716 | if (ha->num_vhosts) { |
2713 | list_for_each_entry(vp, &ha->vp_list, list) { | 2717 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
2714 | if (new_fcport->d_id.b24 == vp->d_id.b24) { | 2718 | if (new_fcport->d_id.b24 == vp->d_id.b24) { |
2715 | found = 1; | 2719 | found = 1; |
2716 | break; | 2720 | break; |
@@ -2833,6 +2837,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
2833 | uint16_t first_loop_id; | 2837 | uint16_t first_loop_id; |
2834 | struct qla_hw_data *ha = vha->hw; | 2838 | struct qla_hw_data *ha = vha->hw; |
2835 | struct scsi_qla_host *vp; | 2839 | struct scsi_qla_host *vp; |
2840 | struct scsi_qla_host *tvp; | ||
2836 | 2841 | ||
2837 | rval = QLA_SUCCESS; | 2842 | rval = QLA_SUCCESS; |
2838 | 2843 | ||
@@ -2857,7 +2862,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
2857 | /* Check for loop ID being already in use. */ | 2862 | /* Check for loop ID being already in use. */ |
2858 | found = 0; | 2863 | found = 0; |
2859 | fcport = NULL; | 2864 | fcport = NULL; |
2860 | list_for_each_entry(vp, &ha->vp_list, list) { | 2865 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
2861 | list_for_each_entry(fcport, &vp->vp_fcports, list) { | 2866 | list_for_each_entry(fcport, &vp->vp_fcports, list) { |
2862 | if (fcport->loop_id == dev->loop_id && | 2867 | if (fcport->loop_id == dev->loop_id && |
2863 | fcport != dev) { | 2868 | fcport != dev) { |
@@ -3292,6 +3297,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
3292 | uint8_t status = 0; | 3297 | uint8_t status = 0; |
3293 | struct qla_hw_data *ha = vha->hw; | 3298 | struct qla_hw_data *ha = vha->hw; |
3294 | struct scsi_qla_host *vp; | 3299 | struct scsi_qla_host *vp; |
3300 | struct scsi_qla_host *tvp; | ||
3295 | struct req_que *req = ha->req_q_map[0]; | 3301 | struct req_que *req = ha->req_q_map[0]; |
3296 | 3302 | ||
3297 | if (vha->flags.online) { | 3303 | if (vha->flags.online) { |
@@ -3307,7 +3313,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
3307 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 3313 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
3308 | atomic_set(&vha->loop_state, LOOP_DOWN); | 3314 | atomic_set(&vha->loop_state, LOOP_DOWN); |
3309 | qla2x00_mark_all_devices_lost(vha, 0); | 3315 | qla2x00_mark_all_devices_lost(vha, 0); |
3310 | list_for_each_entry(vp, &ha->vp_list, list) | 3316 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) |
3311 | qla2x00_mark_all_devices_lost(vp, 0); | 3317 | qla2x00_mark_all_devices_lost(vp, 0); |
3312 | } else { | 3318 | } else { |
3313 | if (!atomic_read(&vha->loop_down_timer)) | 3319 | if (!atomic_read(&vha->loop_down_timer)) |
@@ -3404,7 +3410,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
3404 | DEBUG(printk(KERN_INFO | 3410 | DEBUG(printk(KERN_INFO |
3405 | "qla2x00_abort_isp(%ld): succeeded.\n", | 3411 | "qla2x00_abort_isp(%ld): succeeded.\n", |
3406 | vha->host_no)); | 3412 | vha->host_no)); |
3407 | list_for_each_entry(vp, &ha->vp_list, list) { | 3413 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
3408 | if (vp->vp_idx) | 3414 | if (vp->vp_idx) |
3409 | qla2x00_vp_abort_isp(vp); | 3415 | qla2x00_vp_abort_isp(vp); |
3410 | } | 3416 | } |
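
All of the vp_list walks in this file switch to list_for_each_entry_safe(), which caches the next element before the loop body runs, so the iteration survives a vport being unlinked from ha->vp_list while it is being processed (the added 'tvp' locals exist only to hold that lookahead pointer). For example:

	struct scsi_qla_host *vp, *tvp;

	/* vp may be removed from ha->vp_list inside the body; tvp keeps the walk valid */
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		if (vp->vp_idx)
			qla2x00_vp_abort_isp(vp);
	}
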
@@ -3429,7 +3435,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
3429 | static int | 3435 | static int |
3430 | qla2x00_restart_isp(scsi_qla_host_t *vha) | 3436 | qla2x00_restart_isp(scsi_qla_host_t *vha) |
3431 | { | 3437 | { |
3432 | uint8_t status = 0; | 3438 | int status = 0; |
3433 | uint32_t wait_time; | 3439 | uint32_t wait_time; |
3434 | struct qla_hw_data *ha = vha->hw; | 3440 | struct qla_hw_data *ha = vha->hw; |
3435 | struct req_que *req = ha->req_q_map[0]; | 3441 | struct req_que *req = ha->req_q_map[0]; |
@@ -3493,7 +3499,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) | |||
3493 | rsp = ha->rsp_q_map[i]; | 3499 | rsp = ha->rsp_q_map[i]; |
3494 | if (rsp) { | 3500 | if (rsp) { |
3495 | rsp->options &= ~BIT_0; | 3501 | rsp->options &= ~BIT_0; |
3496 | ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options); | 3502 | ret = qla25xx_init_rsp_que(base_vha, rsp); |
3497 | if (ret != QLA_SUCCESS) | 3503 | if (ret != QLA_SUCCESS) |
3498 | DEBUG2_17(printk(KERN_WARNING | 3504 | DEBUG2_17(printk(KERN_WARNING |
3499 | "%s Rsp que:%d init failed\n", __func__, | 3505 | "%s Rsp que:%d init failed\n", __func__, |
@@ -3507,7 +3513,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) | |||
3507 | if (req) { | 3513 | if (req) { |
3508 | /* Clear outstanding commands array. */ | 3514 | /* Clear outstanding commands array. */ |
3509 | req->options &= ~BIT_0; | 3515 | req->options &= ~BIT_0; |
3510 | ret = qla25xx_init_req_que(base_vha, req, req->options); | 3516 | ret = qla25xx_init_req_que(base_vha, req); |
3511 | if (ret != QLA_SUCCESS) | 3517 | if (ret != QLA_SUCCESS) |
3512 | DEBUG2_17(printk(KERN_WARNING | 3518 | DEBUG2_17(printk(KERN_WARNING |
3513 | "%s Req que:%d init failed\n", __func__, | 3519 | "%s Req que:%d init failed\n", __func__, |
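
qla25xx_init_req_que() and qla25xx_init_rsp_que() lose their separate 'options' argument because each queue already carries its own options word: the call sites above clear BIT_0 in req->options / rsp->options, and the mailbox routine now picks the value up from the queue itself (the qla_mbx.c hunk below sets mcp->mb[1] = req->options). The simplified call site:

	req->options &= ~BIT_0;			/* the queue structure carries its own option bits */
	ret = qla25xx_init_req_que(base_vha, req);	/* no 'options' parameter any more */
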
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index e28ad81baf1e..f250e5b7897c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -266,6 +266,40 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
266 | } | 266 | } |
267 | } | 267 | } |
268 | 268 | ||
269 | static void | ||
270 | qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) | ||
271 | { | ||
272 | static char *event[] = | ||
273 | { "Complete", "Request Notification", "Time Extension" }; | ||
274 | int rval; | ||
275 | struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; | ||
276 | uint16_t __iomem *wptr; | ||
277 | uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; | ||
278 | |||
279 | /* Seed data -- mailbox1 -> mailbox7. */ | ||
280 | wptr = (uint16_t __iomem *)®24->mailbox1; | ||
281 | for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) | ||
282 | mb[cnt] = RD_REG_WORD(wptr); | ||
283 | |||
284 | DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- " | ||
285 | "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, | ||
286 | event[aen & 0xff], | ||
287 | mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); | ||
288 | |||
289 | /* Acknowledgement needed? [Notify && non-zero timeout]. */ | ||
290 | timeout = (descr >> 8) & 0xf; | ||
291 | if (aen != MBA_IDC_NOTIFY || !timeout) | ||
292 | return; | ||
293 | |||
294 | DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- " | ||
295 | "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); | ||
296 | |||
297 | rval = qla2x00_post_idc_ack_work(vha, mb); | ||
298 | if (rval != QLA_SUCCESS) | ||
299 | qla_printk(KERN_WARNING, vha->hw, | ||
300 | "IDC failed to post ACK.\n"); | ||
301 | } | ||
302 | |||
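
qla81xx_idc_event() runs from the interrupt path, so it only snapshots mailbox registers 1-7 and, when the firmware asks for an acknowledgement, posts a QLA_EVT_IDC_ACK work item (the new idc_ack member of struct qla_work_evt carries the seven registers); the MBC_IDC_ACK mailbox command itself is issued later from process context via qla81xx_idc_ack(). A sketch of the process-context side of that hand-off (the helper and the way it is dispatched are assumed, following the usual qla2xxx work-list pattern):

	static void qla2x00_handle_idc_ack(struct scsi_qla_host *vha,
					   struct qla_work_evt *e)
	{
		/* runs from the work/DPC path, where mailbox commands may sleep */
		if (qla81xx_idc_ack(vha, e->u.idc_ack.mb) != QLA_SUCCESS)
			qla_printk(KERN_WARNING, vha->hw,
			    "IDC ACK mailbox command failed.\n");
	}
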
269 | /** | 303 | /** |
270 | * qla2x00_async_event() - Process asynchronous events. | 304 | * qla2x00_async_event() - Process asynchronous events. |
271 | * @ha: SCSI driver HA context | 305 | * @ha: SCSI driver HA context |
@@ -714,21 +748,9 @@ skip_rio: | |||
714 | "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); | 748 | "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); |
715 | break; | 749 | break; |
716 | case MBA_IDC_COMPLETE: | 750 | case MBA_IDC_COMPLETE: |
717 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | ||
718 | "Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2], | ||
719 | mb[3])); | ||
720 | break; | ||
721 | case MBA_IDC_NOTIFY: | 751 | case MBA_IDC_NOTIFY: |
722 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | ||
723 | "Request Notification -- %04x %04x %04x\n", vha->host_no, | ||
724 | mb[1], mb[2], mb[3])); | ||
725 | /**** Mailbox registers 4 - 7 valid!!! */ | ||
726 | break; | ||
727 | case MBA_IDC_TIME_EXT: | 752 | case MBA_IDC_TIME_EXT: |
728 | DEBUG2(printk("scsi(%ld): Inter-Driver Commucation " | 753 | qla81xx_idc_event(vha, mb[0], mb[1]); |
729 | "Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1], | ||
730 | mb[2], mb[3])); | ||
731 | /**** Mailbox registers 4 - 7 valid!!! */ | ||
732 | break; | 754 | break; |
733 | } | 755 | } |
734 | 756 | ||
@@ -1707,7 +1729,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
1707 | struct qla_hw_data *ha; | 1729 | struct qla_hw_data *ha; |
1708 | struct rsp_que *rsp; | 1730 | struct rsp_que *rsp; |
1709 | struct device_reg_24xx __iomem *reg; | 1731 | struct device_reg_24xx __iomem *reg; |
1710 | uint16_t msix_disabled_hccr = 0; | ||
1711 | 1732 | ||
1712 | rsp = (struct rsp_que *) dev_id; | 1733 | rsp = (struct rsp_que *) dev_id; |
1713 | if (!rsp) { | 1734 | if (!rsp) { |
@@ -1720,17 +1741,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) | |||
1720 | 1741 | ||
1721 | spin_lock_irq(&ha->hardware_lock); | 1742 | spin_lock_irq(&ha->hardware_lock); |
1722 | 1743 | ||
1723 | msix_disabled_hccr = rsp->options; | ||
1724 | if (!rsp->id) | ||
1725 | msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22); | ||
1726 | else | ||
1727 | msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6); | ||
1728 | |||
1729 | qla24xx_process_response_queue(rsp); | 1744 | qla24xx_process_response_queue(rsp); |
1730 | 1745 | ||
1731 | if (!msix_disabled_hccr) | ||
1732 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | ||
1733 | |||
1734 | spin_unlock_irq(&ha->hardware_lock); | 1746 | spin_unlock_irq(&ha->hardware_lock); |
1735 | 1747 | ||
1736 | return IRQ_HANDLED; | 1748 | return IRQ_HANDLED; |
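The qla_isr.c hunks above fold the three Inter-Driver Communication cases (Complete, Notify, Time Extension) into one helper that snapshots mailboxes 1-7 and only queues an acknowledgement when a notification carries a non-zero timeout. A minimal userspace sketch of that shape, with purely illustrative names and event codes, might look like this:

/* Minimal sketch (not the driver code): three related async events
 * funnel into one handler that snapshots a fixed register window and
 * only schedules an acknowledgement for "notify" events that carry a
 * non-zero timeout.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define IDC_REGS 7

enum idc_event { IDC_COMPLETE, IDC_NOTIFY, IDC_TIME_EXT };

static const char *idc_name[] = {
	[IDC_COMPLETE] = "Complete",
	[IDC_NOTIFY]   = "Request Notification",
	[IDC_TIME_EXT] = "Time Extension",
};

/* Stand-in for the mailbox registers the hardware latches per event. */
static uint16_t fake_mailbox(int i) { return 0x1000 + i; }

static bool handle_idc_event(enum idc_event ev, uint16_t descr)
{
	uint16_t mb[IDC_REGS];
	uint16_t timeout = (descr >> 8) & 0xf;   /* same encoding the patch reads from mb[1] */
	int i;

	for (i = 0; i < IDC_REGS; i++)           /* seed data once for every case */
		mb[i] = fake_mailbox(i + 1);

	printf("IDC %s -- %04x %04x %04x\n", idc_name[ev], mb[0], mb[1], mb[2]);

	/* Only a notification with a non-zero timeout needs an ACK. */
	if (ev != IDC_NOTIFY || !timeout)
		return false;

	printf("IDC %s -- ACK timeout=%u, queueing ack\n", idc_name[ev], timeout);
	return true;                             /* caller would post the ACK work item */
}

int main(void)
{
	handle_idc_event(IDC_COMPLETE, 0);
	handle_idc_event(IDC_NOTIFY, 0x0300);    /* 3-second timeout -> ACK queued */
	return 0;
}
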
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index f94ffbb98e95..4aab7acf7525 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -2685,6 +2685,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
2685 | uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); | 2685 | uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); |
2686 | struct qla_hw_data *ha = vha->hw; | 2686 | struct qla_hw_data *ha = vha->hw; |
2687 | scsi_qla_host_t *vp; | 2687 | scsi_qla_host_t *vp; |
2688 | scsi_qla_host_t *tvp; | ||
2688 | 2689 | ||
2689 | if (rptid_entry->entry_status != 0) | 2690 | if (rptid_entry->entry_status != 0) |
2690 | return; | 2691 | return; |
@@ -2710,7 +2711,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
2710 | if (MSB(stat) == 1) | 2711 | if (MSB(stat) == 1) |
2711 | return; | 2712 | return; |
2712 | 2713 | ||
2713 | list_for_each_entry(vp, &ha->vp_list, list) | 2714 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) |
2714 | if (vp_idx == vp->vp_idx) | 2715 | if (vp_idx == vp->vp_idx) |
2715 | break; | 2716 | break; |
2716 | if (!vp) | 2717 | if (!vp) |
@@ -3090,8 +3091,7 @@ verify_done: | |||
3090 | } | 3091 | } |
3091 | 3092 | ||
3092 | int | 3093 | int |
3093 | qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | 3094 | qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) |
3094 | uint8_t options) | ||
3095 | { | 3095 | { |
3096 | int rval; | 3096 | int rval; |
3097 | unsigned long flags; | 3097 | unsigned long flags; |
@@ -3101,7 +3101,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
3101 | struct qla_hw_data *ha = vha->hw; | 3101 | struct qla_hw_data *ha = vha->hw; |
3102 | 3102 | ||
3103 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3103 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
3104 | mcp->mb[1] = options; | 3104 | mcp->mb[1] = req->options; |
3105 | mcp->mb[2] = MSW(LSD(req->dma)); | 3105 | mcp->mb[2] = MSW(LSD(req->dma)); |
3106 | mcp->mb[3] = LSW(LSD(req->dma)); | 3106 | mcp->mb[3] = LSW(LSD(req->dma)); |
3107 | mcp->mb[6] = MSW(MSD(req->dma)); | 3107 | mcp->mb[6] = MSW(MSD(req->dma)); |
@@ -3128,7 +3128,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
3128 | mcp->tov = 60; | 3128 | mcp->tov = 60; |
3129 | 3129 | ||
3130 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3130 | spin_lock_irqsave(&ha->hardware_lock, flags); |
3131 | if (!(options & BIT_0)) { | 3131 | if (!(req->options & BIT_0)) { |
3132 | WRT_REG_DWORD(®->req_q_in, 0); | 3132 | WRT_REG_DWORD(®->req_q_in, 0); |
3133 | WRT_REG_DWORD(®->req_q_out, 0); | 3133 | WRT_REG_DWORD(®->req_q_out, 0); |
3134 | } | 3134 | } |
@@ -3142,8 +3142,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | |||
3142 | } | 3142 | } |
3143 | 3143 | ||
3144 | int | 3144 | int |
3145 | qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | 3145 | qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) |
3146 | uint8_t options) | ||
3147 | { | 3146 | { |
3148 | int rval; | 3147 | int rval; |
3149 | unsigned long flags; | 3148 | unsigned long flags; |
@@ -3153,7 +3152,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
3153 | struct qla_hw_data *ha = vha->hw; | 3152 | struct qla_hw_data *ha = vha->hw; |
3154 | 3153 | ||
3155 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | 3154 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; |
3156 | mcp->mb[1] = options; | 3155 | mcp->mb[1] = rsp->options; |
3157 | mcp->mb[2] = MSW(LSD(rsp->dma)); | 3156 | mcp->mb[2] = MSW(LSD(rsp->dma)); |
3158 | mcp->mb[3] = LSW(LSD(rsp->dma)); | 3157 | mcp->mb[3] = LSW(LSD(rsp->dma)); |
3159 | mcp->mb[6] = MSW(MSD(rsp->dma)); | 3158 | mcp->mb[6] = MSW(MSD(rsp->dma)); |
@@ -3178,7 +3177,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
3178 | mcp->tov = 60; | 3177 | mcp->tov = 60; |
3179 | 3178 | ||
3180 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3179 | spin_lock_irqsave(&ha->hardware_lock, flags); |
3181 | if (!(options & BIT_0)) { | 3180 | if (!(rsp->options & BIT_0)) { |
3182 | WRT_REG_DWORD(®->rsp_q_out, 0); | 3181 | WRT_REG_DWORD(®->rsp_q_out, 0); |
3183 | WRT_REG_DWORD(®->rsp_q_in, 0); | 3182 | WRT_REG_DWORD(®->rsp_q_in, 0); |
3184 | } | 3183 | } |
@@ -3193,3 +3192,29 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | |||
3193 | return rval; | 3192 | return rval; |
3194 | } | 3193 | } |
3195 | 3194 | ||
3195 | int | ||
3196 | qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) | ||
3197 | { | ||
3198 | int rval; | ||
3199 | mbx_cmd_t mc; | ||
3200 | mbx_cmd_t *mcp = &mc; | ||
3201 | |||
3202 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); | ||
3203 | |||
3204 | mcp->mb[0] = MBC_IDC_ACK; | ||
3205 | memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); | ||
3206 | mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3207 | mcp->in_mb = MBX_0; | ||
3208 | mcp->tov = MBX_TOV_SECONDS; | ||
3209 | mcp->flags = 0; | ||
3210 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3211 | |||
3212 | if (rval != QLA_SUCCESS) { | ||
3213 | DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, | ||
3214 | vha->host_no, rval, mcp->mb[0])); | ||
3215 | } else { | ||
3216 | DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); | ||
3217 | } | ||
3218 | |||
3219 | return rval; | ||
3220 | } | ||
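The new qla81xx_idc_ack() follows the driver's usual mailbox-command pattern: fill mb[0..7], then declare through bitmasks which registers travel out to the firmware and which are read back. The sketch below reproduces that descriptor shape with hypothetical names and an invented opcode; it is not the driver's mbx_cmd_t.

/* Userspace sketch of the mailbox-descriptor pattern: the caller packs
 * the saved event registers into mb[1..7] and flags each register as
 * outgoing or incoming with a bitmask.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MBX(n)     (1u << (n))
#define ACK_REGS   7             /* mb[1]..mb[7] carry the saved event data */

struct mbox_cmd {
	uint16_t mb[8];
	uint32_t out_mask;        /* registers written to the firmware */
	uint32_t in_mask;         /* registers expected back */
};

static void build_idc_ack(struct mbox_cmd *mc, const uint16_t *saved)
{
	memset(mc, 0, sizeof(*mc));
	mc->mb[0] = 0x101;                               /* hypothetical opcode */
	memcpy(&mc->mb[1], saved, ACK_REGS * sizeof(uint16_t));
	mc->out_mask = MBX(7) | MBX(6) | MBX(5) | MBX(4) |
		       MBX(3) | MBX(2) | MBX(1) | MBX(0);
	mc->in_mask = MBX(0);                            /* only the status register matters */
}

static void send_mbox(const struct mbox_cmd *mc)
{
	for (int i = 0; i < 8; i++)
		if (mc->out_mask & MBX(i))
			printf("write mb[%d] = %04x\n", i, mc->mb[i]);
}

int main(void)
{
	uint16_t saved[ACK_REGS] = { 1, 2, 3, 4, 5, 6, 7 };
	struct mbox_cmd mc;

	build_idc_ack(&mc, saved);
	send_mbox(&mc);
	return 0;
}
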
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index f53179c46423..785c61279e6e 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -69,9 +69,10 @@ static scsi_qla_host_t * | |||
69 | qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) | 69 | qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) |
70 | { | 70 | { |
71 | scsi_qla_host_t *vha; | 71 | scsi_qla_host_t *vha; |
72 | struct scsi_qla_host *tvha; | ||
72 | 73 | ||
73 | /* Locate matching device in database. */ | 74 | /* Locate matching device in database. */ |
74 | list_for_each_entry(vha, &ha->vp_list, list) { | 75 | list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { |
75 | if (!memcmp(port_name, vha->port_name, WWN_SIZE)) | 76 | if (!memcmp(port_name, vha->port_name, WWN_SIZE)) |
76 | return vha; | 77 | return vha; |
77 | } | 78 | } |
@@ -194,11 +195,11 @@ qla24xx_configure_vp(scsi_qla_host_t *vha) | |||
194 | void | 195 | void |
195 | qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) | 196 | qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) |
196 | { | 197 | { |
197 | scsi_qla_host_t *vha; | 198 | scsi_qla_host_t *vha, *tvha; |
198 | struct qla_hw_data *ha = rsp->hw; | 199 | struct qla_hw_data *ha = rsp->hw; |
199 | int i = 0; | 200 | int i = 0; |
200 | 201 | ||
201 | list_for_each_entry(vha, &ha->vp_list, list) { | 202 | list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { |
202 | if (vha->vp_idx) { | 203 | if (vha->vp_idx) { |
203 | switch (mb[0]) { | 204 | switch (mb[0]) { |
204 | case MBA_LIP_OCCURRED: | 205 | case MBA_LIP_OCCURRED: |
@@ -300,6 +301,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) | |||
300 | int ret; | 301 | int ret; |
301 | struct qla_hw_data *ha = vha->hw; | 302 | struct qla_hw_data *ha = vha->hw; |
302 | scsi_qla_host_t *vp; | 303 | scsi_qla_host_t *vp; |
304 | struct scsi_qla_host *tvp; | ||
303 | 305 | ||
304 | if (vha->vp_idx) | 306 | if (vha->vp_idx) |
305 | return; | 307 | return; |
@@ -308,7 +310,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) | |||
308 | 310 | ||
309 | clear_bit(VP_DPC_NEEDED, &vha->dpc_flags); | 311 | clear_bit(VP_DPC_NEEDED, &vha->dpc_flags); |
310 | 312 | ||
311 | list_for_each_entry(vp, &ha->vp_list, list) { | 313 | list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { |
312 | if (vp->vp_idx) | 314 | if (vp->vp_idx) |
313 | ret = qla2x00_do_dpc_vp(vp); | 315 | ret = qla2x00_do_dpc_vp(vp); |
314 | } | 316 | } |
@@ -396,7 +398,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
396 | 398 | ||
397 | qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); | 399 | qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); |
398 | 400 | ||
399 | memset(vha->req_ques, 0, sizeof(vha->req_ques) * QLA_MAX_HOST_QUES); | 401 | memset(vha->req_ques, 0, sizeof(vha->req_ques)); |
400 | vha->req_ques[0] = ha->req_q_map[0]->id; | 402 | vha->req_ques[0] = ha->req_q_map[0]->id; |
401 | host->can_queue = ha->req_q_map[0]->length + 128; | 403 | host->can_queue = ha->req_q_map[0]->length + 128; |
402 | host->this_id = 255; | 404 | host->this_id = 255; |
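The memset change above fixes a classic sizeof mistake: req_ques is a real array member, so sizeof(vha->req_ques) already covers every element, and multiplying by QLA_MAX_HOST_QUES again asked to clear several times the field's size. A small standalone illustration (structure and names invented):

/* Sketch of the sizeof bug fixed above: for a true array member,
 * sizeof already accounts for every element, so multiplying by the
 * element count again would clear far past the end of the field.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_HOST_QUES 8

struct vhost {
	uint16_t req_ques[MAX_HOST_QUES];
	uint32_t canary;                 /* would be trashed by the oversized memset */
};

int main(void)
{
	struct vhost v;

	v.canary = 0xdeadbeef;

	/* Correct: sizeof(array member) is already MAX_HOST_QUES * sizeof(uint16_t). */
	memset(v.req_ques, 0, sizeof(v.req_ques));
	printf("sizeof(v.req_ques) = %zu bytes\n", sizeof(v.req_ques));

	/* The pre-fix form would have requested this many bytes instead: */
	printf("buggy length would be %zu bytes\n",
	       sizeof(v.req_ques) * MAX_HOST_QUES);

	printf("canary still intact: %#x\n", v.canary);
	return 0;
}
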
@@ -471,7 +473,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) | |||
471 | 473 | ||
472 | if (req) { | 474 | if (req) { |
473 | req->options |= BIT_0; | 475 | req->options |= BIT_0; |
474 | ret = qla25xx_init_req_que(vha, req, req->options); | 476 | ret = qla25xx_init_req_que(vha, req); |
475 | } | 477 | } |
476 | if (ret == QLA_SUCCESS) | 478 | if (ret == QLA_SUCCESS) |
477 | qla25xx_free_req_que(vha, req); | 479 | qla25xx_free_req_que(vha, req); |
@@ -486,7 +488,7 @@ qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | |||
486 | 488 | ||
487 | if (rsp) { | 489 | if (rsp) { |
488 | rsp->options |= BIT_0; | 490 | rsp->options |= BIT_0; |
489 | ret = qla25xx_init_rsp_que(vha, rsp, rsp->options); | 491 | ret = qla25xx_init_rsp_que(vha, rsp); |
490 | } | 492 | } |
491 | if (ret == QLA_SUCCESS) | 493 | if (ret == QLA_SUCCESS) |
492 | qla25xx_free_rsp_que(vha, rsp); | 494 | qla25xx_free_rsp_que(vha, rsp); |
@@ -502,7 +504,7 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos) | |||
502 | 504 | ||
503 | req->options |= BIT_3; | 505 | req->options |= BIT_3; |
504 | req->qos = qos; | 506 | req->qos = qos; |
505 | ret = qla25xx_init_req_que(vha, req, req->options); | 507 | ret = qla25xx_init_req_que(vha, req); |
506 | if (ret != QLA_SUCCESS) | 508 | if (ret != QLA_SUCCESS) |
507 | DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__)); | 509 | DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__)); |
508 | /* restore options bit */ | 510 | /* restore options bit */ |
@@ -632,7 +634,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, | |||
632 | req->max_q_depth = ha->req_q_map[0]->max_q_depth; | 634 | req->max_q_depth = ha->req_q_map[0]->max_q_depth; |
633 | mutex_unlock(&ha->vport_lock); | 635 | mutex_unlock(&ha->vport_lock); |
634 | 636 | ||
635 | ret = qla25xx_init_req_que(base_vha, req, options); | 637 | ret = qla25xx_init_req_que(base_vha, req); |
636 | if (ret != QLA_SUCCESS) { | 638 | if (ret != QLA_SUCCESS) { |
637 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); | 639 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); |
638 | mutex_lock(&ha->vport_lock); | 640 | mutex_lock(&ha->vport_lock); |
@@ -710,7 +712,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, | |||
710 | if (ret) | 712 | if (ret) |
711 | goto que_failed; | 713 | goto que_failed; |
712 | 714 | ||
713 | ret = qla25xx_init_rsp_que(base_vha, rsp, options); | 715 | ret = qla25xx_init_rsp_que(base_vha, rsp); |
714 | if (ret != QLA_SUCCESS) { | 716 | if (ret != QLA_SUCCESS) { |
715 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); | 717 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); |
716 | mutex_lock(&ha->vport_lock); | 718 | mutex_lock(&ha->vport_lock); |
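The list_for_each_entry_safe conversions in this file (and in qla_mbx.c above) switch to the walk that caches the next element before the loop body runs, so an entry may be unlinked mid-iteration. The standalone sketch below shows the same idea on a plain singly linked list rather than the kernel's list_head machinery:

/* "Safe" list walk: remember the next node before the body may free
 * the current one, so deletion does not derail the iteration.
 */
#include <stdio.h>
#include <stdlib.h>

struct vport {
	int idx;
	struct vport *next;
};

static struct vport *add_vport(struct vport *head, int idx)
{
	struct vport *v = malloc(sizeof(*v));
	v->idx = idx;
	v->next = head;
	return v;
}

int main(void)
{
	struct vport *head = NULL, *cur, *next, **link;

	for (int i = 1; i <= 4; i++)
		head = add_vport(head, i);

	link = &head;
	for (cur = head; cur; cur = next) {
		next = cur->next;                /* cache before possibly freeing cur */
		if (cur->idx % 2 == 0) {         /* drop even-numbered vports mid-walk */
			*link = next;
			free(cur);
			continue;
		}
		printf("keeping vport %d\n", cur->idx);
		link = &cur->next;
	}

	for (cur = head; cur; cur = cur->next)
		printf("remaining vport %d\n", cur->idx);
	/* cleanup of the survivors omitted for brevity */
	return 0;
}
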
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c11f872d3e10..3ddfa889e949 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -2222,10 +2222,6 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, | |||
2222 | { | 2222 | { |
2223 | char name[16]; | 2223 | char name[16]; |
2224 | 2224 | ||
2225 | ha->init_cb_size = sizeof(init_cb_t); | ||
2226 | if (IS_QLA2XXX_MIDTYPE(ha)) | ||
2227 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); | ||
2228 | |||
2229 | ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, | 2225 | ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, |
2230 | &ha->init_cb_dma, GFP_KERNEL); | 2226 | &ha->init_cb_dma, GFP_KERNEL); |
2231 | if (!ha->init_cb) | 2227 | if (!ha->init_cb) |
@@ -2522,6 +2518,19 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, | |||
2522 | return qla2x00_post_work(vha, e, 1); | 2518 | return qla2x00_post_work(vha, e, 1); |
2523 | } | 2519 | } |
2524 | 2520 | ||
2521 | int | ||
2522 | qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) | ||
2523 | { | ||
2524 | struct qla_work_evt *e; | ||
2525 | |||
2526 | e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); | ||
2527 | if (!e) | ||
2528 | return QLA_FUNCTION_FAILED; | ||
2529 | |||
2530 | memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); | ||
2531 | return qla2x00_post_work(vha, e, 1); | ||
2532 | } | ||
2533 | |||
2525 | static void | 2534 | static void |
2526 | qla2x00_do_work(struct scsi_qla_host *vha) | 2535 | qla2x00_do_work(struct scsi_qla_host *vha) |
2527 | { | 2536 | { |
@@ -2539,6 +2548,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) | |||
2539 | fc_host_post_event(vha->host, fc_get_event_number(), | 2548 | fc_host_post_event(vha->host, fc_get_event_number(), |
2540 | e->u.aen.code, e->u.aen.data); | 2549 | e->u.aen.code, e->u.aen.data); |
2541 | break; | 2550 | break; |
2551 | case QLA_EVT_IDC_ACK: | ||
2552 | qla81xx_idc_ack(vha, e->u.idc_ack.mb); | ||
2553 | break; | ||
2542 | } | 2554 | } |
2543 | if (e->flags & QLA_EVT_FLAG_FREE) | 2555 | if (e->flags & QLA_EVT_FLAG_FREE) |
2544 | kfree(e); | 2556 | kfree(e); |
@@ -2552,7 +2564,7 @@ qla2x00_do_work(struct scsi_qla_host *vha) | |||
2552 | void qla2x00_relogin(struct scsi_qla_host *vha) | 2564 | void qla2x00_relogin(struct scsi_qla_host *vha) |
2553 | { | 2565 | { |
2554 | fc_port_t *fcport; | 2566 | fc_port_t *fcport; |
2555 | uint8_t status; | 2567 | int status; |
2556 | uint16_t next_loopid = 0; | 2568 | uint16_t next_loopid = 0; |
2557 | struct qla_hw_data *ha = vha->hw; | 2569 | struct qla_hw_data *ha = vha->hw; |
2558 | 2570 | ||
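The new qla2x00_post_idc_ack_work() copies the mailbox snapshot into a small event and queues it, so the sleeping MBC_IDC_ACK mailbox command runs later from qla2x00_do_work() instead of in interrupt context. A simplified, single-threaded userspace sketch of that defer-to-worker pattern (locking omitted, names illustrative):

/* Defer-to-worker sketch: the fast path only copies data into an event
 * and appends it to a queue; the slow acknowledgement happens when the
 * queue is drained.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define ACK_REGS 7

enum evt_type { EVT_AEN, EVT_IDC_ACK };

struct work_evt {
	enum evt_type type;
	struct work_evt *next;
	union {
		struct { uint16_t mb[ACK_REGS]; } idc_ack;
		struct { int code; } aen;
	} u;
};

static struct work_evt *queue_head, **queue_tail = &queue_head;

static int post_idc_ack(const uint16_t *mb)
{
	struct work_evt *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	e->type = EVT_IDC_ACK;
	memcpy(e->u.idc_ack.mb, mb, sizeof(e->u.idc_ack.mb));
	*queue_tail = e;                 /* append; the real code holds a lock here */
	queue_tail = &e->next;
	return 0;
}

static void do_work(void)
{
	struct work_evt *e;

	while ((e = queue_head) != NULL) {
		queue_head = e->next;
		if (!queue_head)
			queue_tail = &queue_head;
		switch (e->type) {
		case EVT_IDC_ACK:
			printf("sending IDC ack, mb1=%04x\n", e->u.idc_ack.mb[0]);
			break;
		case EVT_AEN:
			printf("posting AEN %d\n", e->u.aen.code);
			break;
		}
		free(e);
	}
}

int main(void)
{
	uint16_t mb[ACK_REGS] = { 0x1111, 0x2222, 0x3333, 0, 0, 0, 0 };

	post_idc_ack(mb);
	do_work();
	return 0;
}
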
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 9c3b694c049d..284827926eff 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -684,7 +684,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
684 | "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, | 684 | "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, |
685 | le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); | 685 | le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); |
686 | 686 | ||
687 | switch (le32_to_cpu(region->code)) { | 687 | switch (le32_to_cpu(region->code) & 0xff) { |
688 | case FLT_REG_FW: | 688 | case FLT_REG_FW: |
689 | ha->flt_region_fw = start; | 689 | ha->flt_region_fw = start; |
690 | break; | 690 | break; |
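The qla_sup.c change masks the FLT region code down to its low byte before the switch, on the basis that only that byte selects the region; the sketch below treats the upper bits as unrelated attribute flags, which is an assumption made purely for illustration, and the region values are hypothetical:

/* Masking sketch: without the "& 0xff", a code with any high-order
 * bits set would never match a case label.
 */
#include <stdio.h>
#include <stdint.h>

#define FLT_REG_FW   0x01   /* hypothetical region identifiers */
#define FLT_REG_BOOT 0x07

static const char *region_name(uint32_t code)
{
	switch (code & 0xff) {          /* ignore bits 8..31 */
	case FLT_REG_FW:   return "firmware";
	case FLT_REG_BOOT: return "boot code";
	default:           return "unknown";
	}
}

int main(void)
{
	/* Same region, with and without extra high bits set. */
	printf("%s\n", region_name(0x00000001));
	printf("%s\n", region_name(0x80000001));
	return 0;
}
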
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index cfa4c11a4797..a772eab2f0ea 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.03.00-k2" | 10 | #define QLA2XXX_VERSION "8.03.00-k4" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 3 | 13 | #define QLA_DRIVER_MINOR_VER 3 |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 940dc32ff0dc..b82ffd90632e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1040,12 +1040,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
1040 | action = ACTION_FAIL; | 1040 | action = ACTION_FAIL; |
1041 | break; | 1041 | break; |
1042 | case ABORTED_COMMAND: | 1042 | case ABORTED_COMMAND: |
1043 | action = ACTION_FAIL; | ||
1043 | if (sshdr.asc == 0x10) { /* DIF */ | 1044 | if (sshdr.asc == 0x10) { /* DIF */ |
1044 | description = "Target Data Integrity Failure"; | 1045 | description = "Target Data Integrity Failure"; |
1045 | action = ACTION_FAIL; | ||
1046 | error = -EILSEQ; | 1046 | error = -EILSEQ; |
1047 | } else | 1047 | } |
1048 | action = ACTION_RETRY; | ||
1049 | break; | 1048 | break; |
1050 | case NOT_READY: | 1049 | case NOT_READY: |
1051 | /* If the device is in the process of becoming | 1050 | /* If the device is in the process of becoming |
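After the scsi_lib.c change, an ABORTED_COMMAND sense always fails the command instead of retrying, and a DIF protection error (asc 0x10) additionally reports -EILSEQ. A compact illustration of that decision, using an invented verdict struct rather than scsi_io_completion()'s real locals:

/* Sense classification sketch: aborts are never retried; a DIF error
 * also carries a distinct error code and description.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

enum action { ACTION_FAIL, ACTION_RETRY };

#define ABORTED_COMMAND 0x0b     /* SCSI sense key */

struct verdict {
	enum action action;
	int error;
	const char *description;
};

static struct verdict classify(uint8_t sense_key, uint8_t asc)
{
	struct verdict v = { ACTION_RETRY, -EIO, NULL };

	if (sense_key == ABORTED_COMMAND) {
		v.action = ACTION_FAIL;                 /* never retry an abort */
		if (asc == 0x10) {                      /* DIF / protection error */
			v.description = "Target Data Integrity Failure";
			v.error = -EILSEQ;
		}
	}
	return v;
}

int main(void)
{
	struct verdict v = classify(ABORTED_COMMAND, 0x10);

	printf("action=%s error=%d (%s)\n",
	       v.action == ACTION_FAIL ? "fail" : "retry", v.error,
	       v.description ? v.description : "-");
	return 0;
}
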
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 66505bb79410..8f4de20c9deb 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -317,6 +317,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | |||
317 | return sdev; | 317 | return sdev; |
318 | 318 | ||
319 | out_device_destroy: | 319 | out_device_destroy: |
320 | scsi_device_set_state(sdev, SDEV_DEL); | ||
320 | transport_destroy_device(&sdev->sdev_gendev); | 321 | transport_destroy_device(&sdev->sdev_gendev); |
321 | put_device(&sdev->sdev_gendev); | 322 | put_device(&sdev->sdev_gendev); |
322 | out: | 323 | out: |
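The scsi_scan.c one-liner marks the half-created device SDEV_DEL before the final put, so the release path can tell it was never fully added. A generic sketch of that mark-then-release ordering (state names and layout invented, not the sdev state machine):

/* Error-path ordering sketch: flag the object as deleted first, then
 * drop the creating reference, so the release callback can distinguish
 * a half-constructed object from one that was fully registered.
 */
#include <stdio.h>

enum dev_state { DEV_CREATED, DEV_RUNNING, DEV_DEL };

struct sdev {
	enum dev_state state;
	int refcount;
};

static void sdev_release(struct sdev *d)
{
	if (d->state == DEV_DEL)
		printf("release: skipping teardown of never-added device\n");
	else
		printf("release: full teardown\n");
}

static void sdev_put(struct sdev *d)
{
	if (--d->refcount == 0)
		sdev_release(d);
}

int main(void)
{
	struct sdev d = { .state = DEV_CREATED, .refcount = 1 };

	/* setup failed: flag the state first, then drop the reference */
	d.state = DEV_DEL;
	sdev_put(&d);
	return 0;
}
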
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d57566b8be0a..4970ae4a62d6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -107,6 +107,7 @@ static void scsi_disk_release(struct device *cdev); | |||
107 | static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); | 107 | static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); |
108 | static void sd_print_result(struct scsi_disk *, int); | 108 | static void sd_print_result(struct scsi_disk *, int); |
109 | 109 | ||
110 | static DEFINE_SPINLOCK(sd_index_lock); | ||
110 | static DEFINE_IDA(sd_index_ida); | 111 | static DEFINE_IDA(sd_index_ida); |
111 | 112 | ||
112 | /* This semaphore is used to mediate the 0->1 reference get in the | 113 | /* This semaphore is used to mediate the 0->1 reference get in the |
@@ -1166,23 +1167,19 @@ sd_spinup_disk(struct scsi_disk *sdkp) | |||
1166 | /* | 1167 | /* |
1167 | * The device does not want the automatic start to be issued. | 1168 | * The device does not want the automatic start to be issued. |
1168 | */ | 1169 | */ |
1169 | if (sdkp->device->no_start_on_add) { | 1170 | if (sdkp->device->no_start_on_add) |
1170 | break; | 1171 | break; |
1171 | } | ||
1172 | |||
1173 | /* | ||
1174 | * If manual intervention is required, or this is an | ||
1175 | * absent USB storage device, a spinup is meaningless. | ||
1176 | */ | ||
1177 | if (sense_valid && | ||
1178 | sshdr.sense_key == NOT_READY && | ||
1179 | sshdr.asc == 4 && sshdr.ascq == 3) { | ||
1180 | break; /* manual intervention required */ | ||
1181 | 1172 | ||
1182 | /* | 1173 | if (sense_valid && sshdr.sense_key == NOT_READY) { |
1183 | * Issue command to spin up drive when not ready | 1174 | if (sshdr.asc == 4 && sshdr.ascq == 3) |
1184 | */ | 1175 | break; /* manual intervention required */ |
1185 | } else if (sense_valid && sshdr.sense_key == NOT_READY) { | 1176 | if (sshdr.asc == 4 && sshdr.ascq == 0xb) |
1177 | break; /* standby */ | ||
1178 | if (sshdr.asc == 4 && sshdr.ascq == 0xc) | ||
1179 | break; /* unavailable */ | ||
1180 | /* | ||
1181 | * Issue command to spin up drive when not ready | ||
1182 | */ | ||
1186 | if (!spintime) { | 1183 | if (!spintime) { |
1187 | sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); | 1184 | sd_printk(KERN_NOTICE, sdkp, "Spinning up disk..."); |
1188 | cmd[0] = START_STOP; | 1185 | cmd[0] = START_STOP; |
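The reworked NOT READY handling in sd_spinup_disk() gives up without issuing START STOP UNIT for three asc/ascq pairs: 04/03 (manual intervention required), 04/0b (standby) and 04/0c (unavailable). A small helper expressing the same decision, with the constants taken from the hunk's comments and the helper itself purely illustrative:

/* Spin-up decision sketch: some NOT READY causes cannot be cured by a
 * START STOP UNIT, so the retry loop should bail out instead.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define NOT_READY 0x02           /* SCSI sense key */

static bool spinup_is_pointless(uint8_t key, uint8_t asc, uint8_t ascq)
{
	if (key != NOT_READY || asc != 0x04)
		return false;
	switch (ascq) {
	case 0x03:              /* manual intervention required */
	case 0x0b:              /* target port in standby state */
	case 0x0c:              /* target port unavailable */
		return true;
	default:
		return false;   /* worth issuing START STOP UNIT */
	}
}

int main(void)
{
	printf("04/03 -> %s\n",
	       spinup_is_pointless(NOT_READY, 0x04, 0x03) ? "give up" : "spin up");
	printf("04/01 -> %s\n",
	       spinup_is_pointless(NOT_READY, 0x04, 0x01) ? "give up" : "spin up");
	return 0;
}
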
@@ -1914,7 +1911,9 @@ static int sd_probe(struct device *dev) | |||
1914 | if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) | 1911 | if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) |
1915 | goto out_put; | 1912 | goto out_put; |
1916 | 1913 | ||
1914 | spin_lock(&sd_index_lock); | ||
1917 | error = ida_get_new(&sd_index_ida, &index); | 1915 | error = ida_get_new(&sd_index_ida, &index); |
1916 | spin_unlock(&sd_index_lock); | ||
1918 | } while (error == -EAGAIN); | 1917 | } while (error == -EAGAIN); |
1919 | 1918 | ||
1920 | if (error) | 1919 | if (error) |
@@ -1936,7 +1935,9 @@ static int sd_probe(struct device *dev) | |||
1936 | return 0; | 1935 | return 0; |
1937 | 1936 | ||
1938 | out_free_index: | 1937 | out_free_index: |
1938 | spin_lock(&sd_index_lock); | ||
1939 | ida_remove(&sd_index_ida, index); | 1939 | ida_remove(&sd_index_ida, index); |
1940 | spin_unlock(&sd_index_lock); | ||
1940 | out_put: | 1941 | out_put: |
1941 | put_disk(gd); | 1942 | put_disk(gd); |
1942 | out_free: | 1943 | out_free: |
@@ -1986,7 +1987,9 @@ static void scsi_disk_release(struct device *dev) | |||
1986 | struct scsi_disk *sdkp = to_scsi_disk(dev); | 1987 | struct scsi_disk *sdkp = to_scsi_disk(dev); |
1987 | struct gendisk *disk = sdkp->disk; | 1988 | struct gendisk *disk = sdkp->disk; |
1988 | 1989 | ||
1990 | spin_lock(&sd_index_lock); | ||
1989 | ida_remove(&sd_index_ida, sdkp->index); | 1991 | ida_remove(&sd_index_ida, sdkp->index); |
1992 | spin_unlock(&sd_index_lock); | ||
1990 | 1993 | ||
1991 | disk->private_data = NULL; | 1994 | disk->private_data = NULL; |
1992 | put_disk(disk); | 1995 | put_disk(disk); |
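The new sd_index_lock serializes ida_get_new() and ida_remove(), since the IDA of that era left locking to the caller. The userspace sketch below makes the same point with a simple bitmap allocator guarded by one mutex shared between the allocate and free paths (build with -pthread); the allocator is only a stand-in for the IDA:

/* Shared-lock allocator sketch: both get and put take the same lock,
 * mirroring the spinlock added around the sd index IDA.
 */
#include <stdio.h>
#include <pthread.h>

#define MAX_DISKS 32

static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int index_map;           /* bit n set => index n in use */

static int get_index(void)
{
	int i, ret = -1;

	pthread_mutex_lock(&index_lock);
	for (i = 0; i < MAX_DISKS; i++) {
		if (!(index_map & (1u << i))) {
			index_map |= 1u << i;
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&index_lock);
	return ret;
}

static void put_index(int i)
{
	pthread_mutex_lock(&index_lock);
	index_map &= ~(1u << i);
	pthread_mutex_unlock(&index_lock);
}

int main(void)
{
	int a = get_index(), b = get_index();

	printf("allocated sd indexes %d and %d\n", a, b);
	put_index(a);
	printf("after free, next index is %d\n", get_index());
	return 0;
}
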
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8f0bd3f7a59f..516925d8b570 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -1078,7 +1078,7 @@ sg_ioctl(struct inode *inode, struct file *filp, | |||
1078 | case BLKTRACESETUP: | 1078 | case BLKTRACESETUP: |
1079 | return blk_trace_setup(sdp->device->request_queue, | 1079 | return blk_trace_setup(sdp->device->request_queue, |
1080 | sdp->disk->disk_name, | 1080 | sdp->disk->disk_name, |
1081 | sdp->device->sdev_gendev.devt, | 1081 | MKDEV(SCSI_GENERIC_MAJOR, sdp->index), |
1082 | (char *)arg); | 1082 | (char *)arg); |
1083 | case BLKTRACESTART: | 1083 | case BLKTRACESTART: |
1084 | return blk_trace_startstop(sdp->device->request_queue, 1); | 1084 | return blk_trace_startstop(sdp->device->request_queue, 1); |
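The sg ioctl now hands blk_trace_setup() a dev_t built from the sg character-device major and the sg index, so the trace is attributed to the /dev/sgN node actually in use. The sketch below mirrors that composition; the 20-bit minor split follows the kernel's MKDEV() convention and major 21 is the well-known sg major:

/* dev_t composition sketch: major in the high bits, minor (the sg
 * index) in the low 20 bits.
 */
#include <stdio.h>
#include <stdint.h>

#define MINORBITS 20
#define MKDEV(ma, mi)  (((uint32_t)(ma) << MINORBITS) | (mi))
#define SCSI_GENERIC_MAJOR 21

int main(void)
{
	int sg_index = 3;                         /* /dev/sg3 */
	uint32_t devt = MKDEV(SCSI_GENERIC_MAJOR, sg_index);

	printf("dev_t for /dev/sg%d = %u (major %u, minor %u)\n",
	       sg_index, devt, devt >> MINORBITS, devt & ((1u << MINORBITS) - 1));
	return 0;
}
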
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c index a8d61a62522e..97f3158fa7b5 100644 --- a/drivers/scsi/zalon.c +++ b/drivers/scsi/zalon.c | |||
@@ -137,7 +137,7 @@ zalon_probe(struct parisc_device *dev) | |||
137 | goto fail; | 137 | goto fail; |
138 | 138 | ||
139 | if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { | 139 | if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { |
140 | dev_printk(KERN_ERR, dev, "irq problem with %d, detaching\n ", | 140 | dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ", |
141 | dev->irq); | 141 | dev->irq); |
142 | goto fail; | 142 | goto fail; |
143 | } | 143 | } |