Diffstat (limited to 'drivers/scsi/cxgb3i')

 drivers/scsi/cxgb3i/cxgb3i.h         |  21 ++
 drivers/scsi/cxgb3i/cxgb3i_ddp.c     |  19 +-
 drivers/scsi/cxgb3i/cxgb3i_ddp.h     |   5 +-
 drivers/scsi/cxgb3i/cxgb3i_init.c    |   4 +-
 drivers/scsi/cxgb3i/cxgb3i_iscsi.c   |  22 +-
 drivers/scsi/cxgb3i/cxgb3i_offload.c | 146 ++++++++-----
 drivers/scsi/cxgb3i/cxgb3i_offload.h |  29 ++-
 drivers/scsi/cxgb3i/cxgb3i_pdu.c     | 275 +++++++++++++----------
 drivers/scsi/cxgb3i/cxgb3i_pdu.h     |   2 +-
 9 files changed, 354 insertions(+), 169 deletions(-)
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index fde6e4c634e7..a7cf550b9cca 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/scatterlist.h>
+#include <linux/skbuff.h>
 #include <scsi/libiscsi_tcp.h>
 
 /* from cxgb3 LLD */
@@ -113,6 +114,26 @@ struct cxgb3i_endpoint {
 	struct cxgb3i_conn *cconn;
 };
 
+/**
+ * struct cxgb3i_task_data - private iscsi task data
+ *
+ * @nr_frags:	# of coalesced page frags (from scsi sgl)
+ * @frags:	coalesced page frags (from scsi sgl)
+ * @skb:	tx pdu skb
+ * @offset:	data offset for the next pdu
+ * @count:	max. possible pdu payload
+ * @sgoffset:	offset to the first sg entry for a given offset
+ */
+#define MAX_PDU_FRAGS	((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
+struct cxgb3i_task_data {
+	unsigned short nr_frags;
+	skb_frag_t frags[MAX_PDU_FRAGS];
+	struct sk_buff *skb;
+	unsigned int offset;
+	unsigned int count;
+	unsigned int sgoffset;
+};
+
 int cxgb3i_iscsi_init(void);
 void cxgb3i_iscsi_cleanup(void);
 
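Note: the struct above is carved out of libiscsi's per-task dd_data allocation,
directly behind struct iscsi_tcp_task -- see the cxgb3i_iscsi.c hunk below,
which grows the task size passed to iscsi_session_setup(). A minimal sketch of
that addressing follows; the helper name task_to_tdata() is hypothetical, but
the pointer arithmetic mirrors what this patch does in
cxgb3i_conn_cleanup_task() and cxgb3i_conn_alloc_pdu():

	/* dd_data layout: | iscsi_tcp_task | cxgb3i_task_data | */
	static inline struct cxgb3i_task_data *task_to_tdata(struct iscsi_task *task)
	{
		return task->dd_data + sizeof(struct iscsi_tcp_task);
	}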
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 08f3a09d9233..a83d36e4926f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -639,10 +639,11 @@ static int ddp_init(struct t3cdev *tdev)
 	write_unlock(&cxgb3i_ddp_rwlock);
 
 	ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
-			"pkt %u,%u.\n",
+			"pkt %u/%u, %u/%u.\n",
 			ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
 			ddp->idx_mask, ddp->rsvd_tag_mask,
-			ddp->max_txsz, ddp->max_rxsz);
+			ddp->max_txsz, uinfo.max_txsz,
+			ddp->max_rxsz, uinfo.max_rxsz);
 	return 0;
 
 free_ddp_map:
@@ -654,8 +655,8 @@ free_ddp_map:
  * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
  * @tdev: t3cdev adapter
  * @tformat: tag format
- * @txsz: max tx pkt size, filled in by this func.
- * @rxsz: max rx pkt size, filled in by this func.
+ * @txsz: max tx pdu payload size, filled in by this func.
+ * @rxsz: max rx pdu payload size, filled in by this func.
  * initialize the ddp pagepod manager for a given adapter if needed and
  * setup the tag format for a given iscsi entity
  */
@@ -685,10 +686,12 @@ int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
 			tformat->sw_bits, tformat->rsvd_bits,
 			tformat->rsvd_shift, tformat->rsvd_mask);
 
-	*txsz = ddp->max_txsz;
-	*rxsz = ddp->max_rxsz;
-	ddp_log_info("ddp max pkt size: %u, %u.\n",
-			ddp->max_txsz, ddp->max_rxsz);
+	*txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+			ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
+	*rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+			ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
+	ddp_log_info("max payload size: %u/%u, %u/%u.\n",
+			*txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
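Note: with the constants this patch adds to cxgb3i_ddp.h
(ISCSI_PDU_NONPAYLOAD_LEN = 312, ULP2_MAX_PKT_SIZE = 16224),
ULP2_MAX_PDU_PAYLOAD works out to 16224 - 312 = 15912 bytes. A standalone
sketch of the clamp arithmetic, as plain userspace C:

	#include <stdio.h>

	/* Constants copied from this patch's cxgb3i_ddp.h. */
	#define ISCSI_PDU_NONPAYLOAD_LEN 312	/* bhs(48) + ahs(256) + digest(8) */
	#define ULP2_MAX_PKT_SIZE 16224
	#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)

	/* Same arithmetic as the min_t() clamp in cxgb3i_adapter_ddp_init():
	 * reserve room for the BHS, the maximum AHS and the digests inside
	 * whatever packet size the adapter reports. */
	static unsigned int clamp_payload(unsigned int max_pkt)
	{
		unsigned int payload = max_pkt - ISCSI_PDU_NONPAYLOAD_LEN;

		return payload < ULP2_MAX_PDU_PAYLOAD ? payload : ULP2_MAX_PDU_PAYLOAD;
	}

	int main(void)
	{
		printf("%u\n", clamp_payload(16224));	/* prints 15912 */
		return 0;
	}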
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 5c7c4d95c493..3faae7831c83 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -13,6 +13,8 @@
 #ifndef __CXGB3I_ULP2_DDP_H__
 #define __CXGB3I_ULP2_DDP_H__
 
+#include <linux/vmalloc.h>
+
 /**
  * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
  *
@@ -85,8 +87,9 @@ struct cxgb3i_ddp_info {
 	struct sk_buff **gl_skb;
 };
 
+#define ISCSI_PDU_NONPAYLOAD_LEN	312 /* bhs(48) + ahs(256) + digest(8) */
 #define ULP2_MAX_PKT_SIZE 16224
-#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX)
+#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
 #define PPOD_PAGES_MAX 4
 #define PPOD_PAGES_SHIFT 2	/* 4 pages per pod */
 
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index 091ecb4d9f3d..1ce9f244e46c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -12,8 +12,8 @@
12#include "cxgb3i.h" 12#include "cxgb3i.h"
13 13
14#define DRV_MODULE_NAME "cxgb3i" 14#define DRV_MODULE_NAME "cxgb3i"
15#define DRV_MODULE_VERSION "1.0.0" 15#define DRV_MODULE_VERSION "1.0.1"
16#define DRV_MODULE_RELDATE "Jun. 1, 2008" 16#define DRV_MODULE_RELDATE "Jan. 2009"
17 17
18static char version[] = 18static char version[] =
19 "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME 19 "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index d83464b9b3f9..fa2a44f37b36 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -364,7 +364,8 @@ cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
 
 	cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
 					  cmds_max,
-					  sizeof(struct iscsi_tcp_task),
+					  sizeof(struct iscsi_tcp_task) +
+					  sizeof(struct cxgb3i_task_data),
 					  initial_cmdsn, ISCSI_MAX_TARGET);
 	if (!cls_session)
 		return NULL;
@@ -402,17 +403,15 @@ static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
-	unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
-				 cconn->hba->snic->tx_max_size -
-				 ISCSI_PDU_NONPAYLOAD_MAX);
+	unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM);
 
+	max = min(cconn->hba->snic->tx_max_size, max);
 	if (conn->max_xmit_dlength)
-		conn->max_xmit_dlength = min_t(unsigned int,
-					       conn->max_xmit_dlength, max);
+		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
 	else
 		conn->max_xmit_dlength = max;
 	align_pdu_size(conn->max_xmit_dlength);
-	cxgb3i_log_info("conn 0x%p, max xmit %u.\n",
+	cxgb3i_api_debug("conn 0x%p, max xmit %u.\n",
 			conn, conn->max_xmit_dlength);
 	return 0;
 }
@@ -427,9 +426,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
-	unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
-				 cconn->hba->snic->rx_max_size -
-				 ISCSI_PDU_NONPAYLOAD_MAX);
+	unsigned int max = cconn->hba->snic->rx_max_size;
 
 	align_pdu_size(max);
 	if (conn->max_recv_dlength) {
@@ -439,8 +436,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
 				conn->max_recv_dlength, max);
 			return -EINVAL;
 		}
-		conn->max_recv_dlength = min_t(unsigned int,
-					       conn->max_recv_dlength, max);
+		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
 		align_pdu_size(conn->max_recv_dlength);
 	} else
 		conn->max_recv_dlength = max;
@@ -844,7 +840,7 @@ static struct scsi_host_template cxgb3i_host_template = {
 	.proc_name		= "cxgb3i",
 	.queuecommand		= iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
-	.can_queue		= 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1),
+	.can_queue		= CXGB3I_SCSI_QDEPTH_DFLT - 1,
 	.sg_tablesize		= SG_ALL,
 	.max_sectors		= 0xFFFF,
 	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
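Note: the rewritten cxgb3i_conn_max_xmit_dlength() above no longer keys off
ULP2_MAX_PDU_PAYLOAD; it sizes the pdu so the payload fits either in coalesced
page frags (512 bytes per frag slot) or in the skb headroom. Assuming 4KB
pages, where MAX_SKB_FRAGS is 18 (65536/4096 + 2) in kernels of this vintage,
a rough restatement of the arithmetic (hypothetical helper, plain C):

	/* max_xmit_dlength = min(tx_max_size,
	 *                        max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM)),
	 * then rounded down by align_pdu_size(). 512 * 18 = 9216 on 4KB pages. */
	static unsigned int xmit_dlength_ceiling(unsigned int tx_max_size,
						 unsigned int skb_tx_headroom)
	{
		unsigned int frag_payload = 512 * 18;	/* 512 * MAX_SKB_FRAGS, assumed */
		unsigned int max = frag_payload > skb_tx_headroom ?
					frag_payload : skb_tx_headroom;

		return tx_max_size < max ? tx_max_size : max;
	}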
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index a865f1fefe8b..de3b3b614cca 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -23,19 +23,19 @@
 #include "cxgb3i_ddp.h"
 
 #ifdef __DEBUG_C3CN_CONN__
-#define c3cn_conn_debug		cxgb3i_log_info
+#define c3cn_conn_debug		cxgb3i_log_debug
 #else
 #define c3cn_conn_debug(fmt...)
 #endif
 
 #ifdef __DEBUG_C3CN_TX__
 #define c3cn_tx_debug		cxgb3i_log_debug
 #else
 #define c3cn_tx_debug(fmt...)
 #endif
 
 #ifdef __DEBUG_C3CN_RX__
 #define c3cn_rx_debug		cxgb3i_log_debug
 #else
 #define c3cn_rx_debug(fmt...)
 #endif
@@ -47,9 +47,9 @@ static int cxgb3_rcv_win = 256 * 1024;
 module_param(cxgb3_rcv_win, int, 0644);
 MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
 
-static int cxgb3_snd_win = 64 * 1024;
+static int cxgb3_snd_win = 128 * 1024;
 module_param(cxgb3_snd_win, int, 0644);
-MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)");
+MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)");
 
 static int cxgb3_rx_credit_thres = 10 * 1024;
 module_param(cxgb3_rx_credit_thres, int, 0644);
@@ -301,8 +301,8 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
 		       int flags)
 {
-	CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
-	CXGB3_SKB_CB(skb)->flags = flags;
+	skb_tcp_seq(skb) = c3cn->write_seq;
+	skb_flags(skb) = flags;
 	__skb_queue_tail(&c3cn->write_queue, skb);
 }
 
@@ -457,12 +457,9 @@ static unsigned int wrlen __read_mostly;
  * The number of WRs needed for an skb depends on the number of fragments
  * in the skb and whether it has any payload in its main body. This maps the
  * length of the gather list represented by an skb into the # of necessary WRs.
- *
- * The max. length of an skb is controlled by the max pdu size which is ~16K.
- * Also, assume the min. fragment length is the sector size (512), then add
- * extra fragment counts for iscsi bhs and payload padding.
+ * The extra two fragments are for iscsi bhs and payload padding.
  */
-#define SKB_WR_LIST_SIZE (16384/512 + 3)
+#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)
 static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
 
 static void s3_init_wr_tab(unsigned int wr_len)
@@ -485,7 +482,7 @@ static void s3_init_wr_tab(unsigned int wr_len)
 
 static inline void reset_wr_list(struct s3_conn *c3cn)
 {
-	c3cn->wr_pending_head = NULL;
+	c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL;
 }
 
 /*
@@ -496,7 +493,7 @@ static inline void reset_wr_list(struct s3_conn *c3cn)
 static inline void enqueue_wr(struct s3_conn *c3cn,
 			      struct sk_buff *skb)
 {
-	skb_wr_data(skb) = NULL;
+	skb_tx_wr_next(skb) = NULL;
 
 	/*
 	 * We want to take an extra reference since both us and the driver
@@ -509,10 +506,22 @@ static inline void enqueue_wr(struct s3_conn *c3cn,
 	if (!c3cn->wr_pending_head)
 		c3cn->wr_pending_head = skb;
 	else
-		skb_wr_data(skb) = skb;
+		skb_tx_wr_next(c3cn->wr_pending_tail) = skb;
 	c3cn->wr_pending_tail = skb;
 }
 
+static int count_pending_wrs(struct s3_conn *c3cn)
+{
+	int n = 0;
+	const struct sk_buff *skb = c3cn->wr_pending_head;
+
+	while (skb) {
+		n += skb->csum;
+		skb = skb_tx_wr_next(skb);
+	}
+	return n;
+}
+
 static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
 {
 	return c3cn->wr_pending_head;
@@ -529,8 +538,8 @@ static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
 
 	if (likely(skb)) {
 		/* Don't bother clearing the tail */
-		c3cn->wr_pending_head = skb_wr_data(skb);
-		skb_wr_data(skb) = NULL;
+		c3cn->wr_pending_head = skb_tx_wr_next(skb);
+		skb_tx_wr_next(skb) = NULL;
 	}
 	return skb;
 }
@@ -543,13 +552,14 @@ static void purge_wr_queue(struct s3_conn *c3cn)
 }
 
 static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
-				   int len)
+				   int len, int req_completion)
 {
 	struct tx_data_wr *req;
 
 	skb_reset_transport_header(skb);
 	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
-	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
+			   (req_completion ? F_WR_COMPL : 0));
 	req->wr_lo = htonl(V_WR_TID(c3cn->tid));
 	req->sndseq = htonl(c3cn->snd_nxt);
 	/* len includes the length of any HW ULP additions */
@@ -592,7 +602,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
 
 	if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
 		     c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
-		     c3cn->state == C3CN_STATE_ABORTING)) {
+		     c3cn->state >= C3CN_STATE_ABORTING)) {
 		c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
 			      c3cn, c3cn->state);
 		return 0;
@@ -615,7 +625,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
 		if (c3cn->wr_avail < wrs_needed) {
 			c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
 				      "wr %d < %u.\n",
-				      c3cn, skb->len, skb->datalen, frags,
+				      c3cn, skb->len, skb->data_len, frags,
 				      wrs_needed, c3cn->wr_avail);
 			break;
 		}
@@ -627,20 +637,24 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
 		c3cn->wr_unacked += wrs_needed;
 		enqueue_wr(c3cn, skb);
 
-		if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) {
-			len += ulp_extra_len(skb);
-			make_tx_data_wr(c3cn, skb, len);
-			c3cn->snd_nxt += len;
-			if ((req_completion
-			    && c3cn->wr_unacked == wrs_needed)
-			    || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL)
-			    || c3cn->wr_unacked >= c3cn->wr_max / 2) {
-				struct work_request_hdr *wr = cplhdr(skb);
-
-				wr->wr_hi |= htonl(F_WR_COMPL);
+		c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, "
+			      "wr %d, left %u, unack %u.\n",
+			      c3cn, skb->len, skb->data_len, frags,
+			      wrs_needed, c3cn->wr_avail, c3cn->wr_unacked);
+
+
+		if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) {
+			if ((req_completion &&
+			     c3cn->wr_unacked == wrs_needed) ||
+			    (skb_flags(skb) & C3CB_FLAG_COMPL) ||
+			    c3cn->wr_unacked >= c3cn->wr_max / 2) {
+				req_completion = 1;
 				c3cn->wr_unacked = 0;
 			}
-			CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR;
+			len += ulp_extra_len(skb);
+			make_tx_data_wr(c3cn, skb, len, req_completion);
+			c3cn->snd_nxt += len;
+			skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR;
 		}
 
 		total_size += skb->truesize;
@@ -735,8 +749,11 @@ static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
 	if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
 		/* upper layer has requested closing */
 		send_abort_req(c3cn);
-	else if (c3cn_push_tx_frames(c3cn, 1))
+	else {
+		if (skb_queue_len(&c3cn->write_queue))
+			c3cn_push_tx_frames(c3cn, 1);
 		cxgb3i_conn_tx_open(c3cn);
+	}
 }
 
 static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
@@ -1082,8 +1099,8 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
 		return;
 	}
 
-	CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq);
-	CXGB3_SKB_CB(skb)->flags = 0;
+	skb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
+	skb_flags(skb) = 0;
 
 	skb_reset_transport_header(skb);
 	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
@@ -1103,12 +1120,12 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
 		goto abort_conn;
 
 	skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
-	skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len);
-	skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
+	skb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
+	skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
 	status = ntohl(ddp_cpl.ddp_status);
 
 	c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
-		      skb, skb->len, skb_ulp_pdulen(skb), status);
+		      skb, skb->len, skb_rx_pdulen(skb), status);
 
 	if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
 		skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
@@ -1126,7 +1143,7 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
 	} else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
 		skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
 
-	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb);
+	c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb);
 	__pskb_trim(skb, len);
 	__skb_queue_tail(&c3cn->receive_queue, skb);
 	cxgb3i_conn_pdu_ready(c3cn);
@@ -1151,12 +1168,27 @@ static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
  * Process an acknowledgment of WR completion. Advance snd_una and send the
  * next batch of work requests from the write queue.
  */
+static void check_wr_invariants(struct s3_conn *c3cn)
+{
+	int pending = count_pending_wrs(c3cn);
+
+	if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max))
+		cxgb3i_log_error("TID %u: credit imbalance: avail %u, "
+				 "pending %u, total should be %u\n",
+				 c3cn->tid, c3cn->wr_avail, pending,
+				 c3cn->wr_max);
+}
+
 static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
 {
 	struct cpl_wr_ack *hdr = cplhdr(skb);
 	unsigned int credits = ntohs(hdr->credits);
 	u32 snd_una = ntohl(hdr->snd_una);
 
+	c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n",
+		      credits, c3cn->wr_avail, c3cn->wr_unacked,
+		      c3cn->tid, c3cn->state);
+
 	c3cn->wr_avail += credits;
 	if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
 		c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;
@@ -1171,6 +1203,17 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
 			break;
 		}
 		if (unlikely(credits < p->csum)) {
+			struct tx_data_wr *w = cplhdr(p);
+			cxgb3i_log_error("TID %u got %u WR credits need %u, "
+					 "len %u, main body %u, frags %u, "
+					 "seq # %u, ACK una %u, ACK nxt %u, "
+					 "WR_AVAIL %u, WRs pending %u\n",
+					 c3cn->tid, credits, p->csum, p->len,
+					 p->len - p->data_len,
+					 skb_shinfo(p)->nr_frags,
+					 ntohl(w->sndseq), snd_una,
+					 ntohl(hdr->snd_nxt), c3cn->wr_avail,
+					 count_pending_wrs(c3cn) - credits);
 			p->csum -= credits;
 			break;
 		} else {
@@ -1180,15 +1223,24 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
 		}
 	}
 
-	if (unlikely(before(snd_una, c3cn->snd_una)))
+	check_wr_invariants(c3cn);
+
+	if (unlikely(before(snd_una, c3cn->snd_una))) {
+		cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK "
+				 "snd_una %u\n",
+				 c3cn->tid, snd_una, c3cn->snd_una);
 		goto out_free;
+	}
 
 	if (c3cn->snd_una != snd_una) {
 		c3cn->snd_una = snd_una;
 		dst_confirm(c3cn->dst_cache);
 	}
 
-	if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0))
+	if (skb_queue_len(&c3cn->write_queue)) {
+		if (c3cn_push_tx_frames(c3cn, 0))
+			cxgb3i_conn_tx_open(c3cn);
+	} else
 		cxgb3i_conn_tx_open(c3cn);
 out_free:
 	__kfree_skb(skb);
@@ -1452,7 +1504,7 @@ static void init_offload_conn(struct s3_conn *c3cn,
 				struct dst_entry *dst)
 {
 	BUG_ON(c3cn->cdev != cdev);
-	c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs;
+	c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1;
 	c3cn->wr_unacked = 0;
 	c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
 
@@ -1671,9 +1723,17 @@ int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
 		goto out_err;
 	}
 
-	err = -EPIPE;
 	if (c3cn->err) {
 		c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
+		err = -EPIPE;
+		goto out_err;
+	}
+
+	if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) {
+		c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
+			      c3cn, c3cn->write_seq, c3cn->snd_una,
+			      cxgb3_snd_win);
+		err = -EAGAIN;
 		goto out_err;
 	}
 
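Note: the tx changes above all serve one invariant, now checked on every
WR_ACK: credits still held by the hardware (summed over the pending-WR list,
where each skb's csum field caches its WR count) plus credits available to
software must equal wr_max. A standalone model of that bookkeeping (userspace
C, hypothetical names):

	#include <assert.h>

	struct wr_credits {
		unsigned int wr_max;	/* total, max_wrs - 1 per this patch */
		unsigned int wr_avail;	/* credits software may still spend */
		unsigned int pending;	/* credits held by unacked WRs */
	};

	static void wr_consume(struct wr_credits *c, unsigned int n)
	{
		assert(c->wr_avail >= n);
		c->wr_avail -= n;	/* c3cn_push_tx_frames() */
		c->pending += n;	/* enqueue_wr(): n cached in skb->csum */
	}

	static void wr_ack(struct wr_credits *c, unsigned int n)
	{
		c->wr_avail += n;	/* process_wr_ack() */
		c->pending -= n;	/* dequeue_wr() drops fully-acked skbs */
		/* the invariant check_wr_invariants() logs violations of: */
		assert(c->wr_avail + c->pending == c->wr_max);
	}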
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index d23156907ffd..6344b9eb2589 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -178,25 +178,33 @@ void cxgb3i_c3cn_release(struct s3_conn *);
  * @flag:	see C3CB_FLAG_* below
  * @ulp_mode:	ULP mode/submode of sk_buff
  * @seq:	tcp sequence number
- * @ddigest:	pdu data digest
- * @pdulen:	recovered pdu length
- * @wr_data:	scratch area for tx wr
  */
+struct cxgb3_skb_rx_cb {
+	__u32 ddigest;			/* data digest */
+	__u32 pdulen;			/* recovered pdu length */
+};
+
+struct cxgb3_skb_tx_cb {
+	struct sk_buff *wr_next;	/* next wr */
+};
+
 struct cxgb3_skb_cb {
 	__u8 flags;
 	__u8 ulp_mode;
 	__u32 seq;
-	__u32 ddigest;
-	__u32 pdulen;
-	struct sk_buff *wr_data;
+	union {
+		struct cxgb3_skb_rx_cb rx;
+		struct cxgb3_skb_tx_cb tx;
+	};
 };
 
 #define CXGB3_SKB_CB(skb)	((struct cxgb3_skb_cb *)&((skb)->cb[0]))
-
+#define skb_flags(skb)		(CXGB3_SKB_CB(skb)->flags)
 #define skb_ulp_mode(skb)	(CXGB3_SKB_CB(skb)->ulp_mode)
-#define skb_ulp_ddigest(skb)	(CXGB3_SKB_CB(skb)->ddigest)
-#define skb_ulp_pdulen(skb)	(CXGB3_SKB_CB(skb)->pdulen)
-#define skb_wr_data(skb)	(CXGB3_SKB_CB(skb)->wr_data)
+#define skb_tcp_seq(skb)	(CXGB3_SKB_CB(skb)->seq)
+#define skb_rx_ddigest(skb)	(CXGB3_SKB_CB(skb)->rx.ddigest)
+#define skb_rx_pdulen(skb)	(CXGB3_SKB_CB(skb)->rx.pdulen)
+#define skb_tx_wr_next(skb)	(CXGB3_SKB_CB(skb)->tx.wr_next)
 
 enum c3cb_flags {
 	C3CB_FLAG_NEED_HDR = 1 << 0,	/* packet needs a TX_DATA_WR header */
@@ -217,6 +225,7 @@ struct sge_opaque_hdr {
 /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
 #define TX_HEADER_LEN \
 	(sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
+#define SKB_TX_HEADROOM		SKB_MAX_HEAD(TX_HEADER_LEN)
 
 /*
  * get and set private ip for iscsi traffic
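Note: the rx/tx split works because any given skb travels exactly one of the
two paths -- tx skbs need the pending-WR linkage, rx skbs need the digest and
recovered pdu length -- so the union lets the two field sets overlay each
other inside the 48-byte skb->cb. A userspace model with a compile-time size
check (model_cb is a stand-in, not the kernel struct):

	struct model_cb {
		unsigned char flags;
		unsigned char ulp_mode;
		unsigned int seq;
		union {			/* one path active per skb */
			struct { unsigned int ddigest, pdulen; } rx;
			struct { void *wr_next; } tx;
		};
	};

	/* 48 == sizeof(((struct sk_buff *)0)->cb) in kernels of this vintage */
	typedef char model_cb_fits[sizeof(struct model_cb) <= 48 ? 1 : -1];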
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index ce7ce8c6094c..17115c230d65 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -32,6 +32,10 @@
 #define cxgb3i_tx_debug(fmt...)
 #endif
 
+/* always allocate rooms for AHS */
+#define SKB_TX_PDU_HEADER_LEN	\
+	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
+static unsigned int skb_extra_headroom;
 static struct page *pad_page;
 
 /*
@@ -146,12 +150,13 @@ static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
 
 void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
 {
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgb3i_task_data *tdata = task->dd_data +
+					sizeof(struct iscsi_tcp_task);
 
 	/* never reached the xmit task callout */
-	if (tcp_task->dd_data)
-		kfree_skb(tcp_task->dd_data);
-	tcp_task->dd_data = NULL;
+	if (tdata->skb)
+		__kfree_skb(tdata->skb);
+	memset(tdata, 0, sizeof(struct cxgb3i_task_data));
 
 	/* MNC - Do we need a check in case this is called but
 	 * cxgb3i_conn_alloc_pdu has never been called on the task */
@@ -159,28 +164,102 @@ void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
 	iscsi_tcp_cleanup_task(task);
 }
 
-/*
- * We do not support ahs yet
- */
+static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
+			   unsigned int offset, unsigned int *off,
+			   struct scatterlist **sgp)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sgl, sg, sgcnt, i) {
+		if (offset < sg->length) {
+			*off = offset;
+			*sgp = sg;
+			return 0;
+		}
+		offset -= sg->length;
+	}
+	return -EFAULT;
+}
+
+static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
+			     unsigned int dlen, skb_frag_t *frags,
+			     int frag_max)
+{
+	unsigned int datalen = dlen;
+	unsigned int sglen = sg->length - sgoffset;
+	struct page *page = sg_page(sg);
+	int i;
+
+	i = 0;
+	do {
+		unsigned int copy;
+
+		if (!sglen) {
+			sg = sg_next(sg);
+			if (!sg) {
+				cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
+						 __func__, datalen, dlen);
+				return -EINVAL;
+			}
+			sgoffset = 0;
+			sglen = sg->length;
+			page = sg_page(sg);
+
+		}
+		copy = min(datalen, sglen);
+		if (i && page == frags[i - 1].page &&
+		    sgoffset + sg->offset ==
+			frags[i - 1].page_offset + frags[i - 1].size) {
+			frags[i - 1].size += copy;
+		} else {
+			if (i >= frag_max) {
+				cxgb3i_log_error("%s, too many pages %u, "
+						 "dlen %u.\n", __func__,
+						 frag_max, dlen);
+				return -EINVAL;
+			}
+
+			frags[i].page = page;
+			frags[i].page_offset = sg->offset + sgoffset;
+			frags[i].size = copy;
+			i++;
+		}
+		datalen -= copy;
+		sgoffset += copy;
+		sglen -= copy;
+	} while (datalen);
+
+	return i;
+}
+
 int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 {
+	struct iscsi_conn *conn = task->conn;
 	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct sk_buff *skb;
+	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
+	struct scsi_cmnd *sc = task->sc;
+	int headroom = SKB_TX_PDU_HEADER_LEN;
 
+	tcp_task->dd_data = tdata;
 	task->hdr = NULL;
-	/* always allocate rooms for AHS */
-	skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE +
-			TX_HEADER_LEN, GFP_ATOMIC);
-	if (!skb)
+
+	/* write command, need to send data pdus */
+	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
+	    (opcode == ISCSI_OP_SCSI_CMD &&
+	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
+		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);
+
+	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
+	if (!tdata->skb)
 		return -ENOMEM;
+	skb_reserve(tdata->skb, TX_HEADER_LEN);
 
 	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
-			task, opcode, skb);
+			task, opcode, tdata->skb);
 
-	tcp_task->dd_data = skb;
-	skb_reserve(skb, TX_HEADER_LEN);
-	task->hdr = (struct iscsi_hdr *)skb->data;
-	task->hdr_max = sizeof(struct iscsi_hdr);
+	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+	task->hdr_max = SKB_TX_PDU_HEADER_LEN;
 
 	/* data_out uses scsi_cmd's itt */
 	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
@@ -192,13 +271,13 @@ int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
 			 unsigned int count)
 {
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct sk_buff *skb = tcp_task->dd_data;
 	struct iscsi_conn *conn = task->conn;
-	struct page *pg;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
+	struct sk_buff *skb = tdata->skb;
 	unsigned int datalen = count;
 	int i, padlen = iscsi_padding(count);
-	skb_frag_t *frag;
+	struct page *pg;
 
 	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
 			task, task->sc, offset, count, skb);
@@ -209,90 +288,94 @@ int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
 		return 0;
 
 	if (task->sc) {
-		struct scatterlist *sg;
-		struct scsi_data_buffer *sdb;
-		unsigned int sgoffset = offset;
-		struct page *sgpg;
-		unsigned int sglen;
-
-		sdb = scsi_out(task->sc);
-		sg = sdb->table.sgl;
-
-		for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
-			cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n",
-					i, sg_page(sg), sg->length, sg->offset);
-
-			if (sgoffset < sg->length)
-				break;
-			sgoffset -= sg->length;
+		struct scsi_data_buffer *sdb = scsi_out(task->sc);
+		struct scatterlist *sg = NULL;
+		int err;
+
+		tdata->offset = offset;
+		tdata->count = count;
+		err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
+				      tdata->offset, &tdata->sgoffset, &sg);
+		if (err < 0) {
+			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
+					sdb->table.nents, tdata->offset,
+					sdb->length);
+			return err;
 		}
-		sgpg = sg_page(sg);
-		sglen = sg->length - sgoffset;
-
-		do {
-			int j = skb_shinfo(skb)->nr_frags;
-			unsigned int copy;
-
-			if (!sglen) {
-				sg = sg_next(sg);
-				sgpg = sg_page(sg);
-				sgoffset = 0;
-				sglen = sg->length;
-				++i;
+		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
+					tdata->frags, MAX_PDU_FRAGS);
+		if (err < 0) {
+			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
+					sdb->table.nents, tdata->offset,
+					tdata->count);
+			return err;
+		}
+		tdata->nr_frags = err;
+
+		if (tdata->nr_frags > MAX_SKB_FRAGS ||
+		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
+			char *dst = skb->data + task->hdr_len;
+			skb_frag_t *frag = tdata->frags;
+
+			/* data fits in the skb's headroom */
+			for (i = 0; i < tdata->nr_frags; i++, frag++) {
+				char *src = kmap_atomic(frag->page,
+							KM_SOFTIRQ0);
+
+				memcpy(dst, src+frag->page_offset, frag->size);
+				dst += frag->size;
+				kunmap_atomic(src, KM_SOFTIRQ0);
 			}
-			copy = min(sglen, datalen);
-			if (j && skb_can_coalesce(skb, j, sgpg,
-						  sg->offset + sgoffset)) {
-				skb_shinfo(skb)->frags[j - 1].size += copy;
-			} else {
-				get_page(sgpg);
-				skb_fill_page_desc(skb, j, sgpg,
-						   sg->offset + sgoffset, copy);
+			if (padlen) {
+				memset(dst, 0, padlen);
+				padlen = 0;
 			}
-			sgoffset += copy;
-			sglen -= copy;
-			datalen -= copy;
-		} while (datalen);
+			skb_put(skb, count + padlen);
+		} else {
+			/* data fit into frag_list */
+			for (i = 0; i < tdata->nr_frags; i++)
+				get_page(tdata->frags[i].page);
+
+			memcpy(skb_shinfo(skb)->frags, tdata->frags,
+			       sizeof(skb_frag_t) * tdata->nr_frags);
+			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
+			skb->len += count;
+			skb->data_len += count;
+			skb->truesize += count;
+		}
+
 	} else {
 		pg = virt_to_page(task->data);
 
-		while (datalen) {
-			i = skb_shinfo(skb)->nr_frags;
-			frag = &skb_shinfo(skb)->frags[i];
-
-			get_page(pg);
-			frag->page = pg;
-			frag->page_offset = 0;
-			frag->size = min((unsigned int)PAGE_SIZE, datalen);
-
-			skb_shinfo(skb)->nr_frags++;
-			datalen -= frag->size;
-			pg++;
-		}
+		get_page(pg);
+		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
+				   count);
+		skb->len += count;
+		skb->data_len += count;
+		skb->truesize += count;
 	}
 
 	if (padlen) {
-		i = skb_shinfo(skb)->nr_frags;
-		frag = &skb_shinfo(skb)->frags[i];
-		frag->page = pad_page;
-		frag->page_offset = 0;
-		frag->size = padlen;
-		skb_shinfo(skb)->nr_frags++;
+		get_page(pad_page);
+		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
+				   padlen);
+
+		skb->data_len += padlen;
+		skb->truesize += padlen;
+		skb->len += padlen;
 	}
 
-	datalen = count + padlen;
-	skb->data_len += datalen;
-	skb->truesize += datalen;
-	skb->len += datalen;
 	return 0;
 }
 
+
 int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
 {
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct sk_buff *skb = tcp_task->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
 	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
+	struct sk_buff *skb = tdata->skb;
 	unsigned int datalen;
 	int err;
 
@@ -300,13 +383,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
 		return 0;
 
 	datalen = skb->data_len;
-	tcp_task->dd_data = NULL;
+	tdata->skb = NULL;
 	err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
-	cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
-			task, skb, skb->len, skb->data_len, err);
 	if (err > 0) {
 		int pdulen = err;
 
+		cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+				task, skb, skb->len, skb->data_len, err);
+
 		if (task->conn->hdrdgst_en)
 			pdulen += ISCSI_DIGEST_SIZE;
 		if (datalen && task->conn->datadgst_en)
@@ -325,12 +409,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
 		return err;
 	}
 	/* reset skb to send when we are called again */
-	tcp_task->dd_data = skb;
+	tdata->skb = skb;
 	return -EAGAIN;
 }
 
 int cxgb3i_pdu_init(void)
 {
+	if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
+		skb_extra_headroom = SKB_TX_HEADROOM;
 	pad_page = alloc_page(GFP_KERNEL);
 	if (!pad_page)
 		return -ENOMEM;
@@ -366,7 +452,9 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
 	skb = skb_peek(&c3cn->receive_queue);
 	while (!err && skb) {
 		__skb_unlink(skb, &c3cn->receive_queue);
-		read += skb_ulp_pdulen(skb);
+		read += skb_rx_pdulen(skb);
+		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
+				conn, c3cn, skb, skb_rx_pdulen(skb));
 		err = cxgb3i_conn_read_pdu_skb(conn, skb);
 		__kfree_skb(skb);
 		skb = skb_peek(&c3cn->receive_queue);
@@ -377,6 +465,11 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
 		cxgb3i_c3cn_rx_credits(c3cn, read);
 	}
 	conn->rxdata_octets += read;
+
+	if (err) {
+		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
+		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	}
 }
 
 void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
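Note: sgl_read_to_frags() above merges physically contiguous scatterlist
chunks into single page frags, which is what lets a whole pdu payload fit in
MAX_PDU_FRAGS entries. A userspace model of just the merge rule (hypothetical
types and names):

	struct frag { const char *page; unsigned int off, size; };

	/* A chunk is coalesced into the previous frag only when it continues on
	 * the same page at the exact next byte offset; otherwise it opens a new
	 * frag, failing if the array is full. Returns the new frag count or -1. */
	static int add_chunk(struct frag *frags, int n, int max,
			     const char *page, unsigned int off, unsigned int size)
	{
		if (n && page == frags[n - 1].page &&
		    off == frags[n - 1].off + frags[n - 1].size) {
			frags[n - 1].size += size;
			return n;
		}
		if (n >= max)
			return -1;
		frags[n].page = page;
		frags[n].off = off;
		frags[n].size = size;
		return n + 1;
	}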
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
index a3f685cc2362..0770b23d90da 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
@@ -53,7 +53,7 @@ struct cpl_rx_data_ddp_norss {
 #define ULP2_FLAG_DCRC_ERROR		0x20
 #define ULP2_FLAG_PAD_ERROR		0x40
 
-void cxgb3i_conn_closing(struct s3_conn *);
+void cxgb3i_conn_closing(struct s3_conn *c3cn);
 void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
 void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
 #endif