author     Martin K. Petersen <martin.petersen@oracle.com>  2010-09-10 14:50:10 -0400
committer  Jens Axboe <axboe@carl.home.kernel.dk>           2010-09-10 14:50:10 -0400
commit     13f05c8d8e98bbdce89158bfdb2e380940695a88 (patch)
tree       055215e7e2b1bdc684ead64daa61b30b35eaa3c5 /drivers/scsi/scsi_lib.c
parent     c8bf1336824ebd698d37b71763e1c43190f2229a (diff)
block/scsi: Provide a limit on the number of integrity segments
Some controllers have a hardware limit on the number of protection
information scatter-gather list segments they can handle.
Introduce a max_integrity_segments limit in the block layer and add a
new scsi_host_template setting that allows HBA drivers to advertise a
value suitable for their hardware.
Add support for honoring the integrity segment limit when merging both
bios and requests.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@carl.home.kernel.dk>
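For illustration, here is a minimal, hypothetical sketch of how an HBA driver that supports protection-information DMA might advertise its hardware limit through the new scsi_host_template setting; the driver name, the field values, and the omitted callbacks are assumptions, not part of this commit. When scsi_host_prot_dma() is true, __scsi_alloc_queue() caps the value at SCSI_MAX_PROT_SG_SEGMENTS and passes it to blk_queue_max_integrity_segments(), as shown in the diff below.

/* Hypothetical HBA template -- values are illustrative only. */
static struct scsi_host_template example_hba_template = {
        .module             = THIS_MODULE,
        .name               = "example_hba",   /* assumed driver name */
        .sg_tablesize       = 128,             /* data scatter-gather limit */
        .sg_prot_tablesize  = 128,             /* new: protection scatter-gather limit */
        .this_id            = -1,
        /* .queuecommand and the other mandatory callbacks are omitted here. */
};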
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c  26
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ade720422c6..861c0b937ac9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -968,11 +968,13 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
  */
 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
-        int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
+        struct request *rq = cmd->request;
+
+        int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
         if (error)
                 goto err_exit;
 
-        if (blk_bidi_rq(cmd->request)) {
+        if (blk_bidi_rq(rq)) {
                 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
                         scsi_sdb_cache, GFP_ATOMIC);
                 if (!bidi_sdb) {
@@ -980,28 +982,28 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
                         goto err_exit;
                 }
 
-                cmd->request->next_rq->special = bidi_sdb;
-                error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
-                                          GFP_ATOMIC);
+                rq->next_rq->special = bidi_sdb;
+                error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
                 if (error)
                         goto err_exit;
         }
 
-        if (blk_integrity_rq(cmd->request)) {
+        if (blk_integrity_rq(rq)) {
                 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                 int ivecs, count;
 
                 BUG_ON(prot_sdb == NULL);
-                ivecs = blk_rq_count_integrity_sg(cmd->request);
+                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
                 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
                         error = BLKPREP_DEFER;
                         goto err_exit;
                 }
 
-                count = blk_rq_map_integrity_sg(cmd->request,
+                count = blk_rq_map_integrity_sg(rq->q, rq->bio,
                                                 prot_sdb->table.sgl);
                 BUG_ON(unlikely(count > ivecs));
+                BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
 
                 cmd->prot_sdb = prot_sdb;
                 cmd->prot_sdb->table.nents = count;
@@ -1625,6 +1627,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
         blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
                                         SCSI_MAX_SG_CHAIN_SEGMENTS));
 
+        if (scsi_host_prot_dma(shost)) {
+                shost->sg_prot_tablesize =
+                        min_not_zero(shost->sg_prot_tablesize,
+                                     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
+                BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
+                blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
+        }
+
         blk_queue_max_hw_sectors(q, shost->max_sectors);
         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
         blk_queue_segment_boundary(q, shost->dma_boundary);
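As context for the "honoring the integrity segment limit when merging" part of the commit message, the sketch below shows the kind of check the block layer merge paths can now make against the per-queue limit set above. It is illustrative only: the helper name is hypothetical and does not appear in this commit; only queue_max_integrity_segments() and blk_queue_max_integrity_segments() come from the patch set itself.

/* Hypothetical helper: would a merged request still fit the HBA's
 * protection scatter-gather limit?  queue_max_integrity_segments()
 * reads back the value set by blk_queue_max_integrity_segments()
 * in __scsi_alloc_queue().
 */
static bool example_integrity_merge_fits(struct request_queue *q,
                                         unsigned int nr_integrity_segs)
{
        return nr_integrity_segs <= queue_max_integrity_segments(q);
}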