author		Alan Stern <stern@rowland.harvard.edu>	2007-02-20 11:01:57 -0500
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2007-04-17 18:09:56 -0400
commit		44ec95425c1d9dce6e4638c29e4362cfb44814e7
tree		6af593dc23a2f7cdb9a0d3ecc46d37aae640c0d8
parent		1079a2d251f24a7d9e7576217f5f738bc4218337
[SCSI] sg: cap reserved_size values at max_sectors
This patch (as857) modifies the SG_GET_RESERVED_SIZE and SG_SET_RESERVED_SIZE
ioctls in the sg driver, capping the values at the device's request_queue's
max_sectors value.  This will permit cdrecord to obtain a legal value for the
maximum transfer length, fixing Bugzilla #7026.

The patch also caps the initial reserved_size value.  There's no reason to
have a reserved buffer larger than max_sectors, since it would be impossible
to use the extra space.

The corresponding ioctls in the block layer are modified similarly, and the
initial value for the reserved_size is set as large as possible.  This will
effectively make it default to max_sectors.  Note that the actual value is
meaningless anyway, since block devices don't have a reserved buffer.

Finally, the BLKSECTGET ioctl is added to sg, so that there will be a uniform
way for users to determine the actual max_sectors value for any raw SCSI
transport.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Acked-by: Douglas Gilbert <dougg@torque.net>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
-rw-r--r--	block/ll_rw_blk.c	2
-rw-r--r--	block/scsi_ioctl.c	4
-rw-r--r--	drivers/scsi/sg.c	13
3 files changed, 16 insertions(+), 3 deletions(-)
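As a hedged illustration of the "uniform way for users to determine the actual
max_sectors value" described above, the user-space sketch below (not part of
the patch) queries an sg node, assumed here to be /dev/sg0, with the new
BLKSECTGET ioctl and with SG_GET_RESERVED_SIZE.  As implemented in this patch,
sg's BLKSECTGET stores max_sectors * 512, i.e. a byte count, through an int
pointer, and SG_GET_RESERVED_SIZE now returns a value capped at that same
limit.

/*
 * Hedged user-space sketch, not part of the patch.  Assumes an sg node
 * at /dev/sg0.  With this patch, sg's BLKSECTGET reports the queue's
 * max_sectors * 512 (a byte count) and SG_GET_RESERVED_SIZE is capped
 * at the same value.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKSECTGET */
#include <scsi/sg.h>		/* SG_GET_RESERVED_SIZE */

int main(void)
{
	int fd, max_bytes, reserved;

	fd = open("/dev/sg0", O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("open /dev/sg0");
		return 1;
	}
	if (ioctl(fd, BLKSECTGET, &max_bytes) == 0)
		printf("max transfer length: %d bytes\n", max_bytes);
	if (ioctl(fd, SG_GET_RESERVED_SIZE, &reserved) == 0)
		printf("reserved buffer size: %d bytes\n", reserved);
	close(fd);
	return 0;
}

This is roughly the probe a program like cdrecord can use to pick a legal
maximum transfer length on any raw SCSI transport.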
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 38c293b987b7..cf8752abd61a 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1925,6 +1925,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 
+	q->sg_reserved_size = INT_MAX;
+
 	/*
 	 * all done
 	 */
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 65c6a3cba6d6..e83f1dbf7c29 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -78,7 +78,9 @@ static int sg_set_timeout(request_queue_t *q, int __user *p)
 
 static int sg_get_reserved_size(request_queue_t *q, int __user *p)
 {
-	return put_user(q->sg_reserved_size, p);
+	unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+
+	return put_user(val, p);
 }
 
 static int sg_set_reserved_size(request_queue_t *q, int __user *p)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 81e3bc7b02a1..570977cf9efb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -917,6 +917,8 @@ sg_ioctl(struct inode *inode, struct file *filp,
 			return result;
 		if (val < 0)
 			return -EINVAL;
+		val = min_t(int, val,
+			    sdp->device->request_queue->max_sectors * 512);
 		if (val != sfp->reserve.bufflen) {
 			if (sg_res_in_use(sfp) || sfp->mmap_called)
 				return -EBUSY;
@@ -925,7 +927,8 @@ sg_ioctl(struct inode *inode, struct file *filp,
 		}
 		return 0;
 	case SG_GET_RESERVED_SIZE:
-		val = (int) sfp->reserve.bufflen;
+		val = min_t(int, sfp->reserve.bufflen,
+			    sdp->device->request_queue->max_sectors * 512);
 		return put_user(val, ip);
 	case SG_SET_COMMAND_Q:
 		result = get_user(val, ip);
@@ -1061,6 +1064,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
 		if (sdp->detached)
 			return -ENODEV;
 		return scsi_ioctl(sdp->device, cmd_in, p);
+	case BLKSECTGET:
+		return put_user(sdp->device->request_queue->max_sectors * 512,
+				ip);
 	default:
 		if (read_only)
 			return -EPERM;	/* don't know so take safe approach */
@@ -2339,6 +2345,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
 {
 	Sg_fd *sfp;
 	unsigned long iflags;
+	int bufflen;
 
 	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
 	if (!sfp)
@@ -2369,7 +2376,9 @@ sg_add_sfp(Sg_device * sdp, int dev)
 	if (unlikely(sg_big_buff != def_reserved_size))
 		sg_big_buff = def_reserved_size;
 
-	sg_build_reserve(sfp, sg_big_buff);
+	bufflen = min_t(int, sg_big_buff,
+			sdp->device->request_queue->max_sectors * 512);
+	sg_build_reserve(sfp, bufflen);
 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
 			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
 	return sfp;
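The cap added to SG_SET_RESERVED_SIZE in sg_ioctl() above can also be observed
from user space.  A minimal sketch, again assuming a device node at /dev/sg0:
request a deliberately oversized reserved buffer and read back what was
actually granted; with this patch the value read back cannot exceed the
queue's max_sectors * 512.

/*
 * Hedged sketch, not part of the patch: exercise the new cap on
 * SG_SET_RESERVED_SIZE.  /dev/sg0 is an assumed device node; the size
 * read back is limited both by what sg could allocate and, with this
 * patch, by the queue's max_sectors * 512.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	int fd, granted = 0;
	int wanted = 16 * 1024 * 1024;	/* intentionally very large */

	fd = open("/dev/sg0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/sg0");
		return 1;
	}
	if (ioctl(fd, SG_SET_RESERVED_SIZE, &wanted) != 0)
		perror("SG_SET_RESERVED_SIZE");
	if (ioctl(fd, SG_GET_RESERVED_SIZE, &granted) == 0)
		printf("requested %d bytes, granted %d bytes\n", wanted, granted);
	close(fd);
	return 0;
}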