aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDouglas Gilbert <dgilbert@interlog.com>2014-06-25 08:08:03 -0400
committerChristoph Hellwig <hch@lst.de>2014-07-17 16:07:34 -0400
commitcc833acbee9db5ca8c6162b015b4c93863c6f821 (patch)
tree31bdb4b633e51731fcb35383f8e3bee59765378d
parent16070cc189c5e343696c29c8cff779e692cfcb8d (diff)
sg: O_EXCL and other lock handling
This addresses a problem reported by Vaughan Cao concerning the correctness of the O_EXCL logic in the sg driver. POSIX doesn't define O_EXCL semantics on devices but "allow only one open file descriptor at a time per sg device" is a rough definition. The sg driver's semantics have been to wait on an open() when O_NONBLOCK is not given and there are O_EXCL headwinds. Nasty things can happen during that wait, such as the device being detached (removed). So multiple locks are reworked in this patch, making it large and hard to break down into digestible bits. This patch is against Linus's current git repository, which doesn't include any sg patches sent in the last few weeks. Hence this patch touches as little as possible that it doesn't need to and strips out most SCSI_LOG_TIMEOUT() changes in v3 because Hannes said he was going to rework all that stuff. The sg3_utils package has several test programs written to test this patch. See examples/sg_tst_excl*.cpp . Not all the locks and flags in sg have been re-worked in this patch, notably sg_request::done . That can wait for a follow-up patch if this one meets with approval. Signed-off-by: Douglas Gilbert <dgilbert@interlog.com> Reviewed-by: Hannes Reinecke <hare@suse.de>
-rw-r--r--drivers/scsi/sg.c424
1 files changed, 230 insertions, 194 deletions
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 32425ac61096..2e01a9dd26fa 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -49,6 +49,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
49#include <linux/delay.h> 49#include <linux/delay.h>
50#include <linux/blktrace_api.h> 50#include <linux/blktrace_api.h>
51#include <linux/mutex.h> 51#include <linux/mutex.h>
52#include <linux/atomic.h>
52#include <linux/ratelimit.h> 53#include <linux/ratelimit.h>
53 54
54#include "scsi.h" 55#include "scsi.h"
@@ -106,18 +107,16 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
106 107
107#define SG_SECTOR_SZ 512 108#define SG_SECTOR_SZ 512
108 109
109static int sg_add(struct device *, struct class_interface *); 110static int sg_add_device(struct device *, struct class_interface *);
110static void sg_remove(struct device *, struct class_interface *); 111static void sg_remove_device(struct device *, struct class_interface *);
111
112static DEFINE_SPINLOCK(sg_open_exclusive_lock);
113 112
114static DEFINE_IDR(sg_index_idr); 113static DEFINE_IDR(sg_index_idr);
115static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock 114static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
116 file descriptor list for device */ 115 file descriptor list for device */
117 116
118static struct class_interface sg_interface = { 117static struct class_interface sg_interface = {
119 .add_dev = sg_add, 118 .add_dev = sg_add_device,
120 .remove_dev = sg_remove, 119 .remove_dev = sg_remove_device,
121}; 120};
122 121
123typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ 122typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
@@ -150,8 +149,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
150} Sg_request; 149} Sg_request;
151 150
152typedef struct sg_fd { /* holds the state of a file descriptor */ 151typedef struct sg_fd { /* holds the state of a file descriptor */
153 /* sfd_siblings is protected by sg_index_lock */ 152 struct list_head sfd_siblings; /* protected by device's sfd_lock */
154 struct list_head sfd_siblings;
155 struct sg_device *parentdp; /* owning device */ 153 struct sg_device *parentdp; /* owning device */
156 wait_queue_head_t read_wait; /* queue read until command done */ 154 wait_queue_head_t read_wait; /* queue read until command done */
157 rwlock_t rq_list_lock; /* protect access to list in req_arr */ 155 rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -174,14 +172,15 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
174 172
175typedef struct sg_device { /* holds the state of each scsi generic device */ 173typedef struct sg_device { /* holds the state of each scsi generic device */
176 struct scsi_device *device; 174 struct scsi_device *device;
177 wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ 175 wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
176 struct mutex open_rel_lock; /* held when in open() or release() */
178 int sg_tablesize; /* adapter's max scatter-gather table size */ 177 int sg_tablesize; /* adapter's max scatter-gather table size */
179 u32 index; /* device index number */ 178 u32 index; /* device index number */
180 /* sfds is protected by sg_index_lock */
181 struct list_head sfds; 179 struct list_head sfds;
182 volatile char detached; /* 0->attached, 1->detached pending removal */ 180 rwlock_t sfd_lock; /* protect access to sfd list */
183 /* exclude protected by sg_open_exclusive_lock */ 181 atomic_t detaching; /* 0->device usable, 1->device detaching */
184 char exclude; /* opened for exclusive access */ 182 bool exclude; /* 1->open(O_EXCL) succeeded and is active */
183 int open_cnt; /* count of opens (perhaps < num(sfds) ) */
185 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ 184 char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
186 struct gendisk *disk; 185 struct gendisk *disk;
187 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ 186 struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
@@ -212,7 +211,7 @@ static Sg_request *sg_add_request(Sg_fd * sfp);
212static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 211static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
213static int sg_res_in_use(Sg_fd * sfp); 212static int sg_res_in_use(Sg_fd * sfp);
214static Sg_device *sg_get_dev(int dev); 213static Sg_device *sg_get_dev(int dev);
215static void sg_put_dev(Sg_device *sdp); 214static void sg_device_destroy(struct kref *kref);
216 215
217#define SZ_SG_HEADER sizeof(struct sg_header) 216#define SZ_SG_HEADER sizeof(struct sg_header)
218#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) 217#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
@@ -229,38 +228,43 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
229 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); 228 return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
230} 229}
231 230
232static int get_exclude(Sg_device *sdp) 231static int
233{ 232open_wait(Sg_device *sdp, int flags)
234 unsigned long flags;
235 int ret;
236
237 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
238 ret = sdp->exclude;
239 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
240 return ret;
241}
242
243static int set_exclude(Sg_device *sdp, char val)
244{ 233{
245 unsigned long flags; 234 int retval = 0;
246
247 spin_lock_irqsave(&sg_open_exclusive_lock, flags);
248 sdp->exclude = val;
249 spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
250 return val;
251}
252 235
253static int sfds_list_empty(Sg_device *sdp) 236 if (flags & O_EXCL) {
254{ 237 while (sdp->open_cnt > 0) {
255 unsigned long flags; 238 mutex_unlock(&sdp->open_rel_lock);
256 int ret; 239 retval = wait_event_interruptible(sdp->open_wait,
240 (atomic_read(&sdp->detaching) ||
241 !sdp->open_cnt));
242 mutex_lock(&sdp->open_rel_lock);
243
244 if (retval) /* -ERESTARTSYS */
245 return retval;
246 if (atomic_read(&sdp->detaching))
247 return -ENODEV;
248 }
249 } else {
250 while (sdp->exclude) {
251 mutex_unlock(&sdp->open_rel_lock);
252 retval = wait_event_interruptible(sdp->open_wait,
253 (atomic_read(&sdp->detaching) ||
254 !sdp->exclude));
255 mutex_lock(&sdp->open_rel_lock);
256
257 if (retval) /* -ERESTARTSYS */
258 return retval;
259 if (atomic_read(&sdp->detaching))
260 return -ENODEV;
261 }
262 }
257 263
258 read_lock_irqsave(&sg_index_lock, flags); 264 return retval;
259 ret = list_empty(&sdp->sfds);
260 read_unlock_irqrestore(&sg_index_lock, flags);
261 return ret;
262} 265}
263 266
267/* Returns 0 on success, else a negated errno value */
264static int 268static int
265sg_open(struct inode *inode, struct file *filp) 269sg_open(struct inode *inode, struct file *filp)
266{ 270{
@@ -269,17 +273,15 @@ sg_open(struct inode *inode, struct file *filp)
269 struct request_queue *q; 273 struct request_queue *q;
270 Sg_device *sdp; 274 Sg_device *sdp;
271 Sg_fd *sfp; 275 Sg_fd *sfp;
272 int res;
273 int retval; 276 int retval;
274 277
275 nonseekable_open(inode, filp); 278 nonseekable_open(inode, filp);
276 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); 279 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
280 if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
281 return -EPERM; /* Can't lock it with read only access */
277 sdp = sg_get_dev(dev); 282 sdp = sg_get_dev(dev);
278 if (IS_ERR(sdp)) { 283 if (IS_ERR(sdp))
279 retval = PTR_ERR(sdp); 284 return PTR_ERR(sdp);
280 sdp = NULL;
281 goto sg_put;
282 }
283 285
284 /* This driver's module count bumped by fops_get in <linux/fs.h> */ 286 /* This driver's module count bumped by fops_get in <linux/fs.h> */
285 /* Prevent the device driver from vanishing while we sleep */ 287 /* Prevent the device driver from vanishing while we sleep */
@@ -291,6 +293,9 @@ sg_open(struct inode *inode, struct file *filp)
291 if (retval) 293 if (retval)
292 goto sdp_put; 294 goto sdp_put;
293 295
296 /* scsi_block_when_processing_errors() may block so bypass
297 * check if O_NONBLOCK. Permits SCSI commands to be issued
298 * during error recovery. Tread carefully. */
294 if (!((flags & O_NONBLOCK) || 299 if (!((flags & O_NONBLOCK) ||
295 scsi_block_when_processing_errors(sdp->device))) { 300 scsi_block_when_processing_errors(sdp->device))) {
296 retval = -ENXIO; 301 retval = -ENXIO;
@@ -298,65 +303,65 @@ sg_open(struct inode *inode, struct file *filp)
298 goto error_out; 303 goto error_out;
299 } 304 }
300 305
301 if (flags & O_EXCL) { 306 mutex_lock(&sdp->open_rel_lock);
302 if (O_RDONLY == (flags & O_ACCMODE)) { 307 if (flags & O_NONBLOCK) {
303 retval = -EPERM; /* Can't lock it with read only access */ 308 if (flags & O_EXCL) {
304 goto error_out; 309 if (sdp->open_cnt > 0) {
305 } 310 retval = -EBUSY;
306 if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) { 311 goto error_mutex_locked;
307 retval = -EBUSY; 312 }
308 goto error_out; 313 } else {
309 } 314 if (sdp->exclude) {
310 res = wait_event_interruptible(sdp->o_excl_wait, 315 retval = -EBUSY;
311 ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1))); 316 goto error_mutex_locked;
312 if (res) { 317 }
313 retval = res; /* -ERESTARTSYS because signal hit process */
314 goto error_out;
315 }
316 } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
317 if (flags & O_NONBLOCK) {
318 retval = -EBUSY;
319 goto error_out;
320 }
321 res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
322 if (res) {
323 retval = res; /* -ERESTARTSYS because signal hit process */
324 goto error_out;
325 } 318 }
319 } else {
320 retval = open_wait(sdp, flags);
321 if (retval) /* -ERESTARTSYS or -ENODEV */
322 goto error_mutex_locked;
326 } 323 }
327 if (sdp->detached) { 324
328 retval = -ENODEV; 325 /* N.B. at this point we are holding the open_rel_lock */
329 goto error_out; 326 if (flags & O_EXCL)
330 } 327 sdp->exclude = true;
331 if (sfds_list_empty(sdp)) { /* no existing opens on this device */ 328
329 if (sdp->open_cnt < 1) { /* no existing opens */
332 sdp->sgdebug = 0; 330 sdp->sgdebug = 0;
333 q = sdp->device->request_queue; 331 q = sdp->device->request_queue;
334 sdp->sg_tablesize = queue_max_segments(q); 332 sdp->sg_tablesize = queue_max_segments(q);
335 } 333 }
336 if ((sfp = sg_add_sfp(sdp, dev))) 334 sfp = sg_add_sfp(sdp, dev);
337 filp->private_data = sfp; 335 if (IS_ERR(sfp)) {
338 else { 336 retval = PTR_ERR(sfp);
339 if (flags & O_EXCL) { 337 goto out_undo;
340 set_exclude(sdp, 0); /* undo if error */
341 wake_up_interruptible(&sdp->o_excl_wait);
342 }
343 retval = -ENOMEM;
344 goto error_out;
345 } 338 }
339
340 filp->private_data = sfp;
341 sdp->open_cnt++;
342 mutex_unlock(&sdp->open_rel_lock);
343
346 retval = 0; 344 retval = 0;
347error_out:
348 if (retval) {
349 scsi_autopm_put_device(sdp->device);
350sdp_put:
351 scsi_device_put(sdp->device);
352 }
353sg_put: 345sg_put:
354 if (sdp) 346 kref_put(&sdp->d_ref, sg_device_destroy);
355 sg_put_dev(sdp);
356 return retval; 347 return retval;
348
349out_undo:
350 if (flags & O_EXCL) {
351 sdp->exclude = false; /* undo if error */
352 wake_up_interruptible(&sdp->open_wait);
353 }
354error_mutex_locked:
355 mutex_unlock(&sdp->open_rel_lock);
356error_out:
357 scsi_autopm_put_device(sdp->device);
358sdp_put:
359 scsi_device_put(sdp->device);
360 goto sg_put;
357} 361}
358 362
359/* Following function was formerly called 'sg_close' */ 363/* Release resources associated with a successful sg_open()
364 * Returns 0 on success, else a negated errno value */
360static int 365static int
361sg_release(struct inode *inode, struct file *filp) 366sg_release(struct inode *inode, struct file *filp)
362{ 367{
@@ -367,11 +372,20 @@ sg_release(struct inode *inode, struct file *filp)
367 return -ENXIO; 372 return -ENXIO;
368 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 373 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
369 374
370 set_exclude(sdp, 0); 375 mutex_lock(&sdp->open_rel_lock);
371 wake_up_interruptible(&sdp->o_excl_wait);
372
373 scsi_autopm_put_device(sdp->device); 376 scsi_autopm_put_device(sdp->device);
374 kref_put(&sfp->f_ref, sg_remove_sfp); 377 kref_put(&sfp->f_ref, sg_remove_sfp);
378 sdp->open_cnt--;
379
380 /* possibly many open()s waiting on exlude clearing, start many;
381 * only open(O_EXCL)s wait on 0==open_cnt so only start one */
382 if (sdp->exclude) {
383 sdp->exclude = false;
384 wake_up_interruptible_all(&sdp->open_wait);
385 } else if (0 == sdp->open_cnt) {
386 wake_up_interruptible(&sdp->open_wait);
387 }
388 mutex_unlock(&sdp->open_rel_lock);
375 return 0; 389 return 0;
376} 390}
377 391
@@ -423,7 +437,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
423 } 437 }
424 srp = sg_get_rq_mark(sfp, req_pack_id); 438 srp = sg_get_rq_mark(sfp, req_pack_id);
425 if (!srp) { /* now wait on packet to arrive */ 439 if (!srp) { /* now wait on packet to arrive */
426 if (sdp->detached) { 440 if (atomic_read(&sdp->detaching)) {
427 retval = -ENODEV; 441 retval = -ENODEV;
428 goto free_old_hdr; 442 goto free_old_hdr;
429 } 443 }
@@ -432,9 +446,9 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
432 goto free_old_hdr; 446 goto free_old_hdr;
433 } 447 }
434 retval = wait_event_interruptible(sfp->read_wait, 448 retval = wait_event_interruptible(sfp->read_wait,
435 (sdp->detached || 449 (atomic_read(&sdp->detaching) ||
436 (srp = sg_get_rq_mark(sfp, req_pack_id)))); 450 (srp = sg_get_rq_mark(sfp, req_pack_id))));
437 if (sdp->detached) { 451 if (atomic_read(&sdp->detaching)) {
438 retval = -ENODEV; 452 retval = -ENODEV;
439 goto free_old_hdr; 453 goto free_old_hdr;
440 } 454 }
@@ -576,7 +590,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
576 return -ENXIO; 590 return -ENXIO;
577 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n", 591 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
578 sdp->disk->disk_name, (int) count)); 592 sdp->disk->disk_name, (int) count));
579 if (sdp->detached) 593 if (atomic_read(&sdp->detaching))
580 return -ENODEV; 594 return -ENODEV;
581 if (!((filp->f_flags & O_NONBLOCK) || 595 if (!((filp->f_flags & O_NONBLOCK) ||
582 scsi_block_when_processing_errors(sdp->device))) 596 scsi_block_when_processing_errors(sdp->device)))
@@ -762,7 +776,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
762 sg_finish_rem_req(srp); 776 sg_finish_rem_req(srp);
763 return k; /* probably out of space --> ENOMEM */ 777 return k; /* probably out of space --> ENOMEM */
764 } 778 }
765 if (sdp->detached) { 779 if (atomic_read(&sdp->detaching)) {
766 if (srp->bio) 780 if (srp->bio)
767 blk_end_request_all(srp->rq, -EIO); 781 blk_end_request_all(srp->rq, -EIO);
768 sg_finish_rem_req(srp); 782 sg_finish_rem_req(srp);
@@ -838,7 +852,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
838 852
839 switch (cmd_in) { 853 switch (cmd_in) {
840 case SG_IO: 854 case SG_IO:
841 if (sdp->detached) 855 if (atomic_read(&sdp->detaching))
842 return -ENODEV; 856 return -ENODEV;
843 if (!scsi_block_when_processing_errors(sdp->device)) 857 if (!scsi_block_when_processing_errors(sdp->device))
844 return -ENXIO; 858 return -ENXIO;
@@ -849,8 +863,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
849 if (result < 0) 863 if (result < 0)
850 return result; 864 return result;
851 result = wait_event_interruptible(sfp->read_wait, 865 result = wait_event_interruptible(sfp->read_wait,
852 (srp_done(sfp, srp) || sdp->detached)); 866 (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
853 if (sdp->detached) 867 if (atomic_read(&sdp->detaching))
854 return -ENODEV; 868 return -ENODEV;
855 write_lock_irq(&sfp->rq_list_lock); 869 write_lock_irq(&sfp->rq_list_lock);
856 if (srp->done) { 870 if (srp->done) {
@@ -889,7 +903,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
889 sg_build_reserve(sfp, val); 903 sg_build_reserve(sfp, val);
890 } 904 }
891 } else { 905 } else {
892 if (sdp->detached) 906 if (atomic_read(&sdp->detaching))
893 return -ENODEV; 907 return -ENODEV;
894 sfp->low_dma = sdp->device->host->unchecked_isa_dma; 908 sfp->low_dma = sdp->device->host->unchecked_isa_dma;
895 } 909 }
@@ -902,7 +916,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
902 else { 916 else {
903 sg_scsi_id_t __user *sg_idp = p; 917 sg_scsi_id_t __user *sg_idp = p;
904 918
905 if (sdp->detached) 919 if (atomic_read(&sdp->detaching))
906 return -ENODEV; 920 return -ENODEV;
907 __put_user((int) sdp->device->host->host_no, 921 __put_user((int) sdp->device->host->host_no,
908 &sg_idp->host_no); 922 &sg_idp->host_no);
@@ -1044,11 +1058,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1044 return result; 1058 return result;
1045 } 1059 }
1046 case SG_EMULATED_HOST: 1060 case SG_EMULATED_HOST:
1047 if (sdp->detached) 1061 if (atomic_read(&sdp->detaching))
1048 return -ENODEV; 1062 return -ENODEV;
1049 return put_user(sdp->device->host->hostt->emulated, ip); 1063 return put_user(sdp->device->host->hostt->emulated, ip);
1050 case SG_SCSI_RESET: 1064 case SG_SCSI_RESET:
1051 if (sdp->detached) 1065 if (atomic_read(&sdp->detaching))
1052 return -ENODEV; 1066 return -ENODEV;
1053 if (filp->f_flags & O_NONBLOCK) { 1067 if (filp->f_flags & O_NONBLOCK) {
1054 if (scsi_host_in_recovery(sdp->device->host)) 1068 if (scsi_host_in_recovery(sdp->device->host))
@@ -1081,7 +1095,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1081 return (scsi_reset_provider(sdp->device, val) == 1095 return (scsi_reset_provider(sdp->device, val) ==
1082 SUCCESS) ? 0 : -EIO; 1096 SUCCESS) ? 0 : -EIO;
1083 case SCSI_IOCTL_SEND_COMMAND: 1097 case SCSI_IOCTL_SEND_COMMAND:
1084 if (sdp->detached) 1098 if (atomic_read(&sdp->detaching))
1085 return -ENODEV; 1099 return -ENODEV;
1086 if (read_only) { 1100 if (read_only) {
1087 unsigned char opcode = WRITE_6; 1101 unsigned char opcode = WRITE_6;
@@ -1103,7 +1117,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1103 case SCSI_IOCTL_GET_BUS_NUMBER: 1117 case SCSI_IOCTL_GET_BUS_NUMBER:
1104 case SCSI_IOCTL_PROBE_HOST: 1118 case SCSI_IOCTL_PROBE_HOST:
1105 case SG_GET_TRANSFORM: 1119 case SG_GET_TRANSFORM:
1106 if (sdp->detached) 1120 if (atomic_read(&sdp->detaching))
1107 return -ENODEV; 1121 return -ENODEV;
1108 return scsi_ioctl(sdp->device, cmd_in, p); 1122 return scsi_ioctl(sdp->device, cmd_in, p);
1109 case BLKSECTGET: 1123 case BLKSECTGET:
@@ -1177,7 +1191,7 @@ sg_poll(struct file *filp, poll_table * wait)
1177 } 1191 }
1178 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 1192 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1179 1193
1180 if (sdp->detached) 1194 if (atomic_read(&sdp->detaching))
1181 res |= POLLHUP; 1195 res |= POLLHUP;
1182 else if (!sfp->cmd_q) { 1196 else if (!sfp->cmd_q) {
1183 if (0 == count) 1197 if (0 == count)
@@ -1276,7 +1290,8 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1276 return 0; 1290 return 0;
1277} 1291}
1278 1292
1279static void sg_rq_end_io_usercontext(struct work_struct *work) 1293static void
1294sg_rq_end_io_usercontext(struct work_struct *work)
1280{ 1295{
1281 struct sg_request *srp = container_of(work, struct sg_request, ew.work); 1296 struct sg_request *srp = container_of(work, struct sg_request, ew.work);
1282 struct sg_fd *sfp = srp->parentfp; 1297 struct sg_fd *sfp = srp->parentfp;
@@ -1289,7 +1304,8 @@ static void sg_rq_end_io_usercontext(struct work_struct *work)
1289 * This function is a "bottom half" handler that is called by the mid 1304 * This function is a "bottom half" handler that is called by the mid
1290 * level when a command is completed (or has failed). 1305 * level when a command is completed (or has failed).
1291 */ 1306 */
1292static void sg_rq_end_io(struct request *rq, int uptodate) 1307static void
1308sg_rq_end_io(struct request *rq, int uptodate)
1293{ 1309{
1294 struct sg_request *srp = rq->end_io_data; 1310 struct sg_request *srp = rq->end_io_data;
1295 Sg_device *sdp; 1311 Sg_device *sdp;
@@ -1307,8 +1323,8 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1307 return; 1323 return;
1308 1324
1309 sdp = sfp->parentdp; 1325 sdp = sfp->parentdp;
1310 if (unlikely(sdp->detached)) 1326 if (unlikely(atomic_read(&sdp->detaching)))
1311 printk(KERN_INFO "sg_rq_end_io: device detached\n"); 1327 pr_info("%s: device detaching\n", __func__);
1312 1328
1313 sense = rq->sense; 1329 sense = rq->sense;
1314 result = rq->errors; 1330 result = rq->errors;
@@ -1331,7 +1347,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1331 if ((sdp->sgdebug > 0) && 1347 if ((sdp->sgdebug > 0) &&
1332 ((CHECK_CONDITION == srp->header.masked_status) || 1348 ((CHECK_CONDITION == srp->header.masked_status) ||
1333 (COMMAND_TERMINATED == srp->header.masked_status))) 1349 (COMMAND_TERMINATED == srp->header.masked_status)))
1334 __scsi_print_sense("sg_cmd_done", sense, 1350 __scsi_print_sense(__func__, sense,
1335 SCSI_SENSE_BUFFERSIZE); 1351 SCSI_SENSE_BUFFERSIZE);
1336 1352
1337 /* Following if statement is a patch supplied by Eric Youngdale */ 1353 /* Following if statement is a patch supplied by Eric Youngdale */
@@ -1390,7 +1406,8 @@ static struct class *sg_sysfs_class;
1390 1406
1391static int sg_sysfs_valid = 0; 1407static int sg_sysfs_valid = 0;
1392 1408
1393static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) 1409static Sg_device *
1410sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1394{ 1411{
1395 struct request_queue *q = scsidp->request_queue; 1412 struct request_queue *q = scsidp->request_queue;
1396 Sg_device *sdp; 1413 Sg_device *sdp;
@@ -1400,7 +1417,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1400 1417
1401 sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); 1418 sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
1402 if (!sdp) { 1419 if (!sdp) {
1403 printk(KERN_WARNING "kmalloc Sg_device failure\n"); 1420 sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
1421 "failure\n", __func__);
1404 return ERR_PTR(-ENOMEM); 1422 return ERR_PTR(-ENOMEM);
1405 } 1423 }
1406 1424
@@ -1415,8 +1433,9 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1415 scsidp->type, SG_MAX_DEVS - 1); 1433 scsidp->type, SG_MAX_DEVS - 1);
1416 error = -ENODEV; 1434 error = -ENODEV;
1417 } else { 1435 } else {
1418 printk(KERN_WARNING 1436 sdev_printk(KERN_WARNING, scsidp, "%s: idr "
1419 "idr allocation Sg_device failure: %d\n", error); 1437 "allocation Sg_device failure: %d\n",
1438 __func__, error);
1420 } 1439 }
1421 goto out_unlock; 1440 goto out_unlock;
1422 } 1441 }
@@ -1427,8 +1446,11 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1427 disk->first_minor = k; 1446 disk->first_minor = k;
1428 sdp->disk = disk; 1447 sdp->disk = disk;
1429 sdp->device = scsidp; 1448 sdp->device = scsidp;
1449 mutex_init(&sdp->open_rel_lock);
1430 INIT_LIST_HEAD(&sdp->sfds); 1450 INIT_LIST_HEAD(&sdp->sfds);
1431 init_waitqueue_head(&sdp->o_excl_wait); 1451 init_waitqueue_head(&sdp->open_wait);
1452 atomic_set(&sdp->detaching, 0);
1453 rwlock_init(&sdp->sfd_lock);
1432 sdp->sg_tablesize = queue_max_segments(q); 1454 sdp->sg_tablesize = queue_max_segments(q);
1433 sdp->index = k; 1455 sdp->index = k;
1434 kref_init(&sdp->d_ref); 1456 kref_init(&sdp->d_ref);
@@ -1446,7 +1468,7 @@ out_unlock:
1446} 1468}
1447 1469
1448static int 1470static int
1449sg_add(struct device *cl_dev, struct class_interface *cl_intf) 1471sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
1450{ 1472{
1451 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); 1473 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1452 struct gendisk *disk; 1474 struct gendisk *disk;
@@ -1457,7 +1479,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
1457 1479
1458 disk = alloc_disk(1); 1480 disk = alloc_disk(1);
1459 if (!disk) { 1481 if (!disk) {
1460 printk(KERN_WARNING "alloc_disk failed\n"); 1482 pr_warn("%s: alloc_disk failed\n", __func__);
1461 return -ENOMEM; 1483 return -ENOMEM;
1462 } 1484 }
1463 disk->major = SCSI_GENERIC_MAJOR; 1485 disk->major = SCSI_GENERIC_MAJOR;
@@ -1465,7 +1487,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
1465 error = -ENOMEM; 1487 error = -ENOMEM;
1466 cdev = cdev_alloc(); 1488 cdev = cdev_alloc();
1467 if (!cdev) { 1489 if (!cdev) {
1468 printk(KERN_WARNING "cdev_alloc failed\n"); 1490 pr_warn("%s: cdev_alloc failed\n", __func__);
1469 goto out; 1491 goto out;
1470 } 1492 }
1471 cdev->owner = THIS_MODULE; 1493 cdev->owner = THIS_MODULE;
@@ -1473,7 +1495,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
1473 1495
1474 sdp = sg_alloc(disk, scsidp); 1496 sdp = sg_alloc(disk, scsidp);
1475 if (IS_ERR(sdp)) { 1497 if (IS_ERR(sdp)) {
1476 printk(KERN_WARNING "sg_alloc failed\n"); 1498 pr_warn("%s: sg_alloc failed\n", __func__);
1477 error = PTR_ERR(sdp); 1499 error = PTR_ERR(sdp);
1478 goto out; 1500 goto out;
1479 } 1501 }
@@ -1491,22 +1513,20 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
1491 sdp->index), 1513 sdp->index),
1492 sdp, "%s", disk->disk_name); 1514 sdp, "%s", disk->disk_name);
1493 if (IS_ERR(sg_class_member)) { 1515 if (IS_ERR(sg_class_member)) {
1494 printk(KERN_ERR "sg_add: " 1516 pr_err("%s: device_create failed\n", __func__);
1495 "device_create failed\n");
1496 error = PTR_ERR(sg_class_member); 1517 error = PTR_ERR(sg_class_member);
1497 goto cdev_add_err; 1518 goto cdev_add_err;
1498 } 1519 }
1499 error = sysfs_create_link(&scsidp->sdev_gendev.kobj, 1520 error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1500 &sg_class_member->kobj, "generic"); 1521 &sg_class_member->kobj, "generic");
1501 if (error) 1522 if (error)
1502 printk(KERN_ERR "sg_add: unable to make symlink " 1523 pr_err("%s: unable to make symlink 'generic' back "
1503 "'generic' back to sg%d\n", sdp->index); 1524 "to sg%d\n", __func__, sdp->index);
1504 } else 1525 } else
1505 printk(KERN_WARNING "sg_add: sg_sys Invalid\n"); 1526 pr_warn("%s: sg_sys Invalid\n", __func__);
1506 1527
1507 sdev_printk(KERN_NOTICE, scsidp, 1528 sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
1508 "Attached scsi generic sg%d type %d\n", sdp->index, 1529 "type %d\n", sdp->index, scsidp->type);
1509 scsidp->type);
1510 1530
1511 dev_set_drvdata(cl_dev, sdp); 1531 dev_set_drvdata(cl_dev, sdp);
1512 1532
@@ -1525,7 +1545,8 @@ out:
1525 return error; 1545 return error;
1526} 1546}
1527 1547
1528static void sg_device_destroy(struct kref *kref) 1548static void
1549sg_device_destroy(struct kref *kref)
1529{ 1550{
1530 struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); 1551 struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
1531 unsigned long flags; 1552 unsigned long flags;
@@ -1547,33 +1568,39 @@ static void sg_device_destroy(struct kref *kref)
1547 kfree(sdp); 1568 kfree(sdp);
1548} 1569}
1549 1570
1550static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf) 1571static void
1572sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
1551{ 1573{
1552 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); 1574 struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1553 Sg_device *sdp = dev_get_drvdata(cl_dev); 1575 Sg_device *sdp = dev_get_drvdata(cl_dev);
1554 unsigned long iflags; 1576 unsigned long iflags;
1555 Sg_fd *sfp; 1577 Sg_fd *sfp;
1578 int val;
1556 1579
1557 if (!sdp || sdp->detached) 1580 if (!sdp)
1558 return; 1581 return;
1582 /* want sdp->detaching non-zero as soon as possible */
1583 val = atomic_inc_return(&sdp->detaching);
1584 if (val > 1)
1585 return; /* only want to do following once per device */
1559 1586
1560 SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name)); 1587 SCSI_LOG_TIMEOUT(3, printk("%s: %s\n", __func__,
1588 sdp->disk->disk_name));
1561 1589
1562 /* Need a write lock to set sdp->detached. */ 1590 read_lock_irqsave(&sdp->sfd_lock, iflags);
1563 write_lock_irqsave(&sg_index_lock, iflags);
1564 sdp->detached = 1;
1565 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { 1591 list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1566 wake_up_interruptible(&sfp->read_wait); 1592 wake_up_interruptible_all(&sfp->read_wait);
1567 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); 1593 kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
1568 } 1594 }
1569 write_unlock_irqrestore(&sg_index_lock, iflags); 1595 wake_up_interruptible_all(&sdp->open_wait);
1596 read_unlock_irqrestore(&sdp->sfd_lock, iflags);
1570 1597
1571 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); 1598 sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1572 device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); 1599 device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
1573 cdev_del(sdp->cdev); 1600 cdev_del(sdp->cdev);
1574 sdp->cdev = NULL; 1601 sdp->cdev = NULL;
1575 1602
1576 sg_put_dev(sdp); 1603 kref_put(&sdp->d_ref, sg_device_destroy);
1577} 1604}
1578 1605
1579module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); 1606module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
@@ -1643,7 +1670,8 @@ exit_sg(void)
1643 idr_destroy(&sg_index_idr); 1670 idr_destroy(&sg_index_idr);
1644} 1671}
1645 1672
1646static int sg_start_req(Sg_request *srp, unsigned char *cmd) 1673static int
1674sg_start_req(Sg_request *srp, unsigned char *cmd)
1647{ 1675{
1648 int res; 1676 int res;
1649 struct request *rq; 1677 struct request *rq;
@@ -1750,7 +1778,8 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1750 return res; 1778 return res;
1751} 1779}
1752 1780
1753static int sg_finish_rem_req(Sg_request * srp) 1781static int
1782sg_finish_rem_req(Sg_request *srp)
1754{ 1783{
1755 int ret = 0; 1784 int ret = 0;
1756 1785
@@ -2089,7 +2118,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2089 2118
2090 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); 2119 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2091 if (!sfp) 2120 if (!sfp)
2092 return NULL; 2121 return ERR_PTR(-ENOMEM);
2093 2122
2094 init_waitqueue_head(&sfp->read_wait); 2123 init_waitqueue_head(&sfp->read_wait);
2095 rwlock_init(&sfp->rq_list_lock); 2124 rwlock_init(&sfp->rq_list_lock);
@@ -2103,9 +2132,13 @@ sg_add_sfp(Sg_device * sdp, int dev)
2103 sfp->cmd_q = SG_DEF_COMMAND_Q; 2132 sfp->cmd_q = SG_DEF_COMMAND_Q;
2104 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; 2133 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2105 sfp->parentdp = sdp; 2134 sfp->parentdp = sdp;
2106 write_lock_irqsave(&sg_index_lock, iflags); 2135 write_lock_irqsave(&sdp->sfd_lock, iflags);
2136 if (atomic_read(&sdp->detaching)) {
2137 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2138 return ERR_PTR(-ENODEV);
2139 }
2107 list_add_tail(&sfp->sfd_siblings, &sdp->sfds); 2140 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2108 write_unlock_irqrestore(&sg_index_lock, iflags); 2141 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2109 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); 2142 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2110 if (unlikely(sg_big_buff != def_reserved_size)) 2143 if (unlikely(sg_big_buff != def_reserved_size))
2111 sg_big_buff = def_reserved_size; 2144 sg_big_buff = def_reserved_size;
@@ -2121,7 +2154,8 @@ sg_add_sfp(Sg_device * sdp, int dev)
2121 return sfp; 2154 return sfp;
2122} 2155}
2123 2156
2124static void sg_remove_sfp_usercontext(struct work_struct *work) 2157static void
2158sg_remove_sfp_usercontext(struct work_struct *work)
2125{ 2159{
2126 struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); 2160 struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2127 struct sg_device *sdp = sfp->parentdp; 2161 struct sg_device *sdp = sfp->parentdp;
@@ -2145,20 +2179,20 @@ static void sg_remove_sfp_usercontext(struct work_struct *work)
2145 kfree(sfp); 2179 kfree(sfp);
2146 2180
2147 scsi_device_put(sdp->device); 2181 scsi_device_put(sdp->device);
2148 sg_put_dev(sdp); 2182 kref_put(&sdp->d_ref, sg_device_destroy);
2149 module_put(THIS_MODULE); 2183 module_put(THIS_MODULE);
2150} 2184}
2151 2185
2152static void sg_remove_sfp(struct kref *kref) 2186static void
2187sg_remove_sfp(struct kref *kref)
2153{ 2188{
2154 struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); 2189 struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
2155 struct sg_device *sdp = sfp->parentdp; 2190 struct sg_device *sdp = sfp->parentdp;
2156 unsigned long iflags; 2191 unsigned long iflags;
2157 2192
2158 write_lock_irqsave(&sg_index_lock, iflags); 2193 write_lock_irqsave(&sdp->sfd_lock, iflags);
2159 list_del(&sfp->sfd_siblings); 2194 list_del(&sfp->sfd_siblings);
2160 write_unlock_irqrestore(&sg_index_lock, iflags); 2195 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
2161 wake_up_interruptible(&sdp->o_excl_wait);
2162 2196
2163 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); 2197 INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2164 schedule_work(&sfp->ew.work); 2198 schedule_work(&sfp->ew.work);
@@ -2209,7 +2243,8 @@ static Sg_device *sg_lookup_dev(int dev)
2209 return idr_find(&sg_index_idr, dev); 2243 return idr_find(&sg_index_idr, dev);
2210} 2244}
2211 2245
2212static Sg_device *sg_get_dev(int dev) 2246static Sg_device *
2247sg_get_dev(int dev)
2213{ 2248{
2214 struct sg_device *sdp; 2249 struct sg_device *sdp;
2215 unsigned long flags; 2250 unsigned long flags;
@@ -2218,8 +2253,8 @@ static Sg_device *sg_get_dev(int dev)
2218 sdp = sg_lookup_dev(dev); 2253 sdp = sg_lookup_dev(dev);
2219 if (!sdp) 2254 if (!sdp)
2220 sdp = ERR_PTR(-ENXIO); 2255 sdp = ERR_PTR(-ENXIO);
2221 else if (sdp->detached) { 2256 else if (atomic_read(&sdp->detaching)) {
2222 /* If sdp->detached, then the refcount may already be 0, in 2257 /* If sdp->detaching, then the refcount may already be 0, in
2223 * which case it would be a bug to do kref_get(). 2258 * which case it would be a bug to do kref_get().
2224 */ 2259 */
2225 sdp = ERR_PTR(-ENODEV); 2260 sdp = ERR_PTR(-ENODEV);
@@ -2230,11 +2265,6 @@ static Sg_device *sg_get_dev(int dev)
2230 return sdp; 2265 return sdp;
2231} 2266}
2232 2267
2233static void sg_put_dev(struct sg_device *sdp)
2234{
2235 kref_put(&sdp->d_ref, sg_device_destroy);
2236}
2237
2238#ifdef CONFIG_SCSI_PROC_FS 2268#ifdef CONFIG_SCSI_PROC_FS
2239 2269
2240static struct proc_dir_entry *sg_proc_sgp = NULL; 2270static struct proc_dir_entry *sg_proc_sgp = NULL;
@@ -2451,8 +2481,7 @@ static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2451 2481
2452static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) 2482static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2453{ 2483{
2454 seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t" 2484 seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
2455 "online\n");
2456 return 0; 2485 return 0;
2457} 2486}
2458 2487
@@ -2508,7 +2537,11 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2508 2537
2509 read_lock_irqsave(&sg_index_lock, iflags); 2538 read_lock_irqsave(&sg_index_lock, iflags);
2510 sdp = it ? sg_lookup_dev(it->index) : NULL; 2539 sdp = it ? sg_lookup_dev(it->index) : NULL;
2511 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2540 if ((NULL == sdp) || (NULL == sdp->device) ||
2541 (atomic_read(&sdp->detaching)))
2542 seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2543 else {
2544 scsidp = sdp->device;
2512 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", 2545 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2513 scsidp->host->host_no, scsidp->channel, 2546 scsidp->host->host_no, scsidp->channel,
2514 scsidp->id, scsidp->lun, (int) scsidp->type, 2547 scsidp->id, scsidp->lun, (int) scsidp->type,
@@ -2516,8 +2549,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2516 (int) scsidp->queue_depth, 2549 (int) scsidp->queue_depth,
2517 (int) scsidp->device_busy, 2550 (int) scsidp->device_busy,
2518 (int) scsi_device_online(scsidp)); 2551 (int) scsi_device_online(scsidp));
2519 else 2552 }
2520 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2521 read_unlock_irqrestore(&sg_index_lock, iflags); 2553 read_unlock_irqrestore(&sg_index_lock, iflags);
2522 return 0; 2554 return 0;
2523} 2555}
@@ -2536,11 +2568,12 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2536 2568
2537 read_lock_irqsave(&sg_index_lock, iflags); 2569 read_lock_irqsave(&sg_index_lock, iflags);
2538 sdp = it ? sg_lookup_dev(it->index) : NULL; 2570 sdp = it ? sg_lookup_dev(it->index) : NULL;
2539 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2571 scsidp = sdp ? sdp->device : NULL;
2572 if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
2540 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", 2573 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2541 scsidp->vendor, scsidp->model, scsidp->rev); 2574 scsidp->vendor, scsidp->model, scsidp->rev);
2542 else 2575 else
2543 seq_printf(s, "<no active device>\n"); 2576 seq_puts(s, "<no active device>\n");
2544 read_unlock_irqrestore(&sg_index_lock, iflags); 2577 read_unlock_irqrestore(&sg_index_lock, iflags);
2545 return 0; 2578 return 0;
2546} 2579}
@@ -2585,12 +2618,12 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2585 else 2618 else
2586 cp = " "; 2619 cp = " ";
2587 } 2620 }
2588 seq_printf(s, cp); 2621 seq_puts(s, cp);
2589 blen = srp->data.bufflen; 2622 blen = srp->data.bufflen;
2590 usg = srp->data.k_use_sg; 2623 usg = srp->data.k_use_sg;
2591 seq_printf(s, srp->done ? 2624 seq_puts(s, srp->done ?
2592 ((1 == srp->done) ? "rcv:" : "fin:") 2625 ((1 == srp->done) ? "rcv:" : "fin:")
2593 : "act:"); 2626 : "act:");
2594 seq_printf(s, " id=%d blen=%d", 2627 seq_printf(s, " id=%d blen=%d",
2595 srp->header.pack_id, blen); 2628 srp->header.pack_id, blen);
2596 if (srp->done) 2629 if (srp->done)
@@ -2606,7 +2639,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2606 (int) srp->data.cmd_opcode); 2639 (int) srp->data.cmd_opcode);
2607 } 2640 }
2608 if (0 == m) 2641 if (0 == m)
2609 seq_printf(s, " No requests active\n"); 2642 seq_puts(s, " No requests active\n");
2610 read_unlock(&fp->rq_list_lock); 2643 read_unlock(&fp->rq_list_lock);
2611 } 2644 }
2612} 2645}
@@ -2622,31 +2655,34 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2622 Sg_device *sdp; 2655 Sg_device *sdp;
2623 unsigned long iflags; 2656 unsigned long iflags;
2624 2657
2625 if (it && (0 == it->index)) { 2658 if (it && (0 == it->index))
2626 seq_printf(s, "max_active_device=%d(origin 1)\n", 2659 seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
2627 (int)it->max); 2660 (int)it->max, sg_big_buff);
2628 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
2629 }
2630 2661
2631 read_lock_irqsave(&sg_index_lock, iflags); 2662 read_lock_irqsave(&sg_index_lock, iflags);
2632 sdp = it ? sg_lookup_dev(it->index) : NULL; 2663 sdp = it ? sg_lookup_dev(it->index) : NULL;
2633 if (sdp && !list_empty(&sdp->sfds)) { 2664 if (NULL == sdp)
2634 struct scsi_device *scsidp = sdp->device; 2665 goto skip;
2635 2666 read_lock(&sdp->sfd_lock);
2667 if (!list_empty(&sdp->sfds)) {
2636 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); 2668 seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2637 if (sdp->detached) 2669 if (atomic_read(&sdp->detaching))
2638 seq_printf(s, "detached pending close "); 2670 seq_puts(s, "detaching pending close ");
2639 else 2671 else if (sdp->device) {
2640 seq_printf 2672 struct scsi_device *scsidp = sdp->device;
2641 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2673
2642 scsidp->host->host_no, 2674 seq_printf(s, "%d:%d:%d:%d em=%d",
2643 scsidp->channel, scsidp->id, 2675 scsidp->host->host_no,
2644 scsidp->lun, 2676 scsidp->channel, scsidp->id,
2645 scsidp->host->hostt->emulated); 2677 scsidp->lun,
2646 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2678 scsidp->host->hostt->emulated);
2647 sdp->sg_tablesize, get_exclude(sdp)); 2679 }
2680 seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
2681 sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
2648 sg_proc_debug_helper(s, sdp); 2682 sg_proc_debug_helper(s, sdp);
2649 } 2683 }
2684 read_unlock(&sdp->sfd_lock);
2685skip:
2650 read_unlock_irqrestore(&sg_index_lock, iflags); 2686 read_unlock_irqrestore(&sg_index_lock, iflags);
2651 return 0; 2687 return 0;
2652} 2688}