author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>          2009-02-15 23:26:53 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2009-03-12 13:58:14 -0400
commit     3442f802a8169a0c18d411d95f0e71b9205ed607 (patch)
tree       80ea3057877ed78be770e9e9edcc6b51b5830649 /drivers/scsi
parent     b2ed6c69aa3c1a3f496e2a72f770d53069371df3 (diff)
[SCSI] sg: remove the own list management for struct sg_fd
This replaces the driver's hand-rolled list management for struct sg_fd
with the standard list_head infrastructure.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/sg.c | 50
1 file changed, 14 insertions(+), 36 deletions(-)
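The patch relies on the kernel's intrusive list primitives from <linux/list.h>: INIT_LIST_HEAD(), list_add_tail(), list_del(), list_empty() and list_for_each_entry(). The sketch below is a minimal, self-contained userspace illustration of that pattern, not the driver code: the helper bodies are simplified stand-ins that mirror the kernel semantics (and, like the kernel, assume GCC/Clang's typeof extension), and struct dev / struct fd are hypothetical analogues of sg_device and sg_fd.

#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Point an empty list head at itself. */
static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

/* Link @entry in just before @head, i.e. at the tail of the list. */
static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Unlink @entry by stitching its neighbours back together. */
static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Recover the containing structure from an embedded list_head. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                           \
	for (pos = container_of((head)->next, typeof(*pos), member);     \
	     &pos->member != (head);                                      \
	     pos = container_of(pos->member.next, typeof(*pos), member))

/* Cut-down analogues of sg_device and sg_fd with embedded list links. */
struct fd {
	int id;
	struct list_head siblings;	/* plays the role of sfd_siblings */
};

struct dev {
	struct list_head fds;		/* plays the role of sdp->sfds */
};

int main(void)
{
	struct dev d;
	struct fd a = { .id = 1 }, b = { .id = 2 };
	struct fd *pos;

	INIT_LIST_HEAD(&d.fds);			/* as sg_alloc() does */
	list_add_tail(&a.siblings, &d.fds);	/* as sg_add_sfp() does */
	list_add_tail(&b.siblings, &d.fds);

	list_for_each_entry(pos, &d.fds, siblings)	/* as sg_remove() walks */
		printf("open fd %d\n", pos->id);

	list_del(&a.siblings);			/* as sg_remove_sfp() does */
	printf("any fds left? %s\n", list_empty(&d.fds) ? "no" : "yes");
	return 0;
}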
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d45ac4aab92f..7bf54c9a33bb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -140,7 +140,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
 } Sg_request;
 
 typedef struct sg_fd {		/* holds the state of a file descriptor */
-	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
+	struct list_head sfd_siblings;
 	struct sg_device *parentdp;	/* owning device */
 	wait_queue_head_t read_wait;	/* queue read until command done */
 	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
@@ -167,7 +167,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
 	int sg_tablesize;	/* adapter's max scatter-gather table size */
 	u32 index;		/* device index number */
-	Sg_fd *headfp;		/* first open fd belonging to this device */
+	struct list_head sfds;
 	volatile char detached;	/* 0->attached, 1->detached pending removal */
 	volatile char exclude;	/* opened for exclusive access */
 	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
@@ -258,13 +258,13 @@ sg_open(struct inode *inode, struct file *filp)
 			retval = -EPERM; /* Can't lock it with read only access */
 			goto error_out;
 		}
-		if (sdp->headfp && (flags & O_NONBLOCK)) {
+		if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) {
 			retval = -EBUSY;
 			goto error_out;
 		}
 		res = 0;
 		__wait_event_interruptible(sdp->o_excl_wait,
-			((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
+			((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
 		if (res) {
 			retval = res;	/* -ERESTARTSYS because signal hit process */
 			goto error_out;
@@ -286,7 +286,7 @@ sg_open(struct inode *inode, struct file *filp)
 		retval = -ENODEV;
 		goto error_out;
 	}
-	if (!sdp->headfp) {	/* no existing opens on this device */
+	if (list_empty(&sdp->sfds)) {	/* no existing opens on this device */
 		sdp->sgdebug = 0;
 		q = sdp->device->request_queue;
 		sdp->sg_tablesize = min(q->max_hw_segments,
@@ -1375,6 +1375,7 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
 	disk->first_minor = k;
 	sdp->disk = disk;
 	sdp->device = scsidp;
+	INIT_LIST_HEAD(&sdp->sfds);
 	init_waitqueue_head(&sdp->o_excl_wait);
 	sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
 	sdp->index = k;
@@ -1517,7 +1518,7 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
 	/* Need a write lock to set sdp->detached. */
 	write_lock_irqsave(&sg_index_lock, iflags);
 	sdp->detached = 1;
-	for (sfp = sdp->headfp; sfp; sfp = sfp->nextfp) {
+	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
 		wake_up_interruptible(&sfp->read_wait);
 		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
 	}
@@ -2024,14 +2025,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
 	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
 	sfp->parentdp = sdp;
 	write_lock_irqsave(&sg_index_lock, iflags);
-	if (!sdp->headfp)
-		sdp->headfp = sfp;
-	else {			/* add to tail of existing list */
-		Sg_fd *pfp = sdp->headfp;
-		while (pfp->nextfp)
-			pfp = pfp->nextfp;
-		pfp->nextfp = sfp;
-	}
+	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
 	write_unlock_irqrestore(&sg_index_lock, iflags);
 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
 	if (unlikely(sg_big_buff != def_reserved_size))
@@ -2080,28 +2074,10 @@ static void sg_remove_sfp(struct kref *kref)
 {
 	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
 	struct sg_device *sdp = sfp->parentdp;
-	Sg_fd *fp;
-	Sg_fd *prev_fp;
 	unsigned long iflags;
 
-	/* CAUTION! Note that sfp can still be found by walking sdp->headfp
-	 * even though the refcount is now 0. Therefore, unlink sfp from
-	 * sdp->headfp BEFORE doing any other cleanup.
-	 */
-
 	write_lock_irqsave(&sg_index_lock, iflags);
-	prev_fp = sdp->headfp;
-	if (sfp == prev_fp)
-		sdp->headfp = prev_fp->nextfp;
-	else {
-		while ((fp = prev_fp->nextfp)) {
-			if (sfp == fp) {
-				prev_fp->nextfp = fp->nextfp;
-				break;
-			}
-			prev_fp = fp;
-		}
-	}
+	list_del(&sfp->sfd_siblings);
 	write_unlock_irqrestore(&sg_index_lock, iflags);
 	wake_up_interruptible(&sdp->o_excl_wait);
 
@@ -2486,10 +2462,12 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 	const char * cp;
 	unsigned int ms;
 
-	for (k = 0, fp = sdp->headfp; fp != NULL; ++k, fp = fp->nextfp) {
+	k = 0;
+	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
+		k++;
 		read_lock(&fp->rq_list_lock); /* irqs already disabled */
 		seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
-			   "(res)sgat=%d low_dma=%d\n", k + 1,
+			   "(res)sgat=%d low_dma=%d\n", k,
 			   jiffies_to_msecs(fp->timeout),
 			   fp->reserve.bufflen,
 			   (int) fp->reserve.k_use_sg,
@@ -2559,7 +2537,7 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
 
 	read_lock_irqsave(&sg_index_lock, iflags);
 	sdp = it ? sg_lookup_dev(it->index) : NULL;
-	if (sdp && sdp->headfp) {
+	if (sdp && !list_empty(&sdp->sfds)) {
 		struct scsi_device *scsidp = sdp->device;
 
 		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);