author	Stefan Richter <stefanr@s5r6.in-berlin.de>	2006-07-03 12:02:33 -0400
committer	Ben Collins <bcollins@ubuntu.com>	2006-07-03 12:02:33 -0400
commit	45289bf6ac70b106f5000d10b040e4485dd3e9d5 (patch)
tree	b01499ed66e27d512b9c573bbd2a5fd78d761f1c /drivers/ieee1394/raw1394.c
parent	438bd525e5240a48233cd3290f7fe66ff0167e20 (diff)
[PATCH] ieee1394: raw1394: remove redundant counting semaphore
An already existing wait queue replaces raw1394's complete_sem which was
maintained in parallel to the wait queue. The role of the semaphore's
counter is taken over by a direct check of what was really counted: the
presence of items in the list of completed requests.

Notes:
 - raw1394_release() sleeps uninterruptibly until all requests were
   completed. This is the same behaviour as before the patch.
 - The macros wait_event and wait_event_interruptible are called with a
   condition argument which has a side effect, i.e. manipulation of the
   requests list. This side effect happens only if the condition is true.
   The patch relies on the fact that wait_event[_interruptible] does not
   evaluate the condition again after it became true.
 - The diffstat looks unfavorable with respect to added lines of code.
   However, 19 of them are comments, and some are due to separation of
   existing code blocks into two small helper functions.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Signed-off-by: Ben Collins <bcollins@ubuntu.com>
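The first note's "list as counter" idea fits in a few lines. The following is a minimal sketch of the pattern the patch adopts, not the driver source itself: the spinlock-guarded list of completed requests is the real counter, and a plain wait queue only signals that the list may have changed. Field names (req_complete, reqlists_lock, wait_complete) follow the patch; the helpers complete_one(), take_one() and wait_for_one() are illustrative stand-ins, not functions from raw1394.c.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct file_info {
	struct list_head req_complete;   /* the "counter": completed requests */
	spinlock_t reqlists_lock;        /* protects req_complete */
	wait_queue_head_t wait_complete; /* replaces complete_sem */
};

/* producer: queue a completed request and wake any reader */
static void complete_one(struct file_info *fi, struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&fi->reqlists_lock, flags);
	list_add_tail(item, &fi->req_complete);
	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
	wake_up(&fi->wait_complete);     /* was: up(&fi->complete_sem) */
}

/* consumer: atomically take the next completed request, NULL if none */
static struct list_head *take_one(struct file_info *fi)
{
	struct list_head *item = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fi->reqlists_lock, flags);
	if (!list_empty(&fi->req_complete)) {
		item = fi->req_complete.next;
		list_del(item);
	}
	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
	return item;
}

/* blocking read path: the side-effecting condition dequeues the item */
static struct list_head *wait_for_one(struct file_info *fi)
{
	struct list_head *item;

	if (wait_event_interruptible(fi->wait_complete,
				     (item = take_one(fi))))
		return NULL;             /* interrupted by a signal */
	return item;
}

Because the condition is re-checked under the lock after every wakeup, a wakeup that races with another consumer is harmless: the loser simply sees an empty list and sleeps again, so no separate counter is needed.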
Diffstat (limited to 'drivers/ieee1394/raw1394.c')
-rw-r--r--	drivers/ieee1394/raw1394.c	91
1 file changed, 57 insertions(+), 34 deletions(-)
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 46c88e8a24f3..840b705fd5dd 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -133,10 +133,9 @@ static void free_pending_request(struct pending_request *req)
 static void __queue_complete_req(struct pending_request *req)
 {
 	struct file_info *fi = req->file_info;
-	list_move_tail(&req->list, &fi->req_complete);
 
-	up(&fi->complete_sem);
-	wake_up_interruptible(&fi->poll_wait_complete);
+	list_move_tail(&req->list, &fi->req_complete);
+	wake_up(&fi->wait_complete);
 }
 
 static void queue_complete_req(struct pending_request *req)
@@ -464,13 +463,36 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
 
 #endif
 
+/* get next completed request (caller must hold fi->reqlists_lock) */
+static inline struct pending_request *__next_complete_req(struct file_info *fi)
+{
+	struct list_head *lh;
+	struct pending_request *req = NULL;
+
+	if (!list_empty(&fi->req_complete)) {
+		lh = fi->req_complete.next;
+		list_del(lh);
+		req = list_entry(lh, struct pending_request, list);
+	}
+	return req;
+}
+
+/* atomically get next completed request */
+static struct pending_request *next_complete_req(struct file_info *fi)
+{
+	unsigned long flags;
+	struct pending_request *req;
+
+	spin_lock_irqsave(&fi->reqlists_lock, flags);
+	req = __next_complete_req(fi);
+	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
+	return req;
+}
 
 static ssize_t raw1394_read(struct file *file, char __user * buffer,
 			    size_t count, loff_t * offset_is_ignored)
 {
-	unsigned long flags;
 	struct file_info *fi = (struct file_info *)file->private_data;
-	struct list_head *lh;
 	struct pending_request *req;
 	ssize_t ret;
 
@@ -488,22 +510,21 @@ static ssize_t raw1394_read(struct file *file, char __user * buffer,
 	}
 
 	if (file->f_flags & O_NONBLOCK) {
-		if (down_trylock(&fi->complete_sem)) {
+		if (!(req = next_complete_req(fi)))
 			return -EAGAIN;
-		}
 	} else {
-		if (down_interruptible(&fi->complete_sem)) {
+		/*
+		 * NB: We call the macro wait_event_interruptible() with a
+		 * condition argument with side effect. This is only possible
+		 * because the side effect does not occur until the condition
+		 * became true, and wait_event_interruptible() won't evaluate
+		 * the condition again after that.
+		 */
+		if (wait_event_interruptible(fi->wait_complete,
+					     (req = next_complete_req(fi))))
 			return -ERESTARTSYS;
-		}
 	}
 
-	spin_lock_irqsave(&fi->reqlists_lock, flags);
-	lh = fi->req_complete.next;
-	list_del(lh);
-	spin_unlock_irqrestore(&fi->reqlists_lock, flags);
-
-	req = list_entry(lh, struct pending_request, list);
-
 	if (req->req.length) {
 		if (copy_to_user(int2ptr(req->req.recvb), req->data,
 				 req->req.length)) {
@@ -2745,7 +2766,7 @@ static unsigned int raw1394_poll(struct file *file, poll_table * pt)
 	unsigned int mask = POLLOUT | POLLWRNORM;
 	unsigned long flags;
 
-	poll_wait(file, &fi->poll_wait_complete, pt);
+	poll_wait(file, &fi->wait_complete, pt);
 
 	spin_lock_irqsave(&fi->reqlists_lock, flags);
 	if (!list_empty(&fi->req_complete)) {
@@ -2770,9 +2791,8 @@ static int raw1394_open(struct inode *inode, struct file *file)
 	fi->state = opened;
 	INIT_LIST_HEAD(&fi->req_pending);
 	INIT_LIST_HEAD(&fi->req_complete);
-	sema_init(&fi->complete_sem, 0);
 	spin_lock_init(&fi->reqlists_lock);
-	init_waitqueue_head(&fi->poll_wait_complete);
+	init_waitqueue_head(&fi->wait_complete);
 	INIT_LIST_HEAD(&fi->addr_list);
 
 	file->private_data = fi;
@@ -2785,7 +2805,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
 	struct file_info *fi = file->private_data;
 	struct list_head *lh;
 	struct pending_request *req;
-	int done = 0, i, fail = 0;
+	int i, fail;
 	int retval = 0;
 	struct list_head *entry;
 	struct arm_addr *addr = NULL;
@@ -2865,25 +2885,28 @@ static int raw1394_release(struct inode *inode, struct file *file)
2865 "error(s) occurred \n"); 2885 "error(s) occurred \n");
2866 } 2886 }
2867 2887
2868 while (!done) { 2888 for (;;) {
2889 /* This locked section guarantees that neither
2890 * complete nor pending requests exist once i!=0 */
2869 spin_lock_irqsave(&fi->reqlists_lock, flags); 2891 spin_lock_irqsave(&fi->reqlists_lock, flags);
2870 2892 while ((req = __next_complete_req(fi)))
2871 while (!list_empty(&fi->req_complete)) {
2872 lh = fi->req_complete.next;
2873 list_del(lh);
2874
2875 req = list_entry(lh, struct pending_request, list);
2876
2877 free_pending_request(req); 2893 free_pending_request(req);
2878 }
2879
2880 if (list_empty(&fi->req_pending))
2881 done = 1;
2882 2894
2895 i = list_empty(&fi->req_pending);
2883 spin_unlock_irqrestore(&fi->reqlists_lock, flags); 2896 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
2884 2897
2885 if (!done) 2898 if (i)
2886 down_interruptible(&fi->complete_sem); 2899 break;
2900 /*
2901 * Sleep until more requests can be freed.
2902 *
2903 * NB: We call the macro wait_event() with a condition argument
2904 * with side effect. This is only possible because the side
2905 * effect does not occur until the condition became true, and
2906 * wait_event() won't evaluate the condition again after that.
2907 */
2908 wait_event(fi->wait_complete, (req = next_complete_req(fi)));
2909 free_pending_request(req);
2887 } 2910 }
2888 2911
2889 /* Remove any sub-trees left by user space programs */ 2912 /* Remove any sub-trees left by user space programs */
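For reference on the two NB comments in the hunks above: a simplified paraphrase of the wait_event() macro (the real definition lives in include/linux/wait.h and differs in detail; this is a sketch, not the kernel's exact text) makes it visible why the side-effecting condition is evaluated at most once after it turns true.

/* Simplified paraphrase of wait_event(), for illustration only. */
#define my_wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)		/* true: leave the loop; the	*/ \
			break;		/* condition is not re-read	*/ \
		schedule();		/* false: sleep, then re-check	*/ \
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

Each iteration either finds the condition false (next_complete_req() returned NULL, leaving the request list untouched) and sleeps, or finds it true (exactly one request was dequeued) and breaks out without evaluating the condition again, so each successful wait consumes exactly one completed request.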