author      Manfred Spraul <manfred@colorfullife.com>          2013-07-08 19:01:24 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>     2013-07-09 13:33:28 -0400
commit      f269f40ad5aeee229ed70044926f44318abe41ef (patch)
tree        878fd6dabbf34c154809ed21f6d64cb90c7e394c
parent      1a82e9e1d0f1b45f47a97c9e2349020536ff8987 (diff)
ipc/sem.c: always use only one queue for alter operations
There are two places that can contain alter operations:
- the global queue: sma->pending_alter
- the per-semaphore queues: sma->sem_base[].pending_alter.
Since one of the queues must be processed first, this causes an odd
prioritization of the wakeups: complex operations have priority over
simple ones.
The patch restores the behavior of Linux <= 3.0.9: the longest-waiting
operation has the highest priority.
This is done by using only one queue:
- if there are complex ops, then sma->pending_alter is used.
- otherwise, the per-semaphore queues are used.
As a side effect, do_smart_update() becomes much simpler: no more
goto logic.
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--    ipc/sem.c    128
1 file changed, 88 insertions(+), 40 deletions(-)
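The distinction the commit message draws can be seen from user space: a single-sop semop() is a "simple" alter operation that, if it has to sleep, waits on the per-semaphore queue, while a multi-sop semop() is a "complex" operation that waits on the global sma->pending_alter queue. Below is a minimal, hedged sketch of the two kinds of callers; the key, semaphore count, and values are arbitrary choices for illustration and are not part of the patch.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
        /* two semaphores in one array; key and permissions are arbitrary */
        int semid = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
        struct sembuf simple = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
        struct sembuf complex_op[2] = {
                { .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
                { .sem_num = 1, .sem_op =  1, .sem_flg = 0 },
        };

        if (semid < 0) {
                perror("semget");
                return 1;
        }

        /* single-sop alter op: would sleep on sem 0's per-semaphore queue */
        if (semop(semid, &simple, 1) < 0)
                perror("semop (simple)");

        /* multi-sop alter op: would sleep on the global pending_alter queue */
        if (semop(semid, complex_op, 2) < 0)
                perror("semop (complex)");

        semctl(semid, 0, IPC_RMID);
        return 0;
}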
diff --git a/ipc/sem.c b/ipc/sem.c
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -192,6 +192,53 @@ void __init sem_init (void)
                                 IPC_SEM_IDS, sysvipc_sem_proc_show);
 }
 
+/**
+ * unmerge_queues - unmerge queues, if possible.
+ * @sma: semaphore array
+ *
+ * The function unmerges the wait queues if complex_count is 0.
+ * It must be called prior to dropping the global semaphore array lock.
+ */
+static void unmerge_queues(struct sem_array *sma)
+{
+        struct sem_queue *q, *tq;
+
+        /* complex operations still around? */
+        if (sma->complex_count)
+                return;
+        /*
+         * We will switch back to simple mode.
+         * Move all pending operation back into the per-semaphore
+         * queues.
+         */
+        list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
+                struct sem *curr;
+                curr = &sma->sem_base[q->sops[0].sem_num];
+
+                list_add_tail(&q->list, &curr->pending_alter);
+        }
+        INIT_LIST_HEAD(&sma->pending_alter);
+}
+
+/**
+ * merge_queues - Merge single semop queues into global queue
+ * @sma: semaphore array
+ *
+ * This function merges all per-semaphore queues into the global queue.
+ * It is necessary to achieve FIFO ordering for the pending single-sop
+ * operations when a multi-semop operation must sleep.
+ * Only the alter operations must be moved, the const operations can stay.
+ */
+static void merge_queues(struct sem_array *sma)
+{
+        int i;
+        for (i = 0; i < sma->sem_nsems; i++) {
+                struct sem *sem = sma->sem_base + i;
+
+                list_splice_init(&sem->pending_alter, &sma->pending_alter);
+        }
+}
+
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
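The two helpers above establish the invariant the rest of the patch relies on: while complex operations are pending, every sleeping alter operation sits on the global queue and the per-semaphore alter queues stay empty. A hypothetical debug helper (not part of the patch) checking that direction of the invariant, assumed to be called with the global lock held, could look like this:

static void check_alter_queue_invariant(struct sem_array *sma)
{
        int i;

        /* nothing to check while the array is in simple mode */
        if (!sma->complex_count)
                return;

        /*
         * Complex ops pending: merge_queues() moved every sleeping alter
         * op onto sma->pending_alter, and new alter ops are queued there
         * directly, so the per-semaphore alter queues must be empty.
         */
        for (i = 0; i < sma->sem_nsems; i++)
                WARN_ON(!list_empty(&sma->sem_base[i].pending_alter));
}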
@@ -262,6 +309,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 static inline void sem_unlock(struct sem_array *sma, int locknum)
 {
         if (locknum == -1) {
+                unmerge_queues(sma);
                 ipc_unlock_object(&sma->sem_perm);
         } else {
                 struct sem *sem = sma->sem_base + locknum;
@@ -831,49 +879,38 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
                             int otime, struct list_head *pt)
 {
         int i;
-        int progress;
 
         otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
 
-        progress = 1;
-retry_global:
-        if (sma->complex_count) {
-                if (update_queue(sma, -1, pt)) {
-                        progress = 1;
-                        otime = 1;
-                        sops = NULL;
-                }
-        }
-        if (!progress)
-                goto done;
-
-        if (!sops) {
-                /* No semops; something special is going on. */
-                for (i = 0; i < sma->sem_nsems; i++) {
-                        if (update_queue(sma, i, pt)) {
-                                otime = 1;
-                                progress = 1;
+        if (!list_empty(&sma->pending_alter)) {
+                /* semaphore array uses the global queue - just process it. */
+                otime |= update_queue(sma, -1, pt);
+        } else {
+                if (!sops) {
+                        /*
+                         * No sops, thus the modified semaphores are not
+                         * known. Check all.
+                         */
+                        for (i = 0; i < sma->sem_nsems; i++)
+                                otime |= update_queue(sma, i, pt);
+                } else {
+                        /*
+                         * Check the semaphores that were increased:
+                         * - No complex ops, thus all sleeping ops are
+                         *   decrease.
+                         * - if we decreased the value, then any sleeping
+                         *   semaphore ops wont be able to run: If the
+                         *   previous value was too small, then the new
+                         *   value will be too small, too.
+                         */
+                        for (i = 0; i < nsops; i++) {
+                                if (sops[i].sem_op > 0) {
+                                        otime |= update_queue(sma,
+                                                        sops[i].sem_num, pt);
+                                }
                         }
                 }
-                goto done_checkretry;
-        }
-
-        /* Check the semaphores that were modified. */
-        for (i = 0; i < nsops; i++) {
-                if (sops[i].sem_op > 0 ||
-                        (sops[i].sem_op < 0 &&
-                                sma->sem_base[sops[i].sem_num].semval == 0))
-                        if (update_queue(sma, sops[i].sem_num, pt)) {
-                                otime = 1;
-                                progress = 1;
-                        }
-        }
-done_checkretry:
-        if (progress) {
-                progress = 0;
-                goto retry_global;
         }
-done:
         if (otime)
                 sma->sem_otime = get_seconds();
 }
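The rewritten do_smart_update() replaces the retry/goto loop with a single decision: scan only the global queue when it is in use, otherwise scan the per-semaphore queues of the semaphores that were actually increased (with no complex ops pending, every sleeper is a decrease, so only an increase can unblock it). The following standalone sketch shows that decision tree under stated assumptions; smart_scan, scan_global_queue and scan_semaphore_queue are illustrative stand-ins, not kernel functions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct op {
        size_t sem_num;   /* which semaphore the op touches */
        int sem_op;       /* delta applied to its value */
};

/* stand-ins for update_queue(sma, -1, pt) and update_queue(sma, i, pt) */
static bool scan_global_queue(void) { return false; }
static bool scan_semaphore_queue(size_t num) { (void)num; return false; }

static bool smart_scan(bool global_queue_in_use,
                       const struct op *ops, size_t nops, size_t nsems)
{
        bool woke = false;
        size_t i;

        if (global_queue_in_use)
                return scan_global_queue();   /* complex mode: one queue only */

        if (!ops) {
                /* modified semaphores unknown: check them all */
                for (i = 0; i < nsems; i++)
                        woke |= scan_semaphore_queue(i);
                return woke;
        }

        /* simple mode: only an increase can wake a sleeping (decreasing) op */
        for (i = 0; i < nops; i++)
                if (ops[i].sem_op > 0)
                        woke |= scan_semaphore_queue(ops[i].sem_num);
        return woke;
}

int main(void)
{
        struct op ops[] = { { 0, +1 }, { 1, -1 } };

        printf("woke someone: %d\n", smart_scan(false, ops, 2, 4));
        return 0;
}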
@@ -1747,11 +1784,22 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                 struct sem *curr;
                 curr = &sma->sem_base[sops->sem_num];
 
-                if (alter)
-                        list_add_tail(&queue.list, &curr->pending_alter);
-                else
+                if (alter) {
+                        if (sma->complex_count) {
+                                list_add_tail(&queue.list,
+                                                &sma->pending_alter);
+                        } else {
+
+                                list_add_tail(&queue.list,
+                                                &curr->pending_alter);
+                        }
+                } else {
                         list_add_tail(&queue.list, &curr->pending_const);
+                }
         } else {
+                if (!sma->complex_count)
+                        merge_queues(sma);
+
                 if (alter)
                         list_add_tail(&queue.list, &sma->pending_alter);
                 else