diff options
author | Manfred Spraul <manfred@colorfullife.com> | 2014-06-06 17:37:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-06 19:08:15 -0400 |
commit | 2f2ed41dcaec34f2d6f224aa84efcc5a9dd8d5c3 (patch) | |
tree | 4122b855351d0747a49e47c4d675f0f83df7eafa /ipc/sem.c | |
parent | 1994862dc9c16f360a9169a4d27200d15ba29713 (diff) |
ipc/sem.c: remove code duplication
count_semzcnt and count_semncnt are more or less identical. The patch
creates a single function that either counts the number of tasks waiting
for zero or waiting due to a decrease operation.
Compared to the initial version, the BUG_ONs were removed.
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc/sem.c')
-rw-r--r-- | ipc/sem.c | 107 |
1 file changed, 51 insertions, 56 deletions
@@ -47,8 +47,7 @@ | |||
47 | * Thus: Perfect SMP scaling between independent semaphore arrays. | 47 | * Thus: Perfect SMP scaling between independent semaphore arrays. |
48 | * If multiple semaphores in one array are used, then cache line | 48 | * If multiple semaphores in one array are used, then cache line |
49 | * trashing on the semaphore array spinlock will limit the scaling. | 49 | * trashing on the semaphore array spinlock will limit the scaling. |
50 | * - semncnt and semzcnt are calculated on demand in count_semncnt() and | 50 | * - semncnt and semzcnt are calculated on demand in count_semcnt() |
51 | * count_semzcnt() | ||
52 | * - the task that performs a successful semop() scans the list of all | 51 | * - the task that performs a successful semop() scans the list of all |
53 | * sleeping tasks and completes any pending operations that can be fulfilled. | 52 | * sleeping tasks and completes any pending operations that can be fulfilled. |
54 | * Semaphores are actively given to waiting tasks (necessary for FIFO). | 53 | * Semaphores are actively given to waiting tasks (necessary for FIFO). |
@@ -989,6 +988,31 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop | |||
989 | set_semotime(sma, sops); | 988 | set_semotime(sma, sops); |
990 | } | 989 | } |
991 | 990 | ||
991 | /* | ||
992 | * check_qop: Test how often a queued operation sleeps on the semaphore semnum | ||
993 | */ | ||
994 | static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, | ||
995 | bool count_zero) | ||
996 | { | ||
997 | struct sembuf *sops = q->sops; | ||
998 | int nsops = q->nsops; | ||
999 | int i, semcnt; | ||
1000 | |||
1001 | semcnt = 0; | ||
1002 | |||
1003 | for (i = 0; i < nsops; i++) { | ||
1004 | if (sops[i].sem_num != semnum) | ||
1005 | continue; | ||
1006 | if (sops[i].sem_flg & IPC_NOWAIT) | ||
1007 | continue; | ||
1008 | if (count_zero && sops[i].sem_op == 0) | ||
1009 | semcnt++; | ||
1010 | if (!count_zero && sops[i].sem_op < 0) | ||
1011 | semcnt++; | ||
1012 | } | ||
1013 | return semcnt; | ||
1014 | } | ||
1015 | |||
992 | /* The following counts are associated to each semaphore: | 1016 | /* The following counts are associated to each semaphore: |
993 | * semncnt number of tasks waiting on semval being nonzero | 1017 | * semncnt number of tasks waiting on semval being nonzero |
994 | * semzcnt number of tasks waiting on semval being zero | 1018 | * semzcnt number of tasks waiting on semval being zero |
@@ -998,66 +1022,37 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop | |||
998 | * The counts we return here are a rough approximation, but still | 1022 | * The counts we return here are a rough approximation, but still |
999 | * warrant that semncnt+semzcnt>0 if the task is on the pending queue. | 1023 | * warrant that semncnt+semzcnt>0 if the task is on the pending queue. |
1000 | */ | 1024 | */ |
1001 | static int count_semncnt(struct sem_array *sma, ushort semnum) | 1025 | static int count_semcnt(struct sem_array *sma, ushort semnum, |
1026 | bool count_zero) | ||
1002 | { | 1027 | { |
1003 | int semncnt; | 1028 | struct list_head *l; |
1004 | struct sem_queue *q; | 1029 | struct sem_queue *q; |
1030 | int semcnt; | ||
1005 | 1031 | ||
1006 | semncnt = 0; | 1032 | semcnt = 0; |
1007 | list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) { | 1033 | /* First: check the simple operations. They are easy to evaluate */ |
1008 | struct sembuf *sops = q->sops; | 1034 | if (count_zero) |
1009 | BUG_ON(sops->sem_num != semnum); | 1035 | l = &sma->sem_base[semnum].pending_const; |
1010 | if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT)) | 1036 | else |
1011 | semncnt++; | 1037 | l = &sma->sem_base[semnum].pending_alter; |
1012 | } | ||
1013 | |||
1014 | list_for_each_entry(q, &sma->pending_alter, list) { | ||
1015 | struct sembuf *sops = q->sops; | ||
1016 | int nsops = q->nsops; | ||
1017 | int i; | ||
1018 | for (i = 0; i < nsops; i++) | ||
1019 | if (sops[i].sem_num == semnum | ||
1020 | && (sops[i].sem_op < 0) | ||
1021 | && !(sops[i].sem_flg & IPC_NOWAIT)) | ||
1022 | semncnt++; | ||
1023 | } | ||
1024 | return semncnt; | ||
1025 | } | ||
1026 | |||
1027 | static int count_semzcnt(struct sem_array *sma, ushort semnum) | ||
1028 | { | ||
1029 | int semzcnt; | ||
1030 | struct sem_queue *q; | ||
1031 | 1038 | ||
1032 | semzcnt = 0; | 1039 | list_for_each_entry(q, l, list) { |
1033 | list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) { | 1040 | /* all task on a per-semaphore list sleep on exactly |
1034 | struct sembuf *sops = q->sops; | 1041 | * that semaphore |
1035 | BUG_ON(sops->sem_num != semnum); | 1042 | */ |
1036 | if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT)) | 1043 | semcnt++; |
1037 | semzcnt++; | ||
1038 | } | 1044 | } |
1039 | 1045 | ||
1040 | list_for_each_entry(q, &sma->pending_const, list) { | 1046 | /* Then: check the complex operations. */ |
1041 | struct sembuf *sops = q->sops; | ||
1042 | int nsops = q->nsops; | ||
1043 | int i; | ||
1044 | for (i = 0; i < nsops; i++) | ||
1045 | if (sops[i].sem_num == semnum | ||
1046 | && (sops[i].sem_op == 0) | ||
1047 | && !(sops[i].sem_flg & IPC_NOWAIT)) | ||
1048 | semzcnt++; | ||
1049 | } | ||
1050 | list_for_each_entry(q, &sma->pending_alter, list) { | 1047 | list_for_each_entry(q, &sma->pending_alter, list) { |
1051 | struct sembuf *sops = q->sops; | 1048 | semcnt += check_qop(sma, semnum, q, count_zero); |
1052 | int nsops = q->nsops; | 1049 | } |
1053 | int i; | 1050 | if (count_zero) { |
1054 | for (i = 0; i < nsops; i++) | 1051 | list_for_each_entry(q, &sma->pending_const, list) { |
1055 | if (sops[i].sem_num == semnum | 1052 | semcnt += check_qop(sma, semnum, q, count_zero); |
1056 | && (sops[i].sem_op == 0) | 1053 | } |
1057 | && !(sops[i].sem_flg & IPC_NOWAIT)) | ||
1058 | semzcnt++; | ||
1059 | } | 1054 | } |
1060 | return semzcnt; | 1055 | return semcnt; |
1061 | } | 1056 | } |
1062 | 1057 | ||
1063 | /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked | 1058 | /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked |
@@ -1459,10 +1454,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, | |||
1459 | err = curr->sempid; | 1454 | err = curr->sempid; |
1460 | goto out_unlock; | 1455 | goto out_unlock; |
1461 | case GETNCNT: | 1456 | case GETNCNT: |
1462 | err = count_semncnt(sma, semnum); | 1457 | err = count_semcnt(sma, semnum, 0); |
1463 | goto out_unlock; | 1458 | goto out_unlock; |
1464 | case GETZCNT: | 1459 | case GETZCNT: |
1465 | err = count_semzcnt(sma, semnum); | 1460 | err = count_semcnt(sma, semnum, 1); |
1466 | goto out_unlock; | 1461 | goto out_unlock; |
1467 | } | 1462 | } |
1468 | 1463 | ||