diff options
| author | Nick Piggin <npiggin@suse.de> | 2009-12-15 19:47:28 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-16 10:20:09 -0500 |
| commit | bf17bb717759d50a2733a7a8157a7c4a25d93abc (patch) | |
| tree | 22468665036b37922664529df976c890c5bed2d0 /ipc | |
| parent | 7d6feeb287c61aafa88f06345387b1188edf4b86 (diff) | |
ipc/sem.c: sem optimise undo list search
Around a month ago, there was some discussion about an improvement of the
sysv sem algorithm: Most (at least: some important) users only use simple
semaphore operations, therefore it's worthwhile to optimize this use case.
This patch:
Move last looked up sem_undo struct to the head of the task's undo list.
Attempt to move common entries to the front of the list so search time is
reduced. This reduces lookup_undo on oprofile of problematic SAP workload
by 30% (see patch 4 for a description of SAP workload).
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Pierre Peiffer <peifferp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc')
| -rw-r--r-- | ipc/sem.c | 26 |
1 file changed, 20 insertions, 6 deletions
| @@ -962,17 +962,31 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) | |||
| 962 | return 0; | 962 | return 0; |
| 963 | } | 963 | } |
| 964 | 964 | ||
| 965 | static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) | 965 | static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid) |
| 966 | { | 966 | { |
| 967 | struct sem_undo *walk; | 967 | struct sem_undo *un; |
| 968 | 968 | ||
| 969 | list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) { | 969 | list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) { |
| 970 | if (walk->semid == semid) | 970 | if (un->semid == semid) |
| 971 | return walk; | 971 | return un; |
| 972 | } | 972 | } |
| 973 | return NULL; | 973 | return NULL; |
| 974 | } | 974 | } |
| 975 | 975 | ||
| 976 | static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) | ||
| 977 | { | ||
| 978 | struct sem_undo *un; | ||
| 979 | |||
| 980 | assert_spin_locked(&ulp->lock); | ||
| 981 | |||
| 982 | un = __lookup_undo(ulp, semid); | ||
| 983 | if (un) { | ||
| 984 | list_del_rcu(&un->list_proc); | ||
| 985 | list_add_rcu(&un->list_proc, &ulp->list_proc); | ||
| 986 | } | ||
| 987 | return un; | ||
| 988 | } | ||
| 989 | |||
| 976 | /** | 990 | /** |
| 977 | * find_alloc_undo - Lookup (and if not present create) undo array | 991 | * find_alloc_undo - Lookup (and if not present create) undo array |
| 978 | * @ns: namespace | 992 | * @ns: namespace |
| @@ -1308,7 +1322,7 @@ void exit_sem(struct task_struct *tsk) | |||
| 1308 | if (IS_ERR(sma)) | 1322 | if (IS_ERR(sma)) |
| 1309 | continue; | 1323 | continue; |
| 1310 | 1324 | ||
| 1311 | un = lookup_undo(ulp, semid); | 1325 | un = __lookup_undo(ulp, semid); |
| 1312 | if (un == NULL) { | 1326 | if (un == NULL) { |
| 1313 | /* exit_sem raced with IPC_RMID+semget() that created | 1327 | /* exit_sem raced with IPC_RMID+semget() that created |
| 1314 | * exactly the same semid. Nothing to do. | 1328 | * exactly the same semid. Nothing to do. |
