diffstat:
 kernel/sched_fair.c | 11
 kernel/semaphore.c  | 64
 2 files changed, 38 insertions(+), 37 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c863663d204d..e24ecd39c4b8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -662,10 +662,15 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
 		if (sched_feat(NEW_FAIR_SLEEPERS)) {
+			unsigned long thresh = sysctl_sched_latency;
+
+			/*
+			 * convert the sleeper threshold into virtual time
+			 */
 			if (sched_feat(NORMALIZED_SLEEPER))
-				vruntime -= calc_delta_weight(sysctl_sched_latency, se);
-			else
-				vruntime -= sysctl_sched_latency;
+				thresh = calc_delta_fair(thresh, se);
+
+			vruntime -= thresh;
 		}
 
 		/* ensure we never gain time by being placed backwards. */
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 5c2942e768cd..5e41217239e8 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -54,10 +54,9 @@ void down(struct semaphore *sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
+	if (unlikely(!sem->count))
 		__down(sem);
+	sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(down);
@@ -77,10 +76,10 @@ int down_interruptible(struct semaphore *sem)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
+	if (unlikely(!sem->count))
 		result = __down_interruptible(sem);
+	if (!result)
+		sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -103,10 +102,10 @@ int down_killable(struct semaphore *sem)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
+	if (unlikely(!sem->count))
 		result = __down_killable(sem);
+	if (!result)
+		sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -157,10 +156,10 @@ int down_timeout(struct semaphore *sem, long jiffies)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(sem->count > 0))
-		sem->count--;
-	else
+	if (unlikely(!sem->count))
 		result = __down_timeout(sem, jiffies);
+	if (!result)
+		sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -179,9 +178,8 @@ void up(struct semaphore *sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (likely(list_empty(&sem->wait_list)))
-		sem->count++;
-	else
+	sem->count++;
+	if (unlikely(!list_empty(&sem->wait_list)))
 		__up(sem);
 	spin_unlock_irqrestore(&sem->lock, flags);
 }
@@ -192,7 +190,6 @@ EXPORT_SYMBOL(up);
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
-	int up;
 };
 
 /*
@@ -205,33 +202,34 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 {
 	struct task_struct *task = current;
 	struct semaphore_waiter waiter;
+	int ret = 0;
 
-	list_add_tail(&waiter.list, &sem->wait_list);
 	waiter.task = task;
-	waiter.up = 0;
+	list_add_tail(&waiter.list, &sem->wait_list);
 
 	for (;;) {
-		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
-			goto interrupted;
-		if (state == TASK_KILLABLE && fatal_signal_pending(task))
-			goto interrupted;
-		if (timeout <= 0)
-			goto timed_out;
+		if (state == TASK_INTERRUPTIBLE && signal_pending(task)) {
+			ret = -EINTR;
+			break;
+		}
+		if (state == TASK_KILLABLE && fatal_signal_pending(task)) {
+			ret = -EINTR;
+			break;
+		}
+		if (timeout <= 0) {
+			ret = -ETIME;
+			break;
+		}
 		__set_task_state(task, state);
 		spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
 		spin_lock_irq(&sem->lock);
-		if (waiter.up)
-			return 0;
+		if (sem->count > 0)
+			break;
 	}
 
-timed_out:
-	list_del(&waiter.list);
-	return -ETIME;
-
-interrupted:
 	list_del(&waiter.list);
-	return -EINTR;
+	return ret;
 }
 
 static noinline void __sched __down(struct semaphore *sem)
@@ -258,7 +256,5 @@ static noinline void __sched __up(struct semaphore *sem)
 {
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
-	list_del(&waiter->list);
-	waiter->up = 1;
 	wake_up_process(waiter->task);
 }
