Diffstat (limited to 'kernel')
-rw-r--r--	kernel/module.c		25
-rw-r--r--	kernel/sched.c		27
-rw-r--r--	kernel/semaphore.c	64
3 files changed, 55 insertions, 61 deletions
diff --git a/kernel/module.c b/kernel/module.c
index 8e4528c9909f..f5e9491ef7ac 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -917,6 +917,10 @@ static int check_version(Elf_Shdr *sechdrs,
 	if (!crc)
 		return 1;
 
+	/* No versions at all? modprobe --force does this. */
+	if (versindex == 0)
+		return try_to_force_load(mod, symname) == 0;
+
 	versions = (void *) sechdrs[versindex].sh_addr;
 	num_versions = sechdrs[versindex].sh_size
 		/ sizeof(struct modversion_info);
@@ -932,8 +936,9 @@ static int check_version(Elf_Shdr *sechdrs,
 		goto bad_version;
 	}
 
-	if (!try_to_force_load(mod, symname))
-		return 1;
+	printk(KERN_WARNING "%s: no symbol version for %s\n",
+	       mod->name, symname);
+	return 0;
 
 bad_version:
 	printk("%s: disagrees about version of symbol %s\n",
@@ -952,11 +957,14 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 	return check_version(sechdrs, versindex, "struct_module", mod, crc);
 }
 
-/* First part is kernel version, which we ignore. */
-static inline int same_magic(const char *amagic, const char *bmagic)
+/* First part is kernel version, which we ignore if module has crcs. */
+static inline int same_magic(const char *amagic, const char *bmagic,
+			     bool has_crcs)
 {
-	amagic += strcspn(amagic, " ");
-	bmagic += strcspn(bmagic, " ");
+	if (has_crcs) {
+		amagic += strcspn(amagic, " ");
+		bmagic += strcspn(bmagic, " ");
+	}
 	return strcmp(amagic, bmagic) == 0;
 }
 #else
@@ -976,7 +984,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 	return 1;
 }
 
-static inline int same_magic(const char *amagic, const char *bmagic)
+static inline int same_magic(const char *amagic, const char *bmagic,
+			     bool has_crcs)
 {
 	return strcmp(amagic, bmagic) == 0;
 }
@@ -1869,7 +1878,7 @@ static struct module *load_module(void __user *umod,
 		err = try_to_force_load(mod, "magic");
 		if (err)
 			goto free_hdr;
-	} else if (!same_magic(modmagic, vermagic)) {
+	} else if (!same_magic(modmagic, vermagic, versindex)) {
 		printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
 		       mod->name, modmagic, vermagic);
 		err = -ENOEXEC;
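
A note on the module.c hunks above: same_magic() now takes a has_crcs
argument (load_module() passes versindex, which is non-zero exactly when
the module carries a versions section, per the new versindex == 0 check
in check_version()). With per-symbol CRCs the kernel-release token at
the front of the vermagic string may be skipped, since the CRCs already
catch ABI mismatches; without CRCs the whole string must match. Below is
a minimal userspace restatement of that rule, using hypothetical
vermagic strings (the real ones are generated at module build time):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool same_magic(const char *amagic, const char *bmagic, bool has_crcs)
{
	if (has_crcs) {
		/* Per-symbol CRCs already police the ABI, so skip the
		 * leading "<kernel release>" token before comparing. */
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}

int main(void)
{
	const char *kern = "2.6.26 SMP mod_unload";
	const char *mod  = "2.6.25 SMP mod_unload";	/* older build */

	/* With CRCs the release skew is tolerated; without them it is not. */
	printf("crcs:    %d\n", same_magic(mod, kern, true));	/* prints 1 */
	printf("no crcs: %d\n", same_magic(mod, kern, false));	/* prints 0 */
	return 0;
}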
diff --git a/kernel/sched.c b/kernel/sched.c
index 58fb8af15776..c51b6565e07c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4567,8 +4567,6 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
-	struct task_struct *task = current;
-	int saved_lock_depth;
 
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -4579,16 +4577,7 @@ asmlinkage void __sched preempt_schedule(void)
 
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
-
-		/*
-		 * We keep the big kernel semaphore locked, but we
-		 * clear ->lock_depth so that schedule() doesnt
-		 * auto-release the semaphore:
-		 */
-		saved_lock_depth = task->lock_depth;
-		task->lock_depth = -1;
 		schedule();
-		task->lock_depth = saved_lock_depth;
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
@@ -4609,26 +4598,15 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
 	struct thread_info *ti = current_thread_info();
-	struct task_struct *task = current;
-	int saved_lock_depth;
 
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
-
-		/*
-		 * We keep the big kernel semaphore locked, but we
-		 * clear ->lock_depth so that schedule() doesnt
-		 * auto-release the semaphore:
-		 */
-		saved_lock_depth = task->lock_depth;
-		task->lock_depth = -1;
 		local_irq_enable();
 		schedule();
 		local_irq_disable();
-		task->lock_depth = saved_lock_depth;
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
@@ -5853,8 +5831,11 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
+#if defined(CONFIG_PREEMPT)
+	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
+#else
 	task_thread_info(idle)->preempt_count = 0;
-
+#endif
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
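
For orientation, the sched.c hunks drop the BKL bookkeeping from the
preemption path: the removed comment explains that ->lock_depth was
cleared so schedule() would not auto-release the big kernel semaphore,
and with that scheme gone the save/restore is dead weight. The function
below is a sketch of how preempt_schedule() reads after the patch,
reconstructed from the hunks above plus the context lines they
reference; treat it as illustrative rather than a verbatim copy of the
tree:

asmlinkage void __sched preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are
	 * disabled, we do not want to preempt the current task:
	 * just return.
	 */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	do {
		add_preempt_count(PREEMPT_ACTIVE);	/* block recursive preemption */
		schedule();				/* no lock_depth juggling any more */
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption
		 * opportunity between schedule and now.
		 */
		barrier();
	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}

The init_idle() hunk is the flip side: under CONFIG_PREEMPT the idle
task may still hold the BKL here (lock_depth >= 0), and a held
spinlock-style BKL evidently has to show up in preempt_count so the
task is not preempted while holding it.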
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 5e41217239e8..5c2942e768cd 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -54,9 +54,10 @@ void down(struct semaphore *sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (unlikely(!sem->count))
+	if (likely(sem->count > 0))
+		sem->count--;
+	else
 		__down(sem);
-	sem->count--;
 	spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(down);
@@ -76,10 +77,10 @@ int down_interruptible(struct semaphore *sem)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (unlikely(!sem->count))
-		result = __down_interruptible(sem);
-	if (!result)
+	if (likely(sem->count > 0))
 		sem->count--;
+	else
+		result = __down_interruptible(sem);
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -102,10 +103,10 @@ int down_killable(struct semaphore *sem)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (unlikely(!sem->count))
-		result = __down_killable(sem);
-	if (!result)
+	if (likely(sem->count > 0))
 		sem->count--;
+	else
+		result = __down_killable(sem);
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -156,10 +157,10 @@ int down_timeout(struct semaphore *sem, long jiffies)
 	int result = 0;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	if (unlikely(!sem->count))
-		result = __down_timeout(sem, jiffies);
-	if (!result)
+	if (likely(sem->count > 0))
 		sem->count--;
+	else
+		result = __down_timeout(sem, jiffies);
 	spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
@@ -178,8 +179,9 @@ void up(struct semaphore *sem)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sem->lock, flags);
-	sem->count++;
-	if (unlikely(!list_empty(&sem->wait_list)))
+	if (likely(list_empty(&sem->wait_list)))
+		sem->count++;
+	else
 		__up(sem);
 	spin_unlock_irqrestore(&sem->lock, flags);
 }
@@ -190,6 +192,7 @@ EXPORT_SYMBOL(up);
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
+	int up;
 };
 
 /*
@@ -202,34 +205,33 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 {
 	struct task_struct *task = current;
 	struct semaphore_waiter waiter;
-	int ret = 0;
 
-	waiter.task = task;
 	list_add_tail(&waiter.list, &sem->wait_list);
+	waiter.task = task;
+	waiter.up = 0;
 
 	for (;;) {
-		if (state == TASK_INTERRUPTIBLE && signal_pending(task)) {
-			ret = -EINTR;
-			break;
-		}
-		if (state == TASK_KILLABLE && fatal_signal_pending(task)) {
-			ret = -EINTR;
-			break;
-		}
-		if (timeout <= 0) {
-			ret = -ETIME;
-			break;
-		}
+		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
+			goto interrupted;
+		if (state == TASK_KILLABLE && fatal_signal_pending(task))
+			goto interrupted;
+		if (timeout <= 0)
+			goto timed_out;
 		__set_task_state(task, state);
 		spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
 		spin_lock_irq(&sem->lock);
-		if (sem->count > 0)
-			break;
+		if (waiter.up)
+			return 0;
 	}
 
+ timed_out:
+	list_del(&waiter.list);
+	return -ETIME;
+
+ interrupted:
 	list_del(&waiter.list);
-	return ret;
+	return -EINTR;
 }
 
 static noinline void __sched __down(struct semaphore *sem)
@@ -256,5 +258,7 @@ static noinline void __sched __up(struct semaphore *sem)
 {
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
+	list_del(&waiter->list);
+	waiter->up = 1;
 	wake_up_process(waiter->task);
 }
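
The semaphore.c rework is two related changes. First, the fast paths
are restructured: down() touches the count only when it is positive and
falls into __down() otherwise, and up() bumps the count only when no one
is waiting. Second, and the reason the waiter gains an up flag: __up()
now dequeues exactly one waiter and sets waiter->up = 1 before waking
it, so the unit passes straight to that waiter instead of going through
sem->count, where any task that reached the lock first could snatch it
and force the sleeper back to sleep. Below is a userspace analogue of
that hand-off, written against pthreads purely for illustration (the
kernel of course uses its own spinlock and scheduler, and queues
waiters FIFO at the tail):

#include <pthread.h>

struct waiter {
	struct waiter *next;
	pthread_cond_t cond;
	int up;			/* set by the waker: "the semaphore is yours" */
};

struct sem {
	pthread_mutex_t lock;
	unsigned int count;
	struct waiter *wait_list;
};

void sem_down(struct sem *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->count > 0) {
		s->count--;			/* fast path: take a unit */
	} else {
		struct waiter w;
		pthread_cond_init(&w.cond, NULL);
		w.up = 0;
		w.next = s->wait_list;		/* LIFO push keeps the sketch short */
		s->wait_list = &w;
		while (!w.up)			/* wakeup means ownership: no re-check of count */
			pthread_cond_wait(&w.cond, &s->lock);
		pthread_cond_destroy(&w.cond);
	}
	pthread_mutex_unlock(&s->lock);
}

void sem_up(struct sem *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->wait_list == NULL) {
		s->count++;			/* fast path: nobody waiting */
	} else {
		struct waiter *w = s->wait_list;
		s->wait_list = w->next;		/* dequeue exactly one waiter */
		w->up = 1;			/* hand the unit straight over */
		pthread_cond_signal(&w->cond);
	}
	pthread_mutex_unlock(&s->lock);
}

Because ownership travels with the wakeup, a third task calling
sem_down() in the window between up() and the sleeper actually running
cannot steal the unit; that is exactly what the waiter.up test in
__down_common() buys over the old sem->count re-check.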