Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c              4
-rw-r--r--  kernel/futex.c             6
-rw-r--r--  kernel/lockdep.c         136
-rw-r--r--  kernel/panic.c             2
-rw-r--r--  kernel/power/pm.c         37
-rw-r--r--  kernel/power/snapshot.c   10
-rw-r--r--  kernel/power/swap.c       26
-rw-r--r--  kernel/printk.c            4
-rw-r--r--  kernel/resource.c          2
-rw-r--r--  kernel/sched.c            19
-rw-r--r--  kernel/softirq.c           2
-rw-r--r--  kernel/sys.c               2
-rw-r--r--  kernel/timer.c            85
-rw-r--r--  kernel/wait.c              8

14 files changed, 165 insertions, 178 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 56e4e07e45f7..926e5a68ea9e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -61,9 +61,7 @@ int max_threads; /* tunable limit on nr_threads */
 
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
-
-EXPORT_SYMBOL(tasklist_lock);
 
 int nr_processes(void)
 {
diff --git a/kernel/futex.c b/kernel/futex.c
index 1dc98e4dd287..cf0c8e21d1ab 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -476,6 +476,12 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
 			 * the refcount and return its pi_state:
 			 */
 			pi_state = this->pi_state;
+			/*
+			 * Userspace might have messed up non PI and PI futexes
+			 */
+			if (unlikely(!pi_state))
+				return -EINVAL;
+
 			atomic_inc(&pi_state->refcount);
 			me->pi_state = pi_state;
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f32ca78c198d..9bad17884513 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -169,22 +169,17 @@ EXPORT_SYMBOL(lockdep_internal);
  */
 static int class_filter(struct lock_class *class)
 {
-	if (class->name_version == 1 &&
-			!strcmp(class->name, "&rl->lock"))
-		return 1;
-	if (class->name_version == 1 &&
-			!strcmp(class->name, "&ni->mrec_lock"))
-		return 1;
-	if (class->name_version == 1 &&
-			!strcmp(class->name, "mft_ni_runlist_lock"))
-		return 1;
-	if (class->name_version == 1 &&
-			!strcmp(class->name, "mft_ni_mrec_lock"))
-		return 1;
-	if (class->name_version == 1 &&
-			!strcmp(class->name, "&vol->lcnbmp_lock"))
-		return 1;
-	return 0;
+#if 0
+	/* Example */
+	if (class->name_version == 1 &&
+			!strcmp(class->name, "lockname"))
+		return 1;
+	if (class->name_version == 1 &&
+			!strcmp(class->name, "&struct->lockfield"))
+		return 1;
+#endif
+	/* Allow everything else. 0 would be filter everything else */
+	return 1;
 }
 #endif
 
@@ -408,23 +403,12 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 		print_lock(curr->held_locks + i);
 	}
 }
-/*
- * Helper to print a nice hierarchy of lock dependencies:
- */
-static void print_spaces(int nr)
-{
-	int i;
-
-	for (i = 0; i < nr; i++)
-		printk("  ");
-}
 
 static void print_lock_class_header(struct lock_class *class, int depth)
 {
 	int bit;
 
-	print_spaces(depth);
-	printk("->");
+	printk("%*s->", depth, "");
 	print_lock_name(class);
 	printk(" ops: %lu", class->ops);
 	printk(" {\n");
@@ -433,17 +417,14 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 		if (class->usage_mask & (1 << bit)) {
 			int len = depth;
 
-			print_spaces(depth);
-			len += printk(" %s", usage_str[bit]);
+			len += printk("%*s %s", depth, "", usage_str[bit]);
 			len += printk(" at:\n");
 			print_stack_trace(class->usage_traces + bit, len);
 		}
 	}
-	print_spaces(depth);
-	printk(" }\n");
+	printk("%*s }\n", depth, "");
 
-	print_spaces(depth);
-	printk(" ... key at: ");
+	printk("%*s ... key at: ",depth,"");
 	print_ip_sym((unsigned long)class->key);
 }
 
@@ -463,8 +444,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 		DEBUG_LOCKS_WARN_ON(!entry->class);
 		print_lock_dependencies(entry->class, depth + 1);
 
-		print_spaces(depth);
-		printk(" ... acquired at:\n");
+		printk("%*s ... acquired at:\n",depth,"");
 		print_stack_trace(&entry->trace, 2);
 		printk("\n");
 	}
@@ -1124,7 +1104,7 @@ extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
@@ -1168,7 +1148,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 */
 	list_for_each_entry(class, hash_head, hash_entry)
 		if (class->key == key)
-			goto out_set;
+			return class;
+
+	return NULL;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+{
+	struct lockdep_subclass_key *key;
+	struct list_head *hash_head;
+	struct lock_class *class;
+
+	class = look_up_lock_class(lock, subclass);
+	if (likely(class))
+		return class;
 
 	/*
 	 * Debug-check: all keys must be persistent!
@@ -1183,6 +1182,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		return NULL;
 	}
 
+	key = lock->key->subkeys + subclass;
+	hash_head = classhashentry(key);
+
 	__raw_spin_lock(&hash_lock);
 	/*
 	 * We have to do the hash-walk again, to avoid races
@@ -1229,8 +1231,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
 
-out_set:
-	lock->class[subclass] = class;
+	if (!subclass)
+		lock->class_cache = class;
 
 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
 
@@ -1934,7 +1936,7 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	}
 	lock->name = name;
 	lock->key = key;
-	memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES);
+	lock->class_cache = NULL;
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1948,8 +1950,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		  unsigned long ip)
 {
 	struct task_struct *curr = current;
+	struct lock_class *class = NULL;
 	struct held_lock *hlock;
-	struct lock_class *class;
 	unsigned int depth, id;
 	int chain_head = 0;
 	u64 chain_key;
@@ -1967,8 +1969,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		return 0;
 	}
 
-	class = lock->class[subclass];
-	/* not cached yet? */
+	if (!subclass)
+		class = lock->class_cache;
+	/*
+	 * Not cached yet or subclass?
+	 */
 	if (unlikely(!class)) {
 		class = register_lock_class(lock, subclass);
 		if (!class)
@@ -2469,48 +2474,44 @@ void lockdep_free_key_range(void *start, unsigned long size)
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-	struct lock_class *class, *next, *entry;
+	struct lock_class *class, *next;
 	struct list_head *head;
 	unsigned long flags;
 	int i, j;
 
 	raw_local_irq_save(flags);
-	__raw_spin_lock(&hash_lock);
 
 	/*
-	 * Remove all classes this lock has:
+	 * Remove all classes this lock might have:
+	 */
+	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
+		/*
+		 * If the class exists we look it up and zap it:
+		 */
+		class = look_up_lock_class(lock, j);
+		if (class)
+			zap_class(class);
+	}
+	/*
+	 * Debug check: in the end all mapped classes should
+	 * be gone.
 	 */
+	__raw_spin_lock(&hash_lock);
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_safe(class, next, head, hash_entry) {
-			for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-				entry = lock->class[j];
-				if (class == entry) {
-					zap_class(class);
-					lock->class[j] = NULL;
-					break;
-				}
+			if (unlikely(class == lock->class_cache)) {
+				__raw_spin_unlock(&hash_lock);
+				DEBUG_LOCKS_WARN_ON(1);
+				goto out_restore;
 			}
 		}
 	}
-
-	/*
-	 * Debug check: in the end all mapped classes should
-	 * be gone.
-	 */
-	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-		entry = lock->class[j];
-		if (!entry)
-			continue;
-		__raw_spin_unlock(&hash_lock);
-		DEBUG_LOCKS_WARN_ON(1);
-		raw_local_irq_restore(flags);
-		return;
-	}
-
 	__raw_spin_unlock(&hash_lock);
+
+out_restore:
 	raw_local_irq_restore(flags);
 }
 
@@ -2571,7 +2572,7 @@ static inline int in_range(const void *start, const void *addr, const void *end)
 
 static void
 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
-		     const void *mem_to)
+		     const void *mem_to, struct held_lock *hlock)
 {
 	if (!debug_locks_off())
 		return;
@@ -2583,6 +2584,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	printk(  "-------------------------\n");
 	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
 		curr->comm, curr->pid, mem_from, mem_to-1);
+	print_lock(hlock);
 	lockdep_print_held_locks(curr);
 
 	printk("\nstack backtrace:\n");
@@ -2616,7 +2618,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 		    !in_range(mem_from, lock_to, mem_to))
 			continue;
 
-		print_freed_lock_bug(curr, mem_from, mem_to);
+		print_freed_lock_bug(curr, mem_from, mem_to, hlock);
 		break;
 	}
 	local_irq_restore(flags);
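
Note on the lockdep changes above: the per-subclass class array in struct lockdep_map is replaced by a single class_cache pointer that is filled only for subclass 0; any other subclass always goes through register_lock_class(), which now starts with the new look_up_lock_class() helper. A minimal user-space sketch of that cache-then-slow-path pattern (lookup_slow() and acquire_class() are illustrative stand-ins, not kernel APIs):

#include <stdio.h>

struct lock_class { int id; };

struct lockdep_map {
	struct lock_class *class_cache;	/* caches the subclass-0 class only */
};

static struct lock_class classes[8];	/* stand-in for the class hash table */

/* Stand-in for look_up_lock_class(); the kernel walks a hash chain here. */
static struct lock_class *lookup_slow(unsigned int subclass)
{
	return &classes[subclass];
}

static struct lock_class *acquire_class(struct lockdep_map *lock,
					unsigned int subclass)
{
	struct lock_class *class = NULL;

	if (!subclass)			/* fast path: one pointer load */
		class = lock->class_cache;
	if (!class) {			/* not cached yet, or subclass != 0 */
		class = lookup_slow(subclass);
		if (!subclass)
			lock->class_cache = class;
	}
	return class;
}

int main(void)
{
	struct lockdep_map map = { NULL };

	printf("%p\n", (void *)acquire_class(&map, 0));	/* slow path, fills cache */
	printf("%p\n", (void *)acquire_class(&map, 0));	/* served from class_cache */
	return 0;
}

Caching only subclass 0 keeps struct lockdep_map small while the overwhelmingly common case stays a single load; it is also why lockdep_reset_lock() above can zap every possible subclass via look_up_lock_class() and then merely debug-check that the cached class is gone.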
diff --git a/kernel/panic.c b/kernel/panic.c
index ab13f0f668b5..d8a0bca21233 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -172,6 +172,7 @@ const char *print_tainted(void)
 
 void add_taint(unsigned flag)
 {
+	debug_locks_off(); /* can't trust the integrity of the kernel anymore */
 	tainted |= flag;
 }
 EXPORT_SYMBOL(add_taint);
@@ -256,6 +257,7 @@ int oops_may_print(void)
  */
 void oops_enter(void)
 {
+	debug_locks_off(); /* can't trust the integrity of the kernel anymore */
 	do_oops_enter_exit();
 }
 
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 84063ac8fcfc..c50d15266c10 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -75,42 +75,6 @@ struct pm_dev *pm_register(pm_dev_t type,
 	return dev;
 }
 
-static void __pm_unregister(struct pm_dev *dev)
-{
-	if (dev) {
-		list_del(&dev->entry);
-		kfree(dev);
-	}
-}
-
-/**
- * pm_unregister_all - unregister all devices with matching callback
- * @callback: callback function pointer
- *
- * Unregister every device that would call the callback passed. This
- * is primarily meant as a helper function for loadable modules. It
- * enables a module to give up all its managed devices without keeping
- * its own private list.
- */
-
-void pm_unregister_all(pm_callback callback)
-{
-	struct list_head *entry;
-
-	if (!callback)
-		return;
-
-	mutex_lock(&pm_devs_lock);
-	entry = pm_devs.next;
-	while (entry != &pm_devs) {
-		struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
-		entry = entry->next;
-		if (dev->callback == callback)
-			__pm_unregister(dev);
-	}
-	mutex_unlock(&pm_devs_lock);
-}
-
 /**
  * pm_send - send request to a single device
  * @dev: device to send to
@@ -239,7 +203,6 @@ int pm_send_all(pm_request_t rqst, void *data)
 }
 
 EXPORT_SYMBOL(pm_register);
-EXPORT_SYMBOL(pm_unregister_all);
 EXPORT_SYMBOL(pm_send_all);
 EXPORT_SYMBOL(pm_active);
 
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 24c96f354231..75d4886e648e 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -227,11 +227,17 @@ static void copy_data_pages(struct pbe *pblist)
 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
 			if (saveable(zone, &zone_pfn)) {
 				struct page *page;
+				long *src, *dst;
+				int n;
+
 				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
 				BUG_ON(!pbe);
 				pbe->orig_address = (unsigned long)page_address(page);
-				/* copy_page is not usable for copying task structs. */
-				memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
+				/* copy_page and memcpy are not usable for copying task structs. */
+				dst = (long *)pbe->address;
+				src = (long *)pbe->orig_address;
+				for (n = PAGE_SIZE / sizeof(long); n; n--)
+					*dst++ = *src++;
 				pbe = pbe->next;
 			}
 		}
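
The snapshot hunk above drops memcpy()/copy_page() in favour of an open-coded long-at-a-time loop, since the arch-optimized copy routines cannot be trusted on pages, such as live task structs, whose contents may change mid-copy. A stand-alone sketch of that word-wise page copy, assuming PAGE_SIZE is a multiple of sizeof(long):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Copy one page a machine word at a time, like the loop in the hunk above. */
static void copy_page_words(void *to, const void *from)
{
	long *dst = to;
	const long *src = from;
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

int main(void)
{
	static char src[PAGE_SIZE] = "suspend image data";
	static char dst[PAGE_SIZE];

	copy_page_words(dst, src);
	printf("%s\n", memcmp(src, dst, PAGE_SIZE) ? "mismatch" : "ok");
	return 0;
}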
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 044b8e0c1025..f1dd146bd64d 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -263,7 +263,6 @@ int swsusp_write(void)
 	struct swap_map_handle handle;
 	struct snapshot_handle snapshot;
 	struct swsusp_info *header;
-	unsigned long start;
 	int error;
 
 	if ((error = swsusp_swap_check())) {
@@ -281,16 +280,17 @@ int swsusp_write(void)
 	}
 	error = get_swap_writer(&handle);
 	if (!error) {
-		start = handle.cur_swap;
+		unsigned long start = handle.cur_swap;
 		error = swap_write_page(&handle, header);
-	}
-	if (!error)
-		error = save_image(&handle, &snapshot, header->pages - 1);
-	if (!error) {
-		flush_swap_writer(&handle);
-		printk("S");
-		error = mark_swapfiles(swp_entry(root_swap, start));
-		printk("|\n");
+		if (!error)
+			error = save_image(&handle, &snapshot,
+					header->pages - 1);
+		if (!error) {
+			flush_swap_writer(&handle);
+			printk("S");
+			error = mark_swapfiles(swp_entry(root_swap, start));
+			printk("|\n");
+		}
 	}
 	if (error)
 		free_all_swap_pages(root_swap, handle.bitmap);
@@ -311,8 +311,10 @@ static atomic_t io_done = ATOMIC_INIT(0);
 
 static int end_io(struct bio *bio, unsigned int num, int err)
 {
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		panic("I/O error reading memory image");
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+		printk(KERN_ERR "I/O error reading swsusp image.\n");
+		return -EIO;
+	}
 	atomic_set(&io_done, 0);
 	return 0;
 }
diff --git a/kernel/printk.c b/kernel/printk.c
index bdba5d80496c..65ca0688f86f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -52,7 +52,7 @@ int console_printk[4] = {
 	DEFAULT_CONSOLE_LOGLEVEL,	/* default_console_loglevel */
 };
 
-EXPORT_SYMBOL(console_printk);
+EXPORT_UNUSED_SYMBOL(console_printk);  /*  June 2006  */
 
 /*
  * Low level drivers may need that to know if they can schedule in
@@ -773,7 +773,7 @@ int is_console_locked(void)
 {
 	return console_locked;
 }
-EXPORT_SYMBOL(is_console_locked);
+EXPORT_UNUSED_SYMBOL(is_console_locked);  /*  June 2006  */
 
 /**
  * release_console_sem - unlock the console system
diff --git a/kernel/resource.c b/kernel/resource.c
index 129cf046e561..0dd3a857579e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -404,8 +404,6 @@ int insert_resource(struct resource *parent, struct resource *new)
 	return result;
 }
 
-EXPORT_SYMBOL(insert_resource);
-
 /*
  * Given an existing resource, change its start and size to match the
  * arguments.  Returns -EBUSY if it can't fit.  Existing children of
diff --git a/kernel/sched.c b/kernel/sched.c
index 4ee400f9d56b..d714611f1691 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3384,7 +3384,7 @@ EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_PREEMPT
 /*
- * this is is the entry point to schedule() from in-kernel preemption
+ * this is the entry point to schedule() from in-kernel preemption
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
@@ -3427,7 +3427,7 @@ need_resched:
 EXPORT_SYMBOL(preempt_schedule);
 
 /*
- * this is is the entry point to schedule() from kernel preemption
+ * this is the entry point to schedule() from kernel preemption
  * off of irq context.
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
@@ -3439,7 +3439,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	struct task_struct *task = current;
 	int saved_lock_depth;
 #endif
-	/* Catch callers which need to be fixed*/
+	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
 need_resched:
@@ -4650,7 +4650,7 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
 	return list_entry(p->sibling.next,struct task_struct,sibling);
 }
 
-static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" };
+static const char stat_nam[] = "RSDTtZX";
 
 static void show_task(struct task_struct *p)
 {
@@ -4658,12 +4658,9 @@ static void show_task(struct task_struct *p)
 	unsigned long free = 0;
 	unsigned state;
 
-	printk("%-13.13s ", p->comm);
 	state = p->state ? __ffs(p->state) + 1 : 0;
-	if (state < ARRAY_SIZE(stat_nam))
-		printk(stat_nam[state]);
-	else
-		printk("?");
+	printk("%-13.13s %c", p->comm,
+		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if (BITS_PER_LONG == 32)
 	if (state == TASK_RUNNING)
 		printk(" running ");
@@ -4877,7 +4874,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 		p->timestamp = p->timestamp - rq_src->timestamp_last_tick
 				+ rq_dest->timestamp_last_tick;
 		deactivate_task(p, rq_src);
-		activate_task(p, rq_dest, 0);
+		__activate_task(p, rq_dest);
 		if (TASK_PREEMPTS_CURR(p, rq_dest))
 			resched_task(rq_dest->curr);
 	}
@@ -5776,7 +5773,7 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
 	cache = vmalloc(max_size);
 	if (!cache) {
 		printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
-		return 1000000;	// return 1 msec on very small boxen
+		return 1000000;	/* return 1 msec on very small boxen */
 	}
 
 	while (size <= max_size) {
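
The stat_nam hunk above turns an array of one-character strings into a plain character table indexed by the task state bit, printing '?' for any state outside the table. A small sketch of the same indexing, with __builtin_ffsl() standing in for the kernel's __ffs(p->state) + 1:

#include <stdio.h>

static const char stat_nam[] = "RSDTtZX";

/* Lowest set state bit selects the letter; state 0 means running ('R'). */
static char state_char(unsigned long state)
{
	unsigned int idx = state ? (unsigned int)__builtin_ffsl(state) : 0;

	return idx < sizeof(stat_nam) - 1 ? stat_nam[idx] : '?';
}

int main(void)
{
	printf("%c\n", state_char(0));		/* running -> 'R' */
	printf("%c\n", state_char(1));		/* TASK_INTERRUPTIBLE -> 'S' */
	printf("%c\n", state_char(1UL << 10));	/* unknown state -> '?' */
	return 0;
}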
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 215541e26c1a..fd12f2556f0d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -311,7 +311,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
 	softirq_vec[nr].action = action;
 }
 
-EXPORT_SYMBOL(open_softirq);
+EXPORT_UNUSED_SYMBOL(open_softirq);  /* June 2006 */
 
 /* Tasklets */
 struct tasklet_head
diff --git a/kernel/sys.c b/kernel/sys.c
index dbb3b9c7ea64..e236f98f7ec5 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1983,7 +1983,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 			error = current->mm->dumpable;
 			break;
 		case PR_SET_DUMPABLE:
-			if (arg2 < 0 || arg2 > 2) {
+			if (arg2 < 0 || arg2 > 1) {
 				error = -EINVAL;
 				break;
 			}
diff --git a/kernel/timer.c b/kernel/timer.c
index 396a3c024c2c..2a87430a58d4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -891,6 +891,7 @@ int do_settimeofday(struct timespec *tv)
 	set_normalized_timespec(&xtime, sec, nsec);
 	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 
+	clock->error = 0;
 	ntp_clear();
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -1008,52 +1009,52 @@ static int __init timekeeping_init_device(void)
 device_initcall(timekeeping_init_device);
 
 /*
- * If the error is already larger, we look ahead another tick,
+ * If the error is already larger, we look ahead even further
  * to compensate for late or lost adjustments.
  */
-static __always_inline int clocksource_bigadjust(int sign, s64 error, s64 *interval, s64 *offset)
+static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset)
 {
-	int adj;
+	s64 tick_error, i;
+	u32 look_ahead, adj;
+	s32 error2, mult;
 
 	/*
-	 * As soon as the machine is synchronized to the external time
-	 * source this should be the common case.
+	 * Use the current error value to determine how much to look ahead.
+	 * The larger the error the slower we adjust for it to avoid problems
+	 * with losing too many ticks, otherwise we would overadjust and
+	 * produce an even larger error.  The smaller the adjustment the
+	 * faster we try to adjust for it, as lost ticks can do less harm
+	 * here.  This is tuned so that an error of about 1 msec is adusted
+	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
 	 */
-	error >>= 2;
-	if (likely(sign > 0 ? error <= *interval : error >= *interval))
-		return sign;
+	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
+	error2 = abs(error2);
+	for (look_ahead = 0; error2 > 0; look_ahead++)
+		error2 >>= 2;
 
 	/*
-	 * An extra look ahead dampens the effect of the current error,
-	 * which can grow quite large with continously late updates, as
-	 * it would dominate the adjustment value and can lead to
-	 * oscillation.
+	 * Now calculate the error in (1 << look_ahead) ticks, but first
+	 * remove the single look ahead already included in the error.
 	 */
-	error += current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
-	error -= clock->xtime_interval >> 1;
-
-	adj = 0;
-	while (1) {
-		error >>= 1;
-		if (sign > 0 ? error <= *interval : error >= *interval)
-			break;
-		adj++;
+	tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
+	tick_error -= clock->xtime_interval >> 1;
+	error = ((error - tick_error) >> look_ahead) + tick_error;
+
+	/* Finally calculate the adjustment shift value. */
+	i = *interval;
+	mult = 1;
+	if (error < 0) {
+		error = -error;
+		*interval = -*interval;
+		*offset = -*offset;
+		mult = -1;
 	}
-
-	/*
-	 * Add the current adjustments to the error and take the offset
-	 * into account, the latter can cause the error to be hardly
-	 * reduced at the next tick. Check the error again if there's
-	 * room for another adjustment, thus further reducing the error
-	 * which otherwise had to be corrected at the next update.
-	 */
-	error = (error << 1) - *interval + *offset;
-	if (sign > 0 ? error > *interval : error < *interval)
-		adj++;
+	for (adj = 0; error > i; adj++)
+		error >>= 1;
 
 	*interval <<= adj;
 	*offset <<= adj;
-	return sign << adj;
+	return mult << adj;
 }
 
 /*
@@ -1068,11 +1069,19 @@ static void clocksource_adjust(struct clocksource *clock, s64 offset)
 
 	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
 	if (error > interval) {
-		adj = clocksource_bigadjust(1, error, &interval, &offset);
+		error >>= 2;
+		if (likely(error <= interval))
+			adj = 1;
+		else
+			adj = clocksource_bigadjust(error, &interval, &offset);
 	} else if (error < -interval) {
-		interval = -interval;
-		offset = -offset;
-		adj = clocksource_bigadjust(-1, error, &interval, &offset);
+		error >>= 2;
+		if (likely(error >= -interval)) {
+			adj = -1;
+			interval = -interval;
+			offset = -offset;
+		} else
+			adj = clocksource_bigadjust(error, &interval, &offset);
 	} else
 		return;
 
@@ -1129,7 +1138,7 @@ static void update_wall_time(void)
 	clocksource_adjust(clock, offset);
 
 	/* store full nanoseconds into xtime */
-	xtime.tv_nsec = clock->xtime_nsec >> clock->shift;
+	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
 	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
 
 	/* check to see if there is a new clocksource to use */
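
The clocksource_bigadjust() rewrite above ends by halving the error until it fits within one interval; the number of halvings becomes a power-of-two scale applied to interval and offset, and the sign is folded into the returned multiplier. A simplified stand-alone sketch of just that final step (the look-ahead damping and the kernel's fixed-point scaling are omitted here):

#include <stdint.h>
#include <stdio.h>

/*
 * Find adj so that (error >> adj) <= interval, then scale interval and
 * offset by 1 << adj; negative errors are handled by negating first and
 * remembering the sign in mult, mirroring the patched function.
 */
static int bigadjust(int64_t error, int64_t *interval, int64_t *offset)
{
	int64_t i = *interval;
	int32_t mult = 1;
	uint32_t adj;

	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

int main(void)
{
	int64_t interval = 1000, offset = 10;
	int adj = bigadjust(250000, &interval, &offset);

	/* 250000 >> 8 == 976 <= 1000, so the returned adj is 1 << 8 == 256 */
	printf("adj=%d interval=%lld offset=%lld\n",
	       adj, (long long)interval, (long long)offset);
	return 0;
}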
diff --git a/kernel/wait.c b/kernel/wait.c
index a1d57aeb7f75..59a82f63275d 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,9 +10,13 @@
 #include <linux/wait.h>
 #include <linux/hash.h>
 
-struct lock_class_key waitqueue_lock_key;
+void init_waitqueue_head(wait_queue_head_t *q)
+{
+	spin_lock_init(&q->lock);
+	INIT_LIST_HEAD(&q->task_list);
+}
 
-EXPORT_SYMBOL(waitqueue_lock_key);
+EXPORT_SYMBOL(init_waitqueue_head);
 
 void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
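
With the wait.c change above, init_waitqueue_head() becomes a real out-of-line function that initializes the head's lock and waiter list, instead of exporting a shared waitqueue_lock_key; under lockdep each wait queue head therefore gets its own lock class. A user-space analog of the same initializer, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Analog of wait_queue_head_t: a lock plus the list of waiters. */
struct wait_queue_head {
	pthread_mutex_t lock;
	struct list_head task_list;
};

/* Same two steps as the new init_waitqueue_head(): init lock, init list. */
static void init_waitqueue_head(struct wait_queue_head *q)
{
	pthread_mutex_init(&q->lock, NULL);
	q->task_list.next = q->task_list.prev = &q->task_list;
}

int main(void)
{
	struct wait_queue_head wq;

	init_waitqueue_head(&wq);
	printf("queue empty: %d\n", wq.task_list.next == &wq.task_list);
	return 0;
}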