30 files changed, 298 insertions, 148 deletions
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 6887d44d2661..6da24e7a56cb 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -238,6 +238,13 @@ Debugging | |||
238 | pagefaulttrace Dump all page faults. Only useful for extreme debugging | 238 | pagefaulttrace Dump all page faults. Only useful for extreme debugging |
239 | and will create a lot of output. | 239 | and will create a lot of output. |
240 | 240 | ||
241 | call_trace=[old|both|newfallback|new] | ||
242 | old: use old inexact backtracer | ||
243 | new: use new exact dwarf2 unwinder | ||
244 | both: print entries from both | ||
245 | newfallback: use new unwinder but fall back to old if it gets | ||
246 | stuck (default) | ||
247 | |||
241 | Misc | 248 | Misc |
242 | 249 | ||
243 | noreplacement Don't replace instructions with more appropriate ones | 250 | noreplacement Don't replace instructions with more appropriate ones |
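The call_trace= option documented above is passed on the kernel command line. A minimal sketch of selecting the unwinder at boot (the kernel image path and root device below are placeholders, not taken from this patch):

    # e.g. in a boot loader entry: print both the exact dwarf2 trace and the
    # old inexact one, useful when comparing unwinder output
    kernel /boot/vmlinuz root=/dev/sda1 call_trace=both

    # or force the new unwinder only, with no fallback if it gets stuck
    kernel /boot/vmlinuz root=/dev/sda1 call_trace=new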
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 923bb292f47f..8657c739656a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas | |||
690 | /* | 690 | /* |
691 | * Now maybe handle debug registers and/or IO bitmaps | 691 | * Now maybe handle debug registers and/or IO bitmaps |
692 | */ | 692 | */ |
693 | if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)) | 693 | if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW) |
694 | || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) | 694 | || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))) |
695 | __switch_to_xtra(next_p, tss); | 695 | __switch_to_xtra(next_p, tss); |
696 | 696 | ||
697 | disable_tsc(prev_p, next_p); | 697 | disable_tsc(prev_p, next_p); |
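The hunk above only moves a closing parenthesis: before the fix the unlikely() hint closed after the _TIF_WORK_CTXSW test, so the TIF_IO_BITMAP test fell outside the branch-prediction hint; the fix wraps the whole rarely-true condition in a single unlikely(). A minimal self-contained sketch of the corrected grouping (the flag values and helper names here are illustrative stand-ins, not the kernel's):

#include <stdio.h>

#define unlikely(x)  __builtin_expect(!!(x), 0)

/* Illustrative stand-ins; the real kernel flags and helpers differ. */
#define _TIF_WORK_CTXSW  0x1UL
#define TIF_IO_BITMAP    1

static int test_tsk_thread_flag_demo(unsigned long flags, int bit)
{
	return !!(flags & (1UL << bit));
}

static void __switch_to_xtra_demo(void)
{
	puts("doing the extra (rare) context-switch work");
}

static void switch_check(unsigned long next_flags, unsigned long prev_flags)
{
	/*
	 * Before the fix the hint closed early:
	 *   if (unlikely((next_flags & _TIF_WORK_CTXSW))
	 *       || test_tsk_thread_flag_demo(prev_flags, TIF_IO_BITMAP))
	 * so only the first test was marked unlikely.  The corrected form
	 * puts the whole condition inside one hint:
	 */
	if (unlikely((next_flags & _TIF_WORK_CTXSW)
		     || test_tsk_thread_flag_demo(prev_flags, TIF_IO_BITMAP)))
		__switch_to_xtra_demo();
}

int main(void)
{
	switch_check(0, 1UL << TIF_IO_BITMAP);	/* triggers the rare path */
	return 0;
}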
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 8705c0f05788..edd00f6cee37 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs) | |||
135 | { | 135 | { |
136 | unsigned long pc = instruction_pointer(regs); | 136 | unsigned long pc = instruction_pointer(regs); |
137 | 137 | ||
138 | if (in_lock_functions(pc)) | 138 | if (!user_mode_vm(regs) && in_lock_functions(pc)) |
139 | return *(unsigned long *)(regs->ebp + 4); | 139 | return *(unsigned long *)(regs->ebp + 4); |
140 | 140 | ||
141 | return pc; | 141 | return pc; |
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 313ac1f7dc5a..3facc8fcb91e 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
187 | if (unwind_init_blocked(&info, task) == 0) | 187 | if (unwind_init_blocked(&info, task) == 0) |
188 | unw_ret = show_trace_unwind(&info, log_lvl); | 188 | unw_ret = show_trace_unwind(&info, log_lvl); |
189 | } | 189 | } |
190 | if (unw_ret > 0) { | 190 | if (unw_ret > 0 && !arch_unw_user_mode(&info)) { |
191 | if (call_trace > 0) | 191 | #ifdef CONFIG_STACK_UNWIND |
192 | print_symbol("DWARF2 unwinder stuck at %s\n", | ||
193 | UNW_PC(info.regs)); | ||
194 | if (call_trace == 1) { | ||
195 | printk("Leftover inexact backtrace:\n"); | ||
196 | if (UNW_SP(info.regs)) | ||
197 | stack = (void *)UNW_SP(info.regs); | ||
198 | } else if (call_trace > 1) | ||
192 | return; | 199 | return; |
193 | printk("%sLegacy call trace:\n", log_lvl); | 200 | else |
201 | printk("Full inexact backtrace again:\n"); | ||
202 | #else | ||
203 | printk("Inexact backtrace:\n"); | ||
204 | #endif | ||
194 | } | 205 | } |
195 | } | 206 | } |
196 | 207 | ||
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f4dfc10026d2..f1d4591eddbb 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,13 +1,16 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.17-rc1 | 3 | # Linux kernel version: 2.6.18-rc2 |
4 | # Mon Apr 3 14:34:15 2006 | 4 | # Thu Jul 27 13:51:07 2006 |
5 | # | 5 | # |
6 | CONFIG_MMU=y | 6 | CONFIG_MMU=y |
7 | CONFIG_LOCKDEP_SUPPORT=y | ||
8 | CONFIG_STACKTRACE_SUPPORT=y | ||
7 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 9 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
8 | CONFIG_GENERIC_HWEIGHT=y | 10 | CONFIG_GENERIC_HWEIGHT=y |
9 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 11 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
10 | CONFIG_S390=y | 12 | CONFIG_S390=y |
13 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
11 | 14 | ||
12 | # | 15 | # |
13 | # Code maturity level options | 16 | # Code maturity level options |
@@ -25,6 +28,7 @@ CONFIG_SWAP=y | |||
25 | CONFIG_SYSVIPC=y | 28 | CONFIG_SYSVIPC=y |
26 | CONFIG_POSIX_MQUEUE=y | 29 | CONFIG_POSIX_MQUEUE=y |
27 | # CONFIG_BSD_PROCESS_ACCT is not set | 30 | # CONFIG_BSD_PROCESS_ACCT is not set |
31 | # CONFIG_TASKSTATS is not set | ||
28 | CONFIG_SYSCTL=y | 32 | CONFIG_SYSCTL=y |
29 | CONFIG_AUDIT=y | 33 | CONFIG_AUDIT=y |
30 | # CONFIG_AUDITSYSCALL is not set | 34 | # CONFIG_AUDITSYSCALL is not set |
@@ -43,10 +47,12 @@ CONFIG_PRINTK=y | |||
43 | CONFIG_BUG=y | 47 | CONFIG_BUG=y |
44 | CONFIG_ELF_CORE=y | 48 | CONFIG_ELF_CORE=y |
45 | CONFIG_BASE_FULL=y | 49 | CONFIG_BASE_FULL=y |
50 | CONFIG_RT_MUTEXES=y | ||
46 | CONFIG_FUTEX=y | 51 | CONFIG_FUTEX=y |
47 | CONFIG_EPOLL=y | 52 | CONFIG_EPOLL=y |
48 | CONFIG_SHMEM=y | 53 | CONFIG_SHMEM=y |
49 | CONFIG_SLAB=y | 54 | CONFIG_SLAB=y |
55 | CONFIG_VM_EVENT_COUNTERS=y | ||
50 | # CONFIG_TINY_SHMEM is not set | 56 | # CONFIG_TINY_SHMEM is not set |
51 | CONFIG_BASE_SMALL=0 | 57 | CONFIG_BASE_SMALL=0 |
52 | # CONFIG_SLOB is not set | 58 | # CONFIG_SLOB is not set |
@@ -94,7 +100,6 @@ CONFIG_HOTPLUG_CPU=y | |||
94 | CONFIG_DEFAULT_MIGRATION_COST=1000000 | 100 | CONFIG_DEFAULT_MIGRATION_COST=1000000 |
95 | CONFIG_COMPAT=y | 101 | CONFIG_COMPAT=y |
96 | CONFIG_SYSVIPC_COMPAT=y | 102 | CONFIG_SYSVIPC_COMPAT=y |
97 | CONFIG_BINFMT_ELF32=y | ||
98 | 103 | ||
99 | # | 104 | # |
100 | # Code generation options | 105 | # Code generation options |
@@ -115,6 +120,7 @@ CONFIG_FLATMEM=y | |||
115 | CONFIG_FLAT_NODE_MEM_MAP=y | 120 | CONFIG_FLAT_NODE_MEM_MAP=y |
116 | # CONFIG_SPARSEMEM_STATIC is not set | 121 | # CONFIG_SPARSEMEM_STATIC is not set |
117 | CONFIG_SPLIT_PTLOCK_CPUS=4 | 122 | CONFIG_SPLIT_PTLOCK_CPUS=4 |
123 | CONFIG_RESOURCES_64BIT=y | ||
118 | 124 | ||
119 | # | 125 | # |
120 | # I/O subsystem configuration | 126 | # I/O subsystem configuration |
@@ -142,6 +148,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y | |||
142 | # CONFIG_APPLDATA_BASE is not set | 148 | # CONFIG_APPLDATA_BASE is not set |
143 | CONFIG_NO_IDLE_HZ=y | 149 | CONFIG_NO_IDLE_HZ=y |
144 | CONFIG_NO_IDLE_HZ_INIT=y | 150 | CONFIG_NO_IDLE_HZ_INIT=y |
151 | CONFIG_S390_HYPFS_FS=y | ||
145 | CONFIG_KEXEC=y | 152 | CONFIG_KEXEC=y |
146 | 153 | ||
147 | # | 154 | # |
@@ -174,6 +181,8 @@ CONFIG_IP_FIB_HASH=y | |||
174 | # CONFIG_INET_IPCOMP is not set | 181 | # CONFIG_INET_IPCOMP is not set |
175 | # CONFIG_INET_XFRM_TUNNEL is not set | 182 | # CONFIG_INET_XFRM_TUNNEL is not set |
176 | # CONFIG_INET_TUNNEL is not set | 183 | # CONFIG_INET_TUNNEL is not set |
184 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | ||
185 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
177 | CONFIG_INET_DIAG=y | 186 | CONFIG_INET_DIAG=y |
178 | CONFIG_INET_TCP_DIAG=y | 187 | CONFIG_INET_TCP_DIAG=y |
179 | # CONFIG_TCP_CONG_ADVANCED is not set | 188 | # CONFIG_TCP_CONG_ADVANCED is not set |
@@ -186,7 +195,10 @@ CONFIG_IPV6=y | |||
186 | # CONFIG_INET6_IPCOMP is not set | 195 | # CONFIG_INET6_IPCOMP is not set |
187 | # CONFIG_INET6_XFRM_TUNNEL is not set | 196 | # CONFIG_INET6_XFRM_TUNNEL is not set |
188 | # CONFIG_INET6_TUNNEL is not set | 197 | # CONFIG_INET6_TUNNEL is not set |
198 | CONFIG_INET6_XFRM_MODE_TRANSPORT=y | ||
199 | CONFIG_INET6_XFRM_MODE_TUNNEL=y | ||
189 | # CONFIG_IPV6_TUNNEL is not set | 200 | # CONFIG_IPV6_TUNNEL is not set |
201 | # CONFIG_NETWORK_SECMARK is not set | ||
190 | # CONFIG_NETFILTER is not set | 202 | # CONFIG_NETFILTER is not set |
191 | 203 | ||
192 | # | 204 | # |
@@ -263,6 +275,7 @@ CONFIG_NET_ESTIMATOR=y | |||
263 | # Network testing | 275 | # Network testing |
264 | # | 276 | # |
265 | # CONFIG_NET_PKTGEN is not set | 277 | # CONFIG_NET_PKTGEN is not set |
278 | # CONFIG_NET_TCPPROBE is not set | ||
266 | # CONFIG_HAMRADIO is not set | 279 | # CONFIG_HAMRADIO is not set |
267 | # CONFIG_IRDA is not set | 280 | # CONFIG_IRDA is not set |
268 | # CONFIG_BT is not set | 281 | # CONFIG_BT is not set |
@@ -276,6 +289,7 @@ CONFIG_STANDALONE=y | |||
276 | CONFIG_PREVENT_FIRMWARE_BUILD=y | 289 | CONFIG_PREVENT_FIRMWARE_BUILD=y |
277 | # CONFIG_FW_LOADER is not set | 290 | # CONFIG_FW_LOADER is not set |
278 | # CONFIG_DEBUG_DRIVER is not set | 291 | # CONFIG_DEBUG_DRIVER is not set |
292 | CONFIG_SYS_HYPERVISOR=y | ||
279 | 293 | ||
280 | # | 294 | # |
281 | # Connector - unified userspace <-> kernelspace linker | 295 | # Connector - unified userspace <-> kernelspace linker |
@@ -334,6 +348,7 @@ CONFIG_BLK_DEV_NBD=m | |||
334 | CONFIG_BLK_DEV_RAM=y | 348 | CONFIG_BLK_DEV_RAM=y |
335 | CONFIG_BLK_DEV_RAM_COUNT=16 | 349 | CONFIG_BLK_DEV_RAM_COUNT=16 |
336 | CONFIG_BLK_DEV_RAM_SIZE=4096 | 350 | CONFIG_BLK_DEV_RAM_SIZE=4096 |
351 | CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 | ||
337 | CONFIG_BLK_DEV_INITRD=y | 352 | CONFIG_BLK_DEV_INITRD=y |
338 | # CONFIG_CDROM_PKTCDVD is not set | 353 | # CONFIG_CDROM_PKTCDVD is not set |
339 | 354 | ||
@@ -359,9 +374,7 @@ CONFIG_MD_LINEAR=m | |||
359 | CONFIG_MD_RAID0=m | 374 | CONFIG_MD_RAID0=m |
360 | CONFIG_MD_RAID1=m | 375 | CONFIG_MD_RAID1=m |
361 | # CONFIG_MD_RAID10 is not set | 376 | # CONFIG_MD_RAID10 is not set |
362 | CONFIG_MD_RAID5=m | 377 | # CONFIG_MD_RAID456 is not set |
363 | # CONFIG_MD_RAID5_RESHAPE is not set | ||
364 | # CONFIG_MD_RAID6 is not set | ||
365 | CONFIG_MD_MULTIPATH=m | 378 | CONFIG_MD_MULTIPATH=m |
366 | # CONFIG_MD_FAULTY is not set | 379 | # CONFIG_MD_FAULTY is not set |
367 | CONFIG_BLK_DEV_DM=y | 380 | CONFIG_BLK_DEV_DM=y |
@@ -419,7 +432,8 @@ CONFIG_S390_TAPE_34XX=m | |||
419 | # | 432 | # |
420 | # Cryptographic devices | 433 | # Cryptographic devices |
421 | # | 434 | # |
422 | CONFIG_Z90CRYPT=m | 435 | CONFIG_ZCRYPT=m |
436 | # CONFIG_ZCRYPT_MONOLITHIC is not set | ||
423 | 437 | ||
424 | # | 438 | # |
425 | # Network device support | 439 | # Network device support |
@@ -509,6 +523,7 @@ CONFIG_FS_MBCACHE=y | |||
509 | # CONFIG_MINIX_FS is not set | 523 | # CONFIG_MINIX_FS is not set |
510 | # CONFIG_ROMFS_FS is not set | 524 | # CONFIG_ROMFS_FS is not set |
511 | CONFIG_INOTIFY=y | 525 | CONFIG_INOTIFY=y |
526 | CONFIG_INOTIFY_USER=y | ||
512 | # CONFIG_QUOTA is not set | 527 | # CONFIG_QUOTA is not set |
513 | CONFIG_DNOTIFY=y | 528 | CONFIG_DNOTIFY=y |
514 | # CONFIG_AUTOFS_FS is not set | 529 | # CONFIG_AUTOFS_FS is not set |
@@ -614,26 +629,36 @@ CONFIG_MSDOS_PARTITION=y | |||
614 | # Instrumentation Support | 629 | # Instrumentation Support |
615 | # | 630 | # |
616 | # CONFIG_PROFILING is not set | 631 | # CONFIG_PROFILING is not set |
617 | # CONFIG_STATISTICS is not set | 632 | CONFIG_STATISTICS=y |
633 | CONFIG_KPROBES=y | ||
618 | 634 | ||
619 | # | 635 | # |
620 | # Kernel hacking | 636 | # Kernel hacking |
621 | # | 637 | # |
638 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
622 | # CONFIG_PRINTK_TIME is not set | 639 | # CONFIG_PRINTK_TIME is not set |
623 | CONFIG_MAGIC_SYSRQ=y | 640 | CONFIG_MAGIC_SYSRQ=y |
641 | # CONFIG_UNUSED_SYMBOLS is not set | ||
624 | CONFIG_DEBUG_KERNEL=y | 642 | CONFIG_DEBUG_KERNEL=y |
625 | CONFIG_LOG_BUF_SHIFT=17 | 643 | CONFIG_LOG_BUF_SHIFT=17 |
626 | # CONFIG_DETECT_SOFTLOCKUP is not set | 644 | # CONFIG_DETECT_SOFTLOCKUP is not set |
627 | # CONFIG_SCHEDSTATS is not set | 645 | # CONFIG_SCHEDSTATS is not set |
628 | # CONFIG_DEBUG_SLAB is not set | 646 | # CONFIG_DEBUG_SLAB is not set |
629 | CONFIG_DEBUG_PREEMPT=y | 647 | CONFIG_DEBUG_PREEMPT=y |
630 | CONFIG_DEBUG_MUTEXES=y | 648 | # CONFIG_DEBUG_RT_MUTEXES is not set |
649 | # CONFIG_RT_MUTEX_TESTER is not set | ||
631 | CONFIG_DEBUG_SPINLOCK=y | 650 | CONFIG_DEBUG_SPINLOCK=y |
651 | CONFIG_DEBUG_MUTEXES=y | ||
652 | # CONFIG_DEBUG_RWSEMS is not set | ||
653 | # CONFIG_DEBUG_LOCK_ALLOC is not set | ||
654 | # CONFIG_PROVE_LOCKING is not set | ||
632 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | 655 | CONFIG_DEBUG_SPINLOCK_SLEEP=y |
656 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
633 | # CONFIG_DEBUG_KOBJECT is not set | 657 | # CONFIG_DEBUG_KOBJECT is not set |
634 | # CONFIG_DEBUG_INFO is not set | 658 | # CONFIG_DEBUG_INFO is not set |
635 | CONFIG_DEBUG_FS=y | 659 | CONFIG_DEBUG_FS=y |
636 | # CONFIG_DEBUG_VM is not set | 660 | # CONFIG_DEBUG_VM is not set |
661 | # CONFIG_FRAME_POINTER is not set | ||
637 | # CONFIG_UNWIND_INFO is not set | 662 | # CONFIG_UNWIND_INFO is not set |
638 | CONFIG_FORCED_INLINING=y | 663 | CONFIG_FORCED_INLINING=y |
639 | # CONFIG_RCU_TORTURE_TEST is not set | 664 | # CONFIG_RCU_TORTURE_TEST is not set |
@@ -688,3 +713,4 @@ CONFIG_CRYPTO=y | |||
688 | # CONFIG_CRC16 is not set | 713 | # CONFIG_CRC16 is not set |
689 | CONFIG_CRC32=m | 714 | CONFIG_CRC32=m |
690 | # CONFIG_LIBCRC32C is not set | 715 | # CONFIG_LIBCRC32C is not set |
716 | CONFIG_PLIST=y | ||
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 04eb1eab6e3e..845081b01267 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -225,6 +225,32 @@ static __inline__ int has_low_battery(void) | |||
225 | return (data1 == data2); /* Was the write blocked? */ | 225 | return (data1 == data2); /* Was the write blocked? */ |
226 | } | 226 | } |
227 | 227 | ||
228 | static void __init mostek_set_system_time(void) | ||
229 | { | ||
230 | unsigned int year, mon, day, hour, min, sec; | ||
231 | struct mostek48t02 *mregs; | ||
232 | |||
233 | mregs = (struct mostek48t02 *)mstk48t02_regs; | ||
234 | if(!mregs) { | ||
235 | prom_printf("Something wrong, clock regs not mapped yet.\n"); | ||
236 | prom_halt(); | ||
237 | } | ||
238 | spin_lock_irq(&mostek_lock); | ||
239 | mregs->creg |= MSTK_CREG_READ; | ||
240 | sec = MSTK_REG_SEC(mregs); | ||
241 | min = MSTK_REG_MIN(mregs); | ||
242 | hour = MSTK_REG_HOUR(mregs); | ||
243 | day = MSTK_REG_DOM(mregs); | ||
244 | mon = MSTK_REG_MONTH(mregs); | ||
245 | year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) ); | ||
246 | xtime.tv_sec = mktime(year, mon, day, hour, min, sec); | ||
247 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); | ||
248 | set_normalized_timespec(&wall_to_monotonic, | ||
249 | -xtime.tv_sec, -xtime.tv_nsec); | ||
250 | mregs->creg &= ~MSTK_CREG_READ; | ||
251 | spin_unlock_irq(&mostek_lock); | ||
252 | } | ||
253 | |||
228 | /* Probe for the real time clock chip on Sun4 */ | 254 | /* Probe for the real time clock chip on Sun4 */ |
229 | static __inline__ void sun4_clock_probe(void) | 255 | static __inline__ void sun4_clock_probe(void) |
230 | { | 256 | { |
@@ -273,6 +299,7 @@ static __inline__ void sun4_clock_probe(void) | |||
273 | #endif | 299 | #endif |
274 | } | 300 | } |
275 | 301 | ||
302 | #ifndef CONFIG_SUN4 | ||
276 | static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) | 303 | static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match) |
277 | { | 304 | { |
278 | struct device_node *dp = op->node; | 305 | struct device_node *dp = op->node; |
@@ -307,6 +334,8 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id | |||
307 | if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP) | 334 | if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP) |
308 | kick_start_clock(); | 335 | kick_start_clock(); |
309 | 336 | ||
337 | mostek_set_system_time(); | ||
338 | |||
310 | return 0; | 339 | return 0; |
311 | } | 340 | } |
312 | 341 | ||
@@ -325,56 +354,37 @@ static struct of_platform_driver clock_driver = { | |||
325 | 354 | ||
326 | 355 | ||
327 | /* Probe for the mostek real time clock chip. */ | 356 | /* Probe for the mostek real time clock chip. */ |
328 | static void clock_init(void) | 357 | static int __init clock_init(void) |
329 | { | 358 | { |
330 | of_register_driver(&clock_driver, &of_bus_type); | 359 | return of_register_driver(&clock_driver, &of_bus_type); |
331 | } | 360 | } |
332 | 361 | ||
362 | /* Must be after subsys_initcall() so that busses are probed. Must | ||
363 | * be before device_initcall() because things like the RTC driver | ||
364 | * need to see the clock registers. | ||
365 | */ | ||
366 | fs_initcall(clock_init); | ||
367 | #endif /* !CONFIG_SUN4 */ | ||
368 | |||
333 | void __init sbus_time_init(void) | 369 | void __init sbus_time_init(void) |
334 | { | 370 | { |
335 | unsigned int year, mon, day, hour, min, sec; | ||
336 | struct mostek48t02 *mregs; | ||
337 | |||
338 | #ifdef CONFIG_SUN4 | ||
339 | int temp; | ||
340 | struct intersil *iregs; | ||
341 | #endif | ||
342 | 371 | ||
343 | BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM); | 372 | BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM); |
344 | btfixup(); | 373 | btfixup(); |
345 | 374 | ||
346 | if (ARCH_SUN4) | 375 | if (ARCH_SUN4) |
347 | sun4_clock_probe(); | 376 | sun4_clock_probe(); |
348 | else | ||
349 | clock_init(); | ||
350 | 377 | ||
351 | sparc_init_timers(timer_interrupt); | 378 | sparc_init_timers(timer_interrupt); |
352 | 379 | ||
353 | #ifdef CONFIG_SUN4 | 380 | #ifdef CONFIG_SUN4 |
354 | if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) { | 381 | if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) { |
355 | #endif | 382 | mostek_set_system_time(); |
356 | mregs = (struct mostek48t02 *)mstk48t02_regs; | ||
357 | if(!mregs) { | ||
358 | prom_printf("Something wrong, clock regs not mapped yet.\n"); | ||
359 | prom_halt(); | ||
360 | } | ||
361 | spin_lock_irq(&mostek_lock); | ||
362 | mregs->creg |= MSTK_CREG_READ; | ||
363 | sec = MSTK_REG_SEC(mregs); | ||
364 | min = MSTK_REG_MIN(mregs); | ||
365 | hour = MSTK_REG_HOUR(mregs); | ||
366 | day = MSTK_REG_DOM(mregs); | ||
367 | mon = MSTK_REG_MONTH(mregs); | ||
368 | year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) ); | ||
369 | xtime.tv_sec = mktime(year, mon, day, hour, min, sec); | ||
370 | xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); | ||
371 | set_normalized_timespec(&wall_to_monotonic, | ||
372 | -xtime.tv_sec, -xtime.tv_nsec); | ||
373 | mregs->creg &= ~MSTK_CREG_READ; | ||
374 | spin_unlock_irq(&mostek_lock); | ||
375 | #ifdef CONFIG_SUN4 | ||
376 | } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) { | 383 | } else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) { |
377 | /* initialise the intersil on sun4 */ | 384 | /* initialise the intersil on sun4 */ |
385 | unsigned int year, mon, day, hour, min, sec; | ||
386 | int temp; | ||
387 | struct intersil *iregs; | ||
378 | 388 | ||
379 | iregs=intersil_clock; | 389 | iregs=intersil_clock; |
380 | if(!iregs) { | 390 | if(!iregs) { |
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 9b5bb413a6e9..5d4a7d125ed0 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -103,7 +103,7 @@ ENTRY(ia32_sysenter_target) | |||
103 | pushq %rax | 103 | pushq %rax |
104 | CFI_ADJUST_CFA_OFFSET 8 | 104 | CFI_ADJUST_CFA_OFFSET 8 |
105 | cld | 105 | cld |
106 | SAVE_ARGS 0,0,1 | 106 | SAVE_ARGS 0,0,0 |
107 | /* no need to do an access_ok check here because rbp has been | 107 | /* no need to do an access_ok check here because rbp has been |
108 | 32bit zero extended */ | 108 | 32bit zero extended */ |
109 | 1: movl (%rbp),%r9d | 109 | 1: movl (%rbp),%r9d |
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index b9ff75992c16..e0341c6808e5 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -193,7 +193,7 @@ unsigned long profile_pc(struct pt_regs *regs) | |||
193 | is just accounted to the spinlock function. | 193 | is just accounted to the spinlock function. |
194 | Better would be to write these functions in assembler again | 194 | Better would be to write these functions in assembler again |
195 | and check exactly. */ | 195 | and check exactly. */ |
196 | if (in_lock_functions(pc)) { | 196 | if (!user_mode(regs) && in_lock_functions(pc)) { |
197 | char *v = *(char **)regs->rsp; | 197 | char *v = *(char **)regs->rsp; |
198 | if ((v >= _stext && v <= _etext) || | 198 | if ((v >= _stext && v <= _etext) || |
199 | (v >= _sinittext && v <= _einittext) || | 199 | (v >= _sinittext && v <= _einittext) || |
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index eb39a2775236..f7a9d1421078 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -254,7 +254,6 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s | |||
254 | { | 254 | { |
255 | const unsigned cpu = safe_smp_processor_id(); | 255 | const unsigned cpu = safe_smp_processor_id(); |
256 | unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; | 256 | unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; |
257 | int i = 11; | ||
258 | unsigned used = 0; | 257 | unsigned used = 0; |
259 | 258 | ||
260 | printk("\nCall Trace:\n"); | 259 | printk("\nCall Trace:\n"); |
@@ -275,11 +274,20 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s | |||
275 | if (unwind_init_blocked(&info, tsk) == 0) | 274 | if (unwind_init_blocked(&info, tsk) == 0) |
276 | unw_ret = show_trace_unwind(&info, NULL); | 275 | unw_ret = show_trace_unwind(&info, NULL); |
277 | } | 276 | } |
278 | if (unw_ret > 0) { | 277 | if (unw_ret > 0 && !arch_unw_user_mode(&info)) { |
279 | if (call_trace > 0) | 278 | #ifdef CONFIG_STACK_UNWIND |
279 | unsigned long rip = info.regs.rip; | ||
280 | print_symbol("DWARF2 unwinder stuck at %s\n", rip); | ||
281 | if (call_trace == 1) { | ||
282 | printk("Leftover inexact backtrace:\n"); | ||
283 | stack = (unsigned long *)info.regs.rsp; | ||
284 | } else if (call_trace > 1) | ||
280 | return; | 285 | return; |
281 | printk("Legacy call trace:"); | 286 | else |
282 | i = 18; | 287 | printk("Full inexact backtrace again:\n"); |
288 | #else | ||
289 | printk("Inexact backtrace:\n"); | ||
290 | #endif | ||
283 | } | 291 | } |
284 | } | 292 | } |
285 | 293 | ||
@@ -1118,8 +1126,10 @@ static int __init call_trace_setup(char *s) | |||
1118 | call_trace = -1; | 1126 | call_trace = -1; |
1119 | else if (strcmp(s, "both") == 0) | 1127 | else if (strcmp(s, "both") == 0) |
1120 | call_trace = 0; | 1128 | call_trace = 0; |
1121 | else if (strcmp(s, "new") == 0) | 1129 | else if (strcmp(s, "newfallback") == 0) |
1122 | call_trace = 1; | 1130 | call_trace = 1; |
1131 | else if (strcmp(s, "new") == 0) | ||
1132 | call_trace = 2; | ||
1123 | return 1; | 1133 | return 1; |
1124 | } | 1134 | } |
1125 | __setup("call_trace=", call_trace_setup); | 1135 | __setup("call_trace=", call_trace_setup); |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f712e4cfd9dc..7cf3eb023521 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive) | |||
776 | * not available so we don't need to recheck that. | 776 | * not available so we don't need to recheck that. |
777 | */ | 777 | */ |
778 | capacity = idedisk_capacity(drive); | 778 | capacity = idedisk_capacity(drive); |
779 | barrier = ide_id_has_flush_cache(id) && | 779 | barrier = ide_id_has_flush_cache(id) && !drive->noflush && |
780 | (drive->addressing == 0 || capacity <= (1ULL << 28) || | 780 | (drive->addressing == 0 || capacity <= (1ULL << 28) || |
781 | ide_id_has_flush_cache_ext(id)); | 781 | ide_id_has_flush_cache_ext(id)); |
782 | 782 | ||
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 98918fb6b2ce..7c3a13e1cf64 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive) | |||
750 | goto bug_dma_off; | 750 | goto bug_dma_off; |
751 | printk(", DMA"); | 751 | printk(", DMA"); |
752 | } else if (id->field_valid & 1) { | 752 | } else if (id->field_valid & 1) { |
753 | printk(", BUG"); | 753 | goto bug_dma_off; |
754 | } | 754 | } |
755 | return; | 755 | return; |
756 | bug_dma_off: | 756 | bug_dma_off: |
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 05fbd9298db7..defd4b4bd374 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s) | |||
1539 | const char *hd_words[] = { | 1539 | const char *hd_words[] = { |
1540 | "none", "noprobe", "nowerr", "cdrom", "serialize", | 1540 | "none", "noprobe", "nowerr", "cdrom", "serialize", |
1541 | "autotune", "noautotune", "minus8", "swapdata", "bswap", | 1541 | "autotune", "noautotune", "minus8", "swapdata", "bswap", |
1542 | "minus11", "remap", "remap63", "scsi", NULL }; | 1542 | "noflush", "remap", "remap63", "scsi", NULL }; |
1543 | unit = s[2] - 'a'; | 1543 | unit = s[2] - 'a'; |
1544 | hw = unit / MAX_DRIVES; | 1544 | hw = unit / MAX_DRIVES; |
1545 | unit = unit % MAX_DRIVES; | 1545 | unit = unit % MAX_DRIVES; |
@@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s) | |||
1578 | case -10: /* "bswap" */ | 1578 | case -10: /* "bswap" */ |
1579 | drive->bswap = 1; | 1579 | drive->bswap = 1; |
1580 | goto done; | 1580 | goto done; |
1581 | case -11: /* noflush */ | ||
1582 | drive->noflush = 1; | ||
1583 | goto done; | ||
1581 | case -12: /* "remap" */ | 1584 | case -12: /* "remap" */ |
1582 | drive->remap_0_to_1 = 1; | 1585 | drive->remap_0_to_1 = 1; |
1583 | goto done; | 1586 | goto done; |
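The new "noflush" keyword added above is a per-drive hdX= boot parameter; it sets drive->noflush, which the ide-disk.c hunk earlier uses to suppress cache-flush barriers. A minimal sketch of using it from the kernel command line (the drive letter and the rest of the line are placeholders):

    # disable cache-flush barriers for the third IDE drive only
    kernel /boot/vmlinuz root=/dev/sda1 hdc=noflush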
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3cb04424d351..e9bad185968a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive) | |||
498 | { | 498 | { |
499 | u8 speed = ide_dma_speed(drive, it821x_ratemask(drive)); | 499 | u8 speed = ide_dma_speed(drive, it821x_ratemask(drive)); |
500 | 500 | ||
501 | config_it821x_chipset_for_pio(drive, !speed); | 501 | if (speed) { |
502 | it821x_tune_chipset(drive, speed); | 502 | config_it821x_chipset_for_pio(drive, 0); |
503 | return ide_dma_enable(drive); | 503 | it821x_tune_chipset(drive, speed); |
504 | |||
505 | return ide_dma_enable(drive); | ||
506 | } | ||
507 | |||
508 | return 0; | ||
504 | } | 509 | } |
505 | 510 | ||
506 | /** | 511 | /** |
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..0e3fdf7c6dd3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void) | |||
1537 | { | 1537 | { |
1538 | if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || | 1538 | if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || |
1539 | (idprom->id_machtype == (SM_SUN4|SM_4_470))) { | 1539 | (idprom->id_machtype == (SM_SUN4|SM_4_470))) { |
1540 | memset(&sun4_sdev, 0, sizeof(sdev)); | 1540 | memset(&sun4_sdev, 0, sizeof(struct sbus_dev)); |
1541 | sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; | 1541 | sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; |
1542 | sun4_sdev.irqs[0] = 6; | 1542 | sun4_sdev.irqs[0] = 6; |
1543 | return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); | 1543 | return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); |
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void) | |||
1547 | 1547 | ||
1548 | static int __exit sunlance_sun4_remove(void) | 1548 | static int __exit sunlance_sun4_remove(void) |
1549 | { | 1549 | { |
1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); | 1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); |
1551 | struct net_device *net_dev = lp->dev; | 1551 | struct net_device *net_dev = lp->dev; |
1552 | 1552 | ||
1553 | unregister_netdevice(net_dev); | 1553 | unregister_netdevice(net_dev); |
1554 | 1554 | ||
1555 | lance_free_hwresources(root_lance_dev); | 1555 | lance_free_hwresources(lp); |
1556 | 1556 | ||
1557 | free_netdev(net_dev); | 1557 | free_netdev(net_dev); |
1558 | 1558 | ||
1559 | dev_set_drvdata(&sun4_sdev->dev, NULL); | 1559 | dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL); |
1560 | 1560 | ||
1561 | return 0; | 1561 | return 0; |
1562 | } | 1562 | } |
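The memset change above switches the length from sizeof(sdev) to sizeof(struct sbus_dev), so the whole static device structure is cleared rather than however many bytes the old expression happened to name. A minimal user-space sketch of the general pitfall (the structure here is a made-up stand-in, not the real struct sbus_dev):

#include <stdio.h>
#include <string.h>

struct sbus_dev_demo {			/* stand-in; the real struct sbus_dev is larger */
	unsigned long reg_addrs[4];
	int irqs[4];
};

static struct sbus_dev_demo sun4_sdev;

int main(void)
{
	struct sbus_dev_demo *sdev = &sun4_sdev;

	/* sizeof(sdev) is only the size of a pointer, not of the object */
	printf("sizeof(sdev)                 = %zu\n", sizeof(sdev));
	printf("sizeof(struct sbus_dev_demo) = %zu\n", sizeof(struct sbus_dev_demo));

	/* clearing with a pointer-sized count leaves most of the structure untouched */
	memset(&sun4_sdev, 0, sizeof(sdev));			/* too small */
	/* either of these clears the whole object, as the patch now does */
	memset(&sun4_sdev, 0, sizeof(struct sbus_dev_demo));
	memset(sdev, 0, sizeof(*sdev));
	return 0;
}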
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index f26a2ee3aad8..3cba6c9fab11 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root, | |||
152 | struct ccwgroup_device *gdev; | 152 | struct ccwgroup_device *gdev; |
153 | int i; | 153 | int i; |
154 | int rc; | 154 | int rc; |
155 | int del_drvdata; | ||
156 | 155 | ||
157 | if (argc > 256) /* disallow dumb users */ | 156 | if (argc > 256) /* disallow dumb users */ |
158 | return -EINVAL; | 157 | return -EINVAL; |
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root, | |||
163 | 162 | ||
164 | atomic_set(&gdev->onoff, 0); | 163 | atomic_set(&gdev->onoff, 0); |
165 | 164 | ||
166 | del_drvdata = 0; | ||
167 | for (i = 0; i < argc; i++) { | 165 | for (i = 0; i < argc; i++) { |
168 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); | 166 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); |
169 | 167 | ||
@@ -180,10 +178,8 @@ ccwgroup_create(struct device *root, | |||
180 | rc = -EINVAL; | 178 | rc = -EINVAL; |
181 | goto free_dev; | 179 | goto free_dev; |
182 | } | 180 | } |
183 | } | ||
184 | for (i = 0; i < argc; i++) | ||
185 | gdev->cdev[i]->dev.driver_data = gdev; | 181 | gdev->cdev[i]->dev.driver_data = gdev; |
186 | del_drvdata = 1; | 182 | } |
187 | 183 | ||
188 | gdev->creator_id = creator_id; | 184 | gdev->creator_id = creator_id; |
189 | gdev->count = argc; | 185 | gdev->count = argc; |
@@ -226,9 +222,9 @@ error: | |||
226 | free_dev: | 222 | free_dev: |
227 | for (i = 0; i < argc; i++) | 223 | for (i = 0; i < argc; i++) |
228 | if (gdev->cdev[i]) { | 224 | if (gdev->cdev[i]) { |
229 | put_device(&gdev->cdev[i]->dev); | 225 | if (gdev->cdev[i]->dev.driver_data == gdev) |
230 | if (del_drvdata) | ||
231 | gdev->cdev[i]->dev.driver_data = NULL; | 226 | gdev->cdev[i]->dev.driver_data = NULL; |
227 | put_device(&gdev->cdev[i]->dev); | ||
232 | } | 228 | } |
233 | kfree(gdev); | 229 | kfree(gdev); |
234 | return rc; | 230 | return rc; |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ac6e0c7e43d9..7a39e0b0386c 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
152 | if (cdev->private->iretry) { | 152 | if (cdev->private->iretry) { |
153 | cdev->private->iretry--; | 153 | cdev->private->iretry--; |
154 | ret = cio_halt(sch); | 154 | ret = cio_halt(sch); |
155 | return (ret == 0) ? -EBUSY : ret; | 155 | if (ret != -EBUSY) |
156 | return (ret == 0) ? -EBUSY : ret; | ||
156 | } | 157 | } |
157 | /* halt io unsuccessful. */ | 158 | /* halt io unsuccessful. */ |
158 | cdev->private->iretry = 255; /* 255 clear retries. */ | 159 | cdev->private->iretry = 255; /* 255 clear retries. */ |
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a89c4115cfba..32293f451669 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, | |||
110 | sshdr.asc, sshdr.ascq); | 110 | sshdr.asc, sshdr.ascq); |
111 | break; | 111 | break; |
112 | case NOT_READY: /* This happens if there is no disc in drive */ | 112 | case NOT_READY: /* This happens if there is no disc in drive */ |
113 | if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { | 113 | if (sdev->removable) |
114 | printk(KERN_INFO "Device not ready. Make sure" | ||
115 | " there is a disc in the drive.\n"); | ||
116 | break; | 114 | break; |
117 | } | ||
118 | case UNIT_ATTENTION: | 115 | case UNIT_ATTENTION: |
119 | if (sdev->removable) { | 116 | if (sdev->removable) { |
120 | sdev->changed = 1; | 117 | sdev->changed = 1; |
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index ceda3a2859d2..7858703ed84c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -246,8 +246,8 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); | |||
246 | #define BUF_BUSY XBF_DONT_BLOCK | 246 | #define BUF_BUSY XBF_DONT_BLOCK |
247 | 247 | ||
248 | #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) | 248 | #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) |
249 | #define XFS_BUF_ZEROFLAGS(bp) \ | 249 | #define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ |
250 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) | 250 | ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) |
251 | 251 | ||
252 | #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) | 252 | #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) |
253 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) | 253 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9bdef9d51900..4754f342a5d3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -314,6 +314,13 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp) | |||
314 | return; | 314 | return; |
315 | } | 315 | } |
316 | 316 | ||
317 | if (xfs_readonly_buftarg(mp->m_ddev_targp)) { | ||
318 | xfs_fs_cmn_err(CE_NOTE, mp, | ||
319 | "Disabling barriers, underlying device is readonly"); | ||
320 | mp->m_flags &= ~XFS_MOUNT_BARRIER; | ||
321 | return; | ||
322 | } | ||
323 | |||
317 | error = xfs_barrier_test(mp); | 324 | error = xfs_barrier_test(mp); |
318 | if (error) { | 325 | if (error) { |
319 | xfs_fs_cmn_err(CE_NOTE, mp, | 326 | xfs_fs_cmn_err(CE_NOTE, mp, |
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index e95e99f7168f..f137856c3261 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -217,17 +217,24 @@ xfs_qm_statvfs( | |||
217 | return 0; | 217 | return 0; |
218 | dp = &dqp->q_core; | 218 | dp = &dqp->q_core; |
219 | 219 | ||
220 | limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit; | 220 | limit = dp->d_blk_softlimit ? |
221 | be64_to_cpu(dp->d_blk_softlimit) : | ||
222 | be64_to_cpu(dp->d_blk_hardlimit); | ||
221 | if (limit && statp->f_blocks > limit) { | 223 | if (limit && statp->f_blocks > limit) { |
222 | statp->f_blocks = limit; | 224 | statp->f_blocks = limit; |
223 | statp->f_bfree = (statp->f_blocks > dp->d_bcount) ? | 225 | statp->f_bfree = |
224 | (statp->f_blocks - dp->d_bcount) : 0; | 226 | (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? |
227 | (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; | ||
225 | } | 228 | } |
226 | limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit; | 229 | |
230 | limit = dp->d_ino_softlimit ? | ||
231 | be64_to_cpu(dp->d_ino_softlimit) : | ||
232 | be64_to_cpu(dp->d_ino_hardlimit); | ||
227 | if (limit && statp->f_files > limit) { | 233 | if (limit && statp->f_files > limit) { |
228 | statp->f_files = limit; | 234 | statp->f_files = limit; |
229 | statp->f_ffree = (statp->f_files > dp->d_icount) ? | 235 | statp->f_ffree = |
230 | (statp->f_ffree - dp->d_icount) : 0; | 236 | (statp->f_files > be64_to_cpu(dp->d_icount)) ? |
237 | (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0; | ||
231 | } | 238 | } |
232 | 239 | ||
233 | xfs_qm_dqput(dqp); | 240 | xfs_qm_dqput(dqp); |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 86c1bf0bba9e..1f8ecff8553a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -334,10 +334,9 @@ xfs_itobp( | |||
334 | #if !defined(__KERNEL__) | 334 | #if !defined(__KERNEL__) |
335 | ni = 0; | 335 | ni = 0; |
336 | #elif defined(DEBUG) | 336 | #elif defined(DEBUG) |
337 | ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : | 337 | ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; |
338 | (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog); | ||
339 | #else /* usual case */ | 338 | #else /* usual case */ |
340 | ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1; | 339 | ni = 1; |
341 | #endif | 340 | #endif |
342 | 341 | ||
343 | for (i = 0; i < ni; i++) { | 342 | for (i = 0; i < ni; i++) { |
@@ -348,11 +347,15 @@ xfs_itobp( | |||
348 | (i << mp->m_sb.sb_inodelog)); | 347 | (i << mp->m_sb.sb_inodelog)); |
349 | di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && | 348 | di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && |
350 | XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); | 349 | XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); |
351 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, | 350 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, |
352 | XFS_RANDOM_ITOBP_INOTOBP))) { | 351 | XFS_ERRTAG_ITOBP_INOTOBP, |
352 | XFS_RANDOM_ITOBP_INOTOBP))) { | ||
353 | if (imap_flags & XFS_IMAP_BULKSTAT) { | ||
354 | xfs_trans_brelse(tp, bp); | ||
355 | return XFS_ERROR(EINVAL); | ||
356 | } | ||
353 | #ifdef DEBUG | 357 | #ifdef DEBUG |
354 | if (!(imap_flags & XFS_IMAP_BULKSTAT)) | 358 | cmn_err(CE_ALERT, |
355 | cmn_err(CE_ALERT, | ||
356 | "Device %s - bad inode magic/vsn " | 359 | "Device %s - bad inode magic/vsn " |
357 | "daddr %lld #%d (magic=%x)", | 360 | "daddr %lld #%d (magic=%x)", |
358 | XFS_BUFTARG_NAME(mp->m_ddev_targp), | 361 | XFS_BUFTARG_NAME(mp->m_ddev_targp), |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e730328636c3..21ac1a67e3e0 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1413,7 +1413,7 @@ xlog_sync(xlog_t *log, | |||
1413 | ops = iclog->ic_header.h_num_logops; | 1413 | ops = iclog->ic_header.h_num_logops; |
1414 | INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); | 1414 | INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); |
1415 | 1415 | ||
1416 | bp = iclog->ic_bp; | 1416 | bp = iclog->ic_bp; |
1417 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); | 1417 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); |
1418 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); | 1418 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); |
1419 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); | 1419 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); |
@@ -1430,15 +1430,14 @@ xlog_sync(xlog_t *log, | |||
1430 | } | 1430 | } |
1431 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); | 1431 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); |
1432 | XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ | 1432 | XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ |
1433 | XFS_BUF_ZEROFLAGS(bp); | ||
1433 | XFS_BUF_BUSY(bp); | 1434 | XFS_BUF_BUSY(bp); |
1434 | XFS_BUF_ASYNC(bp); | 1435 | XFS_BUF_ASYNC(bp); |
1435 | /* | 1436 | /* |
1436 | * Do an ordered write for the log block. | 1437 | * Do an ordered write for the log block. |
1437 | * | 1438 | * Its unnecessary to flush the first split block in the log wrap case. |
1438 | * It may not be needed to flush the first split block in the log wrap | ||
1439 | * case, but do it anyways to be safe -AK | ||
1440 | */ | 1439 | */ |
1441 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) | 1440 | if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER)) |
1442 | XFS_BUF_ORDERED(bp); | 1441 | XFS_BUF_ORDERED(bp); |
1443 | 1442 | ||
1444 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); | 1443 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); |
@@ -1460,7 +1459,7 @@ xlog_sync(xlog_t *log, | |||
1460 | return error; | 1459 | return error; |
1461 | } | 1460 | } |
1462 | if (split) { | 1461 | if (split) { |
1463 | bp = iclog->ic_log->l_xbuf; | 1462 | bp = iclog->ic_log->l_xbuf; |
1464 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == | 1463 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == |
1465 | (unsigned long)1); | 1464 | (unsigned long)1); |
1466 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); | 1465 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); |
@@ -1468,6 +1467,7 @@ xlog_sync(xlog_t *log, | |||
1468 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ | 1467 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ |
1469 | (__psint_t)count), split); | 1468 | (__psint_t)count), split); |
1470 | XFS_BUF_SET_FSPRIVATE(bp, iclog); | 1469 | XFS_BUF_SET_FSPRIVATE(bp, iclog); |
1470 | XFS_BUF_ZEROFLAGS(bp); | ||
1471 | XFS_BUF_BUSY(bp); | 1471 | XFS_BUF_BUSY(bp); |
1472 | XFS_BUF_ASYNC(bp); | 1472 | XFS_BUF_ASYNC(bp); |
1473 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) | 1473 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) |
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 6c96391f3f1a..b427d220a169 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -515,7 +515,7 @@ xfs_mount( | |||
515 | if (error) | 515 | if (error) |
516 | goto error2; | 516 | goto error2; |
517 | 517 | ||
518 | if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) | 518 | if (mp->m_flags & XFS_MOUNT_BARRIER) |
519 | xfs_mountfs_check_barriers(mp); | 519 | xfs_mountfs_check_barriers(mp); |
520 | 520 | ||
521 | error = XFS_IOINIT(vfsp, args, flags); | 521 | error = XFS_IOINIT(vfsp, args, flags); |
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 03f5bc9b6bec..1ba19eb34ce3 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -339,7 +339,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) | |||
339 | " .section .sun4v_2insn_patch, \"ax\"\n" | 339 | " .section .sun4v_2insn_patch, \"ax\"\n" |
340 | " .word 661b\n" | 340 | " .word 661b\n" |
341 | " andn %0, %4, %0\n" | 341 | " andn %0, %4, %0\n" |
342 | " or %0, %3, %0\n" | 342 | " or %0, %5, %0\n" |
343 | " .previous\n" | 343 | " .previous\n" |
344 | : "=r" (val) | 344 | : "=r" (val) |
345 | : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), | 345 | : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), |
diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h
index 5015bb8d6c32..89d42431efb5 100644
--- a/include/asm-sparc64/sfp-machine.h
+++ b/include/asm-sparc64/sfp-machine.h
@@ -34,7 +34,7 @@ | |||
34 | #define _FP_MUL_MEAT_D(R,X,Y) \ | 34 | #define _FP_MUL_MEAT_D(R,X,Y) \ |
35 | _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) | 35 | _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) |
36 | #define _FP_MUL_MEAT_Q(R,X,Y) \ | 36 | #define _FP_MUL_MEAT_Q(R,X,Y) \ |
37 | _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) | 37 | _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) |
38 | 38 | ||
39 | #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) | 39 | #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) |
40 | #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) | 40 | #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) |
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index f7bf875aae40..10f346165cab 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -19,7 +19,7 @@ | |||
19 | #define EXCEPTION_STACK_ORDER 0 | 19 | #define EXCEPTION_STACK_ORDER 0 |
20 | #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) | 20 | #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) |
21 | 21 | ||
22 | #define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER | 22 | #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) |
23 | #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) | 23 | #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) |
24 | 24 | ||
25 | #define IRQSTACK_ORDER 2 | 25 | #define IRQSTACK_ORDER 2 |
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 34c3a215f2cd..d097b5b72bc6 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -96,7 +96,8 @@ struct robust_list_head { | |||
96 | long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, | 96 | long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, |
97 | u32 __user *uaddr2, u32 val2, u32 val3); | 97 | u32 __user *uaddr2, u32 val2, u32 val3); |
98 | 98 | ||
99 | extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); | 99 | extern int |
100 | handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); | ||
100 | 101 | ||
101 | #ifdef CONFIG_FUTEX | 102 | #ifdef CONFIG_FUTEX |
102 | extern void exit_robust_list(struct task_struct *curr); | 103 | extern void exit_robust_list(struct task_struct *curr); |
diff --git a/include/linux/ide.h b/include/linux/ide.h
index dc7abef10965..99620451d958 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -571,6 +571,7 @@ typedef struct ide_drive_s { | |||
571 | u8 waiting_for_dma; /* dma currently in progress */ | 571 | u8 waiting_for_dma; /* dma currently in progress */ |
572 | u8 unmask; /* okay to unmask other irqs */ | 572 | u8 unmask; /* okay to unmask other irqs */ |
573 | u8 bswap; /* byte swap data */ | 573 | u8 bswap; /* byte swap data */ |
574 | u8 noflush; /* don't attempt flushes */ | ||
574 | u8 dsc_overlap; /* DSC overlap */ | 575 | u8 dsc_overlap; /* DSC overlap */ |
575 | u8 nice1; /* give potential excess bandwidth */ | 576 | u8 nice1; /* give potential excess bandwidth */ |
576 | 577 | ||
diff --git a/kernel/futex.c b/kernel/futex.c
index cf0c8e21d1ab..dda2049692a2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -415,15 +415,15 @@ out_unlock: | |||
415 | */ | 415 | */ |
416 | void exit_pi_state_list(struct task_struct *curr) | 416 | void exit_pi_state_list(struct task_struct *curr) |
417 | { | 417 | { |
418 | struct futex_hash_bucket *hb; | ||
419 | struct list_head *next, *head = &curr->pi_state_list; | 418 | struct list_head *next, *head = &curr->pi_state_list; |
420 | struct futex_pi_state *pi_state; | 419 | struct futex_pi_state *pi_state; |
420 | struct futex_hash_bucket *hb; | ||
421 | union futex_key key; | 421 | union futex_key key; |
422 | 422 | ||
423 | /* | 423 | /* |
424 | * We are a ZOMBIE and nobody can enqueue itself on | 424 | * We are a ZOMBIE and nobody can enqueue itself on |
425 | * pi_state_list anymore, but we have to be careful | 425 | * pi_state_list anymore, but we have to be careful |
426 | * versus waiters unqueueing themselfs | 426 | * versus waiters unqueueing themselves: |
427 | */ | 427 | */ |
428 | spin_lock_irq(&curr->pi_lock); | 428 | spin_lock_irq(&curr->pi_lock); |
429 | while (!list_empty(head)) { | 429 | while (!list_empty(head)) { |
@@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr) | |||
431 | next = head->next; | 431 | next = head->next; |
432 | pi_state = list_entry(next, struct futex_pi_state, list); | 432 | pi_state = list_entry(next, struct futex_pi_state, list); |
433 | key = pi_state->key; | 433 | key = pi_state->key; |
434 | hb = hash_futex(&key); | ||
434 | spin_unlock_irq(&curr->pi_lock); | 435 | spin_unlock_irq(&curr->pi_lock); |
435 | 436 | ||
436 | hb = hash_futex(&key); | ||
437 | spin_lock(&hb->lock); | 437 | spin_lock(&hb->lock); |
438 | 438 | ||
439 | spin_lock_irq(&curr->pi_lock); | 439 | spin_lock_irq(&curr->pi_lock); |
440 | /* | ||
441 | * We dropped the pi-lock, so re-check whether this | ||
442 | * task still owns the PI-state: | ||
443 | */ | ||
440 | if (head->next != next) { | 444 | if (head->next != next) { |
441 | spin_unlock(&hb->lock); | 445 | spin_unlock(&hb->lock); |
442 | continue; | 446 | continue; |
443 | } | 447 | } |
444 | 448 | ||
445 | list_del_init(&pi_state->list); | ||
446 | |||
447 | WARN_ON(pi_state->owner != curr); | 449 | WARN_ON(pi_state->owner != curr); |
448 | 450 | WARN_ON(list_empty(&pi_state->list)); | |
451 | list_del_init(&pi_state->list); | ||
449 | pi_state->owner = NULL; | 452 | pi_state->owner = NULL; |
450 | spin_unlock_irq(&curr->pi_lock); | 453 | spin_unlock_irq(&curr->pi_lock); |
451 | 454 | ||
@@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | |||
470 | head = &hb->chain; | 473 | head = &hb->chain; |
471 | 474 | ||
472 | list_for_each_entry_safe(this, next, head, list) { | 475 | list_for_each_entry_safe(this, next, head, list) { |
473 | if (match_futex (&this->key, &me->key)) { | 476 | if (match_futex(&this->key, &me->key)) { |
474 | /* | 477 | /* |
475 | * Another waiter already exists - bump up | 478 | * Another waiter already exists - bump up |
476 | * the refcount and return its pi_state: | 479 | * the refcount and return its pi_state: |
@@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | |||
482 | if (unlikely(!pi_state)) | 485 | if (unlikely(!pi_state)) |
483 | return -EINVAL; | 486 | return -EINVAL; |
484 | 487 | ||
488 | WARN_ON(!atomic_read(&pi_state->refcount)); | ||
489 | |||
485 | atomic_inc(&pi_state->refcount); | 490 | atomic_inc(&pi_state->refcount); |
486 | me->pi_state = pi_state; | 491 | me->pi_state = pi_state; |
487 | 492 | ||
@@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | |||
490 | } | 495 | } |
491 | 496 | ||
492 | /* | 497 | /* |
493 | * We are the first waiter - try to look up the real owner and | 498 | * We are the first waiter - try to look up the real owner and attach |
494 | * attach the new pi_state to it: | 499 | * the new pi_state to it, but bail out when the owner died bit is set |
500 | * and TID = 0: | ||
495 | */ | 501 | */ |
496 | pid = uval & FUTEX_TID_MASK; | 502 | pid = uval & FUTEX_TID_MASK; |
503 | if (!pid && (uval & FUTEX_OWNER_DIED)) | ||
504 | return -ESRCH; | ||
497 | p = futex_find_get_task(pid); | 505 | p = futex_find_get_task(pid); |
498 | if (!p) | 506 | if (!p) |
499 | return -ESRCH; | 507 | return -ESRCH; |
@@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | |||
510 | pi_state->key = me->key; | 518 | pi_state->key = me->key; |
511 | 519 | ||
512 | spin_lock_irq(&p->pi_lock); | 520 | spin_lock_irq(&p->pi_lock); |
521 | WARN_ON(!list_empty(&pi_state->list)); | ||
513 | list_add(&pi_state->list, &p->pi_state_list); | 522 | list_add(&pi_state->list, &p->pi_state_list); |
514 | pi_state->owner = p; | 523 | pi_state->owner = p; |
515 | spin_unlock_irq(&p->pi_lock); | 524 | spin_unlock_irq(&p->pi_lock); |
@@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) | |||
573 | * kept enabled while there is PI state around. We must also | 582 | * kept enabled while there is PI state around. We must also |
574 | * preserve the owner died bit.) | 583 | * preserve the owner died bit.) |
575 | */ | 584 | */ |
576 | newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid; | 585 | if (!(uval & FUTEX_OWNER_DIED)) { |
586 | newval = FUTEX_WAITERS | new_owner->pid; | ||
577 | 587 | ||
578 | inc_preempt_count(); | 588 | inc_preempt_count(); |
579 | curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); | 589 | curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); |
580 | dec_preempt_count(); | 590 | dec_preempt_count(); |
591 | if (curval == -EFAULT) | ||
592 | return -EFAULT; | ||
593 | if (curval != uval) | ||
594 | return -EINVAL; | ||
595 | } | ||
581 | 596 | ||
582 | if (curval == -EFAULT) | 597 | spin_lock_irq(&pi_state->owner->pi_lock); |
583 | return -EFAULT; | 598 | WARN_ON(list_empty(&pi_state->list)); |
584 | if (curval != uval) | 599 | list_del_init(&pi_state->list); |
585 | return -EINVAL; | 600 | spin_unlock_irq(&pi_state->owner->pi_lock); |
586 | 601 | ||
587 | list_del_init(&pi_state->owner->pi_state_list); | 602 | spin_lock_irq(&new_owner->pi_lock); |
603 | WARN_ON(!list_empty(&pi_state->list)); | ||
588 | list_add(&pi_state->list, &new_owner->pi_state_list); | 604 | list_add(&pi_state->list, &new_owner->pi_state_list); |
589 | pi_state->owner = new_owner; | 605 | pi_state->owner = new_owner; |
606 | spin_unlock_irq(&new_owner->pi_lock); | ||
607 | |||
590 | rt_mutex_unlock(&pi_state->pi_mutex); | 608 | rt_mutex_unlock(&pi_state->pi_mutex); |
591 | 609 | ||
592 | return 0; | 610 | return 0; |
@@ -1236,6 +1254,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, | |||
1236 | /* Owner died? */ | 1254 | /* Owner died? */ |
1237 | if (q.pi_state->owner != NULL) { | 1255 | if (q.pi_state->owner != NULL) { |
1238 | spin_lock_irq(&q.pi_state->owner->pi_lock); | 1256 | spin_lock_irq(&q.pi_state->owner->pi_lock); |
1257 | WARN_ON(list_empty(&q.pi_state->list)); | ||
1239 | list_del_init(&q.pi_state->list); | 1258 | list_del_init(&q.pi_state->list); |
1240 | spin_unlock_irq(&q.pi_state->owner->pi_lock); | 1259 | spin_unlock_irq(&q.pi_state->owner->pi_lock); |
1241 | } else | 1260 | } else |
@@ -1244,6 +1263,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, | |||
1244 | q.pi_state->owner = current; | 1263 | q.pi_state->owner = current; |
1245 | 1264 | ||
1246 | spin_lock_irq(&current->pi_lock); | 1265 | spin_lock_irq(&current->pi_lock); |
1266 | WARN_ON(!list_empty(&q.pi_state->list)); | ||
1247 | list_add(&q.pi_state->list, ¤t->pi_state_list); | 1267 | list_add(&q.pi_state->list, ¤t->pi_state_list); |
1248 | spin_unlock_irq(&current->pi_lock); | 1268 | spin_unlock_irq(&current->pi_lock); |
1249 | 1269 | ||
@@ -1427,9 +1447,11 @@ retry_locked: | |||
1427 | * again. If it succeeds then we can return without waking | 1447 | * again. If it succeeds then we can return without waking |
1428 | * anyone else up: | 1448 | * anyone else up: |
1429 | */ | 1449 | */ |
1430 | inc_preempt_count(); | 1450 | if (!(uval & FUTEX_OWNER_DIED)) { |
1431 | uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); | 1451 | inc_preempt_count(); |
1432 | dec_preempt_count(); | 1452 | uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); |
1453 | dec_preempt_count(); | ||
1454 | } | ||
1433 | 1455 | ||
1434 | if (unlikely(uval == -EFAULT)) | 1456 | if (unlikely(uval == -EFAULT)) |
1435 | goto pi_faulted; | 1457 | goto pi_faulted; |
@@ -1462,9 +1484,11 @@ retry_locked: | |||
1462 | /* | 1484 | /* |
1463 | * No waiters - kernel unlocks the futex: | 1485 | * No waiters - kernel unlocks the futex: |
1464 | */ | 1486 | */ |
1465 | ret = unlock_futex_pi(uaddr, uval); | 1487 | if (!(uval & FUTEX_OWNER_DIED)) { |
1466 | if (ret == -EFAULT) | 1488 | ret = unlock_futex_pi(uaddr, uval); |
1467 | goto pi_faulted; | 1489 | if (ret == -EFAULT) |
1490 | goto pi_faulted; | ||
1491 | } | ||
1468 | 1492 | ||
1469 | out_unlock: | 1493 | out_unlock: |
1470 | spin_unlock(&hb->lock); | 1494 | spin_unlock(&hb->lock); |
@@ -1683,9 +1707,9 @@ err_unlock: | |||
1683 | * Process a futex-list entry, check whether it's owned by the | 1707 | * Process a futex-list entry, check whether it's owned by the |
1684 | * dying task, and do notification if so: | 1708 | * dying task, and do notification if so: |
1685 | */ | 1709 | */ |
1686 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) | 1710 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) |
1687 | { | 1711 | { |
1688 | u32 uval, nval; | 1712 | u32 uval, nval, mval; |
1689 | 1713 | ||
1690 | retry: | 1714 | retry: |
1691 | if (get_user(uval, uaddr)) | 1715 | if (get_user(uval, uaddr)) |
@@ -1702,21 +1726,45 @@ retry: | |||
1702 | * thread-death.) The rest of the cleanup is done in | 1726 | * thread-death.) The rest of the cleanup is done in |
1703 | * userspace. | 1727 | * userspace. |
1704 | */ | 1728 | */ |
1705 | nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, | 1729 | mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; |
1706 | uval | FUTEX_OWNER_DIED); | 1730 | nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); |
1731 | |||
1707 | if (nval == -EFAULT) | 1732 | if (nval == -EFAULT) |
1708 | return -1; | 1733 | return -1; |
1709 | 1734 | ||
1710 | if (nval != uval) | 1735 | if (nval != uval) |
1711 | goto retry; | 1736 | goto retry; |
1712 | 1737 | ||
1713 | if (uval & FUTEX_WAITERS) | 1738 | /* |
1714 | futex_wake(uaddr, 1); | 1739 | * Wake robust non-PI futexes here. The wakeup of |
1740 | * PI futexes happens in exit_pi_state(): | ||
1741 | */ | ||
1742 | if (!pi) { | ||
1743 | if (uval & FUTEX_WAITERS) | ||
1744 | futex_wake(uaddr, 1); | ||
1745 | } | ||
1715 | } | 1746 | } |
1716 | return 0; | 1747 | return 0; |
1717 | } | 1748 | } |
1718 | 1749 | ||
1719 | /* | 1750 | /* |
1751 | * Fetch a robust-list pointer. Bit 0 signals PI futexes: | ||
1752 | */ | ||
1753 | static inline int fetch_robust_entry(struct robust_list __user **entry, | ||
1754 | struct robust_list __user **head, int *pi) | ||
1755 | { | ||
1756 | unsigned long uentry; | ||
1757 | |||
1758 | if (get_user(uentry, (unsigned long *)head)) | ||
1759 | return -EFAULT; | ||
1760 | |||
1761 | *entry = (void *)(uentry & ~1UL); | ||
1762 | *pi = uentry & 1; | ||
1763 | |||
1764 | return 0; | ||
1765 | } | ||
1766 | |||
1767 | /* | ||
1720 | * Walk curr->robust_list (very carefully, it's a userspace list!) | 1768 | * Walk curr->robust_list (very carefully, it's a userspace list!) |
1721 | * and mark any locks found there dead, and notify any waiters. | 1769 | * and mark any locks found there dead, and notify any waiters. |
1722 | * | 1770 | * |
@@ -1726,14 +1774,14 @@ void exit_robust_list(struct task_struct *curr) | |||
1726 | { | 1774 | { |
1727 | struct robust_list_head __user *head = curr->robust_list; | 1775 | struct robust_list_head __user *head = curr->robust_list; |
1728 | struct robust_list __user *entry, *pending; | 1776 | struct robust_list __user *entry, *pending; |
1729 | unsigned int limit = ROBUST_LIST_LIMIT; | 1777 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
1730 | unsigned long futex_offset; | 1778 | unsigned long futex_offset; |
1731 | 1779 | ||
1732 | /* | 1780 | /* |
1733 | * Fetch the list head (which was registered earlier, via | 1781 | * Fetch the list head (which was registered earlier, via |
1734 | * sys_set_robust_list()): | 1782 | * sys_set_robust_list()): |
1735 | */ | 1783 | */ |
1736 | if (get_user(entry, &head->list.next)) | 1784 | if (fetch_robust_entry(&entry, &head->list.next, &pi)) |
1737 | return; | 1785 | return; |
1738 | /* | 1786 | /* |
1739 | * Fetch the relative futex offset: | 1787 | * Fetch the relative futex offset: |
@@ -1744,10 +1792,11 @@ void exit_robust_list(struct task_struct *curr) | |||
1744 | * Fetch any possibly pending lock-add first, and handle it | 1792 | * Fetch any possibly pending lock-add first, and handle it |
1745 | * if it exists: | 1793 | * if it exists: |
1746 | */ | 1794 | */ |
1747 | if (get_user(pending, &head->list_op_pending)) | 1795 | if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) |
1748 | return; | 1796 | return; |
1797 | |||
1749 | if (pending) | 1798 | if (pending) |
1750 | handle_futex_death((void *)pending + futex_offset, curr); | 1799 | handle_futex_death((void *)pending + futex_offset, curr, pip); |
1751 | 1800 | ||
1752 | while (entry != &head->list) { | 1801 | while (entry != &head->list) { |
1753 | /* | 1802 | /* |
@@ -1756,12 +1805,12 @@ void exit_robust_list(struct task_struct *curr) | |||
1756 | */ | 1805 | */ |
1757 | if (entry != pending) | 1806 | if (entry != pending) |
1758 | if (handle_futex_death((void *)entry + futex_offset, | 1807 | if (handle_futex_death((void *)entry + futex_offset, |
1759 | curr)) | 1808 | curr, pi)) |
1760 | return; | 1809 | return; |
1761 | /* | 1810 | /* |
1762 | * Fetch the next entry in the list: | 1811 | * Fetch the next entry in the list: |
1763 | */ | 1812 | */ |
1764 | if (get_user(entry, &entry->next)) | 1813 | if (fetch_robust_entry(&entry, &entry->next, &pi)) |
1765 | return; | 1814 | return; |
1766 | /* | 1815 | /* |
1767 | * Avoid excessively long or circular lists: | 1816 | * Avoid excessively long or circular lists: |
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d1d92b441fb7..d1aab1a452cc 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -12,6 +12,23 @@ | |||
12 | 12 | ||
13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
14 | 14 | ||
15 | |||
16 | /* | ||
17 | * Fetch a robust-list pointer. Bit 0 signals PI futexes: | ||
18 | */ | ||
19 | static inline int | ||
20 | fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, | ||
21 | compat_uptr_t *head, int *pi) | ||
22 | { | ||
23 | if (get_user(*uentry, head)) | ||
24 | return -EFAULT; | ||
25 | |||
26 | *entry = compat_ptr((*uentry) & ~1); | ||
27 | *pi = (unsigned int)(*uentry) & 1; | ||
28 | |||
29 | return 0; | ||
30 | } | ||
31 | |||
15 | /* | 32 | /* |
16 | * Walk curr->robust_list (very carefully, it's a userspace list!) | 33 | * Walk curr->robust_list (very carefully, it's a userspace list!) |
17 | * and mark any locks found there dead, and notify any waiters. | 34 | * and mark any locks found there dead, and notify any waiters. |
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
22 | { | 39 | { |
23 | struct compat_robust_list_head __user *head = curr->compat_robust_list; | 40 | struct compat_robust_list_head __user *head = curr->compat_robust_list; |
24 | struct robust_list __user *entry, *pending; | 41 | struct robust_list __user *entry, *pending; |
42 | unsigned int limit = ROBUST_LIST_LIMIT, pi; | ||
25 | compat_uptr_t uentry, upending; | 43 | compat_uptr_t uentry, upending; |
26 | unsigned int limit = ROBUST_LIST_LIMIT; | ||
27 | compat_long_t futex_offset; | 44 | compat_long_t futex_offset; |
28 | 45 | ||
29 | /* | 46 | /* |
30 | * Fetch the list head (which was registered earlier, via | 47 | * Fetch the list head (which was registered earlier, via |
31 | * sys_set_robust_list()): | 48 | * sys_set_robust_list()): |
32 | */ | 49 | */ |
33 | if (get_user(uentry, &head->list.next)) | 50 | if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) |
34 | return; | 51 | return; |
35 | entry = compat_ptr(uentry); | ||
36 | /* | 52 | /* |
37 | * Fetch the relative futex offset: | 53 | * Fetch the relative futex offset: |
38 | */ | 54 | */ |
@@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
42 | * Fetch any possibly pending lock-add first, and handle it | 58 | * Fetch any possibly pending lock-add first, and handle it |
43 | * if it exists: | 59 | * if it exists: |
44 | */ | 60 | */ |
45 | if (get_user(upending, &head->list_op_pending)) | 61 | if (fetch_robust_entry(&upending, &pending, |
62 | &head->list_op_pending, &pi)) | ||
46 | return; | 63 | return; |
47 | pending = compat_ptr(upending); | ||
48 | if (upending) | 64 | if (upending) |
49 | handle_futex_death((void *)pending + futex_offset, curr); | 65 | handle_futex_death((void *)pending + futex_offset, curr, pi); |
50 | 66 | ||
51 | while (compat_ptr(uentry) != &head->list) { | 67 | while (compat_ptr(uentry) != &head->list) { |
52 | /* | 68 | /* |
@@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
55 | */ | 71 | */ |
56 | if (entry != pending) | 72 | if (entry != pending) |
57 | if (handle_futex_death((void *)entry + futex_offset, | 73 | if (handle_futex_death((void *)entry + futex_offset, |
58 | curr)) | 74 | curr, pi)) |
59 | return; | 75 | return; |
60 | 76 | ||
61 | /* | 77 | /* |
62 | * Fetch the next entry in the list: | 78 | * Fetch the next entry in the list: |
63 | */ | 79 | */ |
64 | if (get_user(uentry, (compat_uptr_t *)&entry->next)) | 80 | if (fetch_robust_entry(&uentry, &entry, |
81 | (compat_uptr_t *)&entry->next, &pi)) | ||
65 | return; | 82 | return; |
66 | entry = compat_ptr(uentry); | ||
67 | /* | 83 | /* |
68 | * Avoid excessively long or circular lists: | 84 | * Avoid excessively long or circular lists: |
69 | */ | 85 | */ |