82 files changed, 823 insertions, 541 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 9d3a0775a11d..87851efb0228 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -258,3 +258,19 @@ Why: These drivers never compiled since they were added to the kernel
 Who:	Jean Delvare <khali@linux-fr.org>
 
 ---------------------------
+
+What:	Bridge netfilter deferred IPv4/IPv6 output hook calling
+When:	January 2007
+Why:	The deferred output hooks are a layering violation causing unusual
+	and broken behaviour on bridge devices. Examples of things they
+	break include QoS classification using the MARK or CLASSIFY targets,
+	the IPsec policy match and connection tracking with VLANs on a
+	bridge. Their only use is to enable bridge output port filtering
+	within iptables with the physdev match, which can also be done by
+	combining iptables and ebtables using netfilter marks. Until it
+	is removed, the hook deferral is disabled by default and is
+	only enabled when needed.
+
+Who:	Patrick McHardy <kaber@trash.net>
+
+---------------------------
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 6887d44d2661..6da24e7a56cb 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -238,6 +238,13 @@ Debugging
   pagefaulttrace  Dump all page faults. Only useful for extreme debugging
 		and will create a lot of output.
 
+  call_trace=[old|both|newfallback|new]
+		old: use old inexact backtracer
+		new: use new exact dwarf2 unwinder
+		both: print entries from both
+		newfallback: use new unwinder but fall back to old if it gets
+			stuck (default)
+
 Misc
 
   noreplacement  Don't replace instructions with more appropriate ones
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 923bb292f47f..8657c739656a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
-	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
-	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
+	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
 		__switch_to_xtra(next_p, tss);
 
 	disable_tsc(prev_p, next_p);
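The hunk above only moves a closing parenthesis: before, the unlikely() branch hint covered just the _TIF_WORK_CTXSW test and the TIF_IO_BITMAP check sat outside it; after, the whole disjunction is hinted as unlikely. A minimal userspace sketch of the two groupings, with the unlikely macro stubbed the way the kernel defines it (illustrative only; the truth value of the condition is unchanged, only the hint's scope differs):

    #include <stdio.h>

    /* stub of the kernel's branch-prediction hint */
    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
            int next_flags = 0, prev_io_bitmap = 1;

            /* old grouping: only the first operand is hinted */
            if (unlikely(next_flags) || prev_io_bitmap)
                    printf("old grouping: branch taken\n");

            /* new grouping: the whole condition is hinted as unlikely */
            if (unlikely(next_flags || prev_io_bitmap))
                    printf("new grouping: branch taken\n");

            return 0;
    }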
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 8705c0f05788..edd00f6cee37 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (in_lock_functions(pc))
+	if (!user_mode_vm(regs) && in_lock_functions(pc))
 		return *(unsigned long *)(regs->ebp + 4);
 
 	return pc;
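profile_pc() steps one frame up by treating regs->ebp as an i386 frame pointer: the caller's return address sits one word above the saved EBP. That layout only holds for kernel frames, so registers sampled in user mode must not be dereferenced this way, hence the new user_mode_vm() guard. A standalone sketch of the same frame-layout assumption (compile with -fno-omit-frame-pointer on x86; illustrative only):

    #include <stdio.h>

    /* With frame pointers, a call frame looks like:
     *   [ebp]      saved caller frame pointer
     *   [ebp + 4]  return address (one word above; 4 bytes on i386)
     * "+ 1" below adds one pointer-sized word, matching either width.
     */
    static __attribute__((noinline)) void callee(void)
    {
            void **fp = __builtin_frame_address(0);
            void *ret = *(fp + 1);  /* what profile_pc() reads via ebp + 4 */

            printf("frame %p, return %p (expected %p)\n",
                   (void *)fp, ret, __builtin_return_address(0));
    }

    int main(void)
    {
            callee();
            return 0;
    }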
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 313ac1f7dc5a..3facc8fcb91e 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (unwind_init_blocked(&info, task) == 0)
 			unw_ret = show_trace_unwind(&info, log_lvl);
 	}
-	if (unw_ret > 0) {
-		if (call_trace > 0)
+	if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
+#ifdef CONFIG_STACK_UNWIND
+		print_symbol("DWARF2 unwinder stuck at %s\n",
+			     UNW_PC(info.regs));
+		if (call_trace == 1) {
+			printk("Leftover inexact backtrace:\n");
+			if (UNW_SP(info.regs))
+				stack = (void *)UNW_SP(info.regs);
+		} else if (call_trace > 1)
 			return;
-		printk("%sLegacy call trace:\n", log_lvl);
+		else
+			printk("Full inexact backtrace again:\n");
+#else
+		printk("Inexact backtrace:\n");
+#endif
 	}
 }
 
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f4dfc10026d2..f1d4591eddbb 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,13 +1,16 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.17-rc1
-# Mon Apr 3 14:34:15 2006
+# Linux kernel version: 2.6.18-rc2
+# Thu Jul 27 13:51:07 2006
 #
 CONFIG_MMU=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_S390=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
 #
 # Code maturity level options
@@ -25,6 +28,7 @@ CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
 CONFIG_SYSCTL=y
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
@@ -43,10 +47,12 @@ CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
+CONFIG_RT_MUTEXES=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
 CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -94,7 +100,6 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_DEFAULT_MIGRATION_COST=1000000
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
-CONFIG_BINFMT_ELF32=y
 
 #
 # Code generation options
@@ -115,6 +120,7 @@ CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 # CONFIG_SPARSEMEM_STATIC is not set
 CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
 
 #
 # I/O subsystem configuration
@@ -142,6 +148,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y
 # CONFIG_APPLDATA_BASE is not set
 CONFIG_NO_IDLE_HZ=y
 CONFIG_NO_IDLE_HZ_INIT=y
+CONFIG_S390_HYPFS_FS=y
 CONFIG_KEXEC=y
 
 #
@@ -174,6 +181,8 @@ CONFIG_IP_FIB_HASH=y
 # CONFIG_INET_IPCOMP is not set
 # CONFIG_INET_XFRM_TUNNEL is not set
 # CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
@@ -186,7 +195,10 @@ CONFIG_IPV6=y
 # CONFIG_INET6_IPCOMP is not set
 # CONFIG_INET6_XFRM_TUNNEL is not set
 # CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
 # CONFIG_IPV6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
 
 #
@@ -263,6 +275,7 @@ CONFIG_NET_ESTIMATOR=y
 # Network testing
 #
 # CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
 # CONFIG_HAMRADIO is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
@@ -276,6 +289,7 @@ CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
 # CONFIG_DEBUG_DRIVER is not set
+CONFIG_SYS_HYPERVISOR=y
 
 #
 # Connector - unified userspace <-> kernelspace linker
@@ -334,6 +348,7 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CDROM_PKTCDVD is not set
 
@@ -359,9 +374,7 @@ CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
 CONFIG_MD_RAID1=m
 # CONFIG_MD_RAID10 is not set
-CONFIG_MD_RAID5=m
-# CONFIG_MD_RAID5_RESHAPE is not set
-# CONFIG_MD_RAID6 is not set
+# CONFIG_MD_RAID456 is not set
 CONFIG_MD_MULTIPATH=m
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=y
@@ -419,7 +432,8 @@ CONFIG_S390_TAPE_34XX=m
 #
 # Cryptographic devices
 #
-CONFIG_Z90CRYPT=m
+CONFIG_ZCRYPT=m
+# CONFIG_ZCRYPT_MONOLITHIC is not set
 
 #
 # Network device support
@@ -509,6 +523,7 @@ CONFIG_FS_MBCACHE=y
 # CONFIG_MINIX_FS is not set
 # CONFIG_ROMFS_FS is not set
 CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
@@ -614,26 +629,36 @@ CONFIG_MSDOS_PARTITION=y
 # Instrumentation Support
 #
 # CONFIG_PROFILING is not set
-# CONFIG_STATISTICS is not set
+CONFIG_STATISTICS=y
+CONFIG_KPROBES=y
 
 #
 # Kernel hacking
 #
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 # CONFIG_PRINTK_TIME is not set
 CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=17
 # CONFIG_DETECT_SOFTLOCKUP is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
 CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
+# CONFIG_FRAME_POINTER is not set
 # CONFIG_UNWIND_INFO is not set
 CONFIG_FORCED_INLINING=y
 # CONFIG_RCU_TORTURE_TEST is not set
@@ -688,3 +713,4 @@ CONFIG_CRYPTO=y
 # CONFIG_CRC16 is not set
 CONFIG_CRC32=m
 # CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 04eb1eab6e3e..845081b01267 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -225,6 +225,32 @@ static __inline__ int has_low_battery(void)
 	return (data1 == data2);	/* Was the write blocked? */
 }
 
+static void __init mostek_set_system_time(void)
+{
+	unsigned int year, mon, day, hour, min, sec;
+	struct mostek48t02 *mregs;
+
+	mregs = (struct mostek48t02 *)mstk48t02_regs;
+	if(!mregs) {
+		prom_printf("Something wrong, clock regs not mapped yet.\n");
+		prom_halt();
+	}
+	spin_lock_irq(&mostek_lock);
+	mregs->creg |= MSTK_CREG_READ;
+	sec = MSTK_REG_SEC(mregs);
+	min = MSTK_REG_MIN(mregs);
+	hour = MSTK_REG_HOUR(mregs);
+	day = MSTK_REG_DOM(mregs);
+	mon = MSTK_REG_MONTH(mregs);
+	year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
+	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+	set_normalized_timespec(&wall_to_monotonic,
+				-xtime.tv_sec, -xtime.tv_nsec);
+	mregs->creg &= ~MSTK_CREG_READ;
+	spin_unlock_irq(&mostek_lock);
+}
+
 /* Probe for the real time clock chip on Sun4 */
 static __inline__ void sun4_clock_probe(void)
 {
@@ -273,6 +299,7 @@ static __inline__ void sun4_clock_probe(void)
 #endif
 }
 
+#ifndef CONFIG_SUN4
 static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
 {
 	struct device_node *dp = op->node;
@@ -307,6 +334,8 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
 	if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
 		kick_start_clock();
 
+	mostek_set_system_time();
+
 	return 0;
 }
 
@@ -325,56 +354,37 @@ static struct of_platform_driver clock_driver = {
 
 
 /* Probe for the mostek real time clock chip. */
-static void clock_init(void)
+static int __init clock_init(void)
 {
-	of_register_driver(&clock_driver, &of_bus_type);
+	return of_register_driver(&clock_driver, &of_bus_type);
 }
 
+/* Must be after subsys_initcall() so that busses are probed.  Must
+ * be before device_initcall() because things like the RTC driver
+ * need to see the clock registers.
+ */
+fs_initcall(clock_init);
+#endif /* !CONFIG_SUN4 */
+
 void __init sbus_time_init(void)
 {
-	unsigned int year, mon, day, hour, min, sec;
-	struct mostek48t02 *mregs;
-
-#ifdef CONFIG_SUN4
-	int temp;
-	struct intersil *iregs;
-#endif
 
 	BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
 	btfixup();
 
 	if (ARCH_SUN4)
 		sun4_clock_probe();
-	else
-		clock_init();
 
 	sparc_init_timers(timer_interrupt);
 
 #ifdef CONFIG_SUN4
 	if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) {
-#endif
-		mregs = (struct mostek48t02 *)mstk48t02_regs;
-		if(!mregs) {
-			prom_printf("Something wrong, clock regs not mapped yet.\n");
-			prom_halt();
-		}
-		spin_lock_irq(&mostek_lock);
-		mregs->creg |= MSTK_CREG_READ;
-		sec = MSTK_REG_SEC(mregs);
-		min = MSTK_REG_MIN(mregs);
-		hour = MSTK_REG_HOUR(mregs);
-		day = MSTK_REG_DOM(mregs);
-		mon = MSTK_REG_MONTH(mregs);
-		year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
-		xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
-		xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-		set_normalized_timespec(&wall_to_monotonic,
-				       -xtime.tv_sec, -xtime.tv_nsec);
-		mregs->creg &= ~MSTK_CREG_READ;
-		spin_unlock_irq(&mostek_lock);
-#ifdef CONFIG_SUN4
+		mostek_set_system_time();
 	} else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) {
 		/* initialise the intersil on sun4 */
+		unsigned int year, mon, day, hour, min, sec;
+		int temp;
+		struct intersil *iregs;
 
 		iregs=intersil_clock;
 		if(!iregs) {
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 1605967cce91..55ae802dc0ad 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kprobes.h>
+#include <linux/kallsyms.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -132,6 +133,8 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 
 	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
 	       regs->tpc);
+	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+	print_symbol("RPC: <%s>\n", regs->u_regs[15]);
 	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
 	__asm__("mov %%sp, %0" : "=r" (ksp));
 	show_stack(current, ksp);
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 9b5bb413a6e9..5d4a7d125ed0 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -103,7 +103,7 @@ ENTRY(ia32_sysenter_target)
 	pushq	%rax
 	CFI_ADJUST_CFA_OFFSET 8
 	cld
-	SAVE_ARGS 0,0,1
+	SAVE_ARGS 0,0,0
 	/* no need to do an access_ok check here because rbp has been
 	   32bit zero extended */
 1:	movl	(%rbp),%r9d
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index b9ff75992c16..e0341c6808e5 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -193,7 +193,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 	   is just accounted to the spinlock function.
 	   Better would be to write these functions in assembler again
 	   and check exactly. */
-	if (in_lock_functions(pc)) {
+	if (!user_mode(regs) && in_lock_functions(pc)) {
 		char *v = *(char **)regs->rsp;
 		if ((v >= _stext && v <= _etext) ||
 		    (v >= _sinittext && v <= _einittext) ||
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index eb39a2775236..f7a9d1421078 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -254,7 +254,6 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
 {
 	const unsigned cpu = safe_smp_processor_id();
 	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
-	int i = 11;
 	unsigned used = 0;
 
 	printk("\nCall Trace:\n");
@@ -275,11 +274,20 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
 		if (unwind_init_blocked(&info, tsk) == 0)
 			unw_ret = show_trace_unwind(&info, NULL);
 	}
-	if (unw_ret > 0) {
-		if (call_trace > 0)
+	if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
+#ifdef CONFIG_STACK_UNWIND
+		unsigned long rip = info.regs.rip;
+		print_symbol("DWARF2 unwinder stuck at %s\n", rip);
+		if (call_trace == 1) {
+			printk("Leftover inexact backtrace:\n");
+			stack = (unsigned long *)info.regs.rsp;
+		} else if (call_trace > 1)
 			return;
-		printk("Legacy call trace:");
-		i = 18;
+		else
+			printk("Full inexact backtrace again:\n");
+#else
+		printk("Inexact backtrace:\n");
+#endif
 	}
 }
 
@@ -1118,8 +1126,10 @@ static int __init call_trace_setup(char *s)
 		call_trace = -1;
 	else if (strcmp(s, "both") == 0)
 		call_trace = 0;
-	else if (strcmp(s, "new") == 0)
+	else if (strcmp(s, "newfallback") == 0)
 		call_trace = 1;
+	else if (strcmp(s, "new") == 0)
+		call_trace = 2;
 	return 1;
 }
 __setup("call_trace=", call_trace_setup);
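Together with the boot-options.txt addition earlier in this patch, the parser above maps the call_trace= strings onto integer modes: old = -1, both = 0, newfallback = 1, new = 2, and show_trace() then tests call_trace <= 0, == 1, or > 1 to decide what to print when the unwinder stalls. A userspace replica of that mapping (the default of 1 is an assumption based on the documentation's "(default)" note):

    #include <stdio.h>
    #include <string.h>

    /* mirrors call_trace_setup() from the hunk above */
    static int parse_call_trace(const char *s)
    {
            int call_trace = 1;     /* assumed default: newfallback */

            if (strcmp(s, "old") == 0)
                    call_trace = -1;
            else if (strcmp(s, "both") == 0)
                    call_trace = 0;
            else if (strcmp(s, "newfallback") == 0)
                    call_trace = 1;
            else if (strcmp(s, "new") == 0)
                    call_trace = 2;
            return call_trace;
    }

    int main(void)
    {
            const char *opts[] = { "old", "both", "newfallback", "new" };

            for (unsigned int i = 0; i < 4; i++)
                    printf("call_trace=%s -> %d\n",
                           opts[i], parse_call_trace(opts[i]));
            return 0;
    }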
diff --git a/block/blktrace.c b/block/blktrace.c
index b8c0702777ff..265f7a830619 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
 #define trace_sync_bit(rw)	\
 	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
 #define trace_ahead_bit(rw)	\
-	(((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0))
+	(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 102ebc2c5c34..aae3123bf3ee 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * seeks. so allow a little bit of time for him to submit a new rq
 	 */
 	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
-		sl = 2;
+		sl = min(sl, msecs_to_jiffies(2));
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	return 1;
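The old assignment set the idle timer to a literal 2, which is 2 jiffies and therefore HZ-dependent: 20 ms at HZ=100 but 2 ms at HZ=1000. The fix caps the idle window at roughly 2 ms regardless of HZ. A userspace sketch of the arithmetic (the round-up conversion approximates the kernel's msecs_to_jiffies; the HZ values are just examples):

    #include <stdio.h>

    /* round-up ms -> jiffies conversion, close to what the kernel does */
    static unsigned long msecs_to_jiffies(unsigned int ms, unsigned int hz)
    {
            return (ms * hz + 999) / 1000;
    }

    int main(void)
    {
            unsigned int hzs[] = { 100, 250, 1000 };

            for (unsigned int i = 0; i < 3; i++)
                    printf("HZ=%4u: literal 2 jiffies = %2u ms, "
                           "msecs_to_jiffies(2) = %lu jiffies\n",
                           hzs[i], 2 * 1000 / hzs[i],
                           msecs_to_jiffies(2, hzs[i]));
            return 0;
    }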
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c4df22dfd2a..7b0eca703a67 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status)
 	}
 }
 
+static void cciss_check_queues(ctlr_info_t *h)
+{
+	int start_queue = h->next_to_run;
+	int i;
+
+	/* check to see if we have maxed out the number of commands that can
+	 * be placed on the queue.  If so then exit.  We do this check here
+	 * in case the interrupt we serviced was from an ioctl and did not
+	 * free any new commands.
+	 */
+	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+		return;
+
+	/* We have room on the queue for more commands.  Now we need to queue
+	 * them up.  We will also keep track of the next queue to run so
+	 * that every queue gets a chance to be started first.
+	 */
+	for (i = 0; i < h->highest_lun + 1; i++) {
+		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
+		/* make sure the disk has been added and the drive is real
+		 * because this can be called from the middle of init_one.
+		 */
+		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
+			continue;
+		blk_start_queue(h->gendisk[curr_queue]->queue);
+
+		/* check to see if we have maxed out the number of commands
+		 * that can be placed on the queue.
+		 */
+		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
+			if (curr_queue == start_queue) {
+				h->next_to_run =
+					(start_queue + 1) % (h->highest_lun + 1);
+				break;
+			} else {
+				h->next_to_run = curr_queue;
+				break;
+			}
+		} else {
+			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
+		}
+	}
+}
+
 static void cciss_softirq_done(struct request *rq)
 {
 	CommandList_struct *cmd = rq->completion_data;
@@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq)
 	spin_lock_irqsave(&h->lock, flags);
 	end_that_request_last(rq, rq->errors);
 	cmd_free(h, cmd, 1);
+	cciss_check_queues(h);
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 
@@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 	CommandList_struct *c;
 	unsigned long flags;
 	__u32 a, a1, a2;
-	int j;
-	int start_queue = h->next_to_run;
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
@@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 		}
 	}
 
-	/* check to see if we have maxed out the number of commands that can
-	 * be placed on the queue.  If so then exit.  We do this check here
-	 * in case the interrupt we serviced was from an ioctl and did not
-	 * free any new commands.
-	 */
-	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
-		goto cleanup;
-
-	/* We have room on the queue for more commands.  Now we need to queue
-	 * them up.  We will also keep track of the next queue to run so
-	 * that every queue gets a chance to be started first.
-	 */
-	for (j = 0; j < h->highest_lun + 1; j++) {
-		int curr_queue = (start_queue + j) % (h->highest_lun + 1);
-		/* make sure the disk has been added and the drive is real
-		 * because this can be called from the middle of init_one.
-		 */
-		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
-			continue;
-		blk_start_queue(h->gendisk[curr_queue]->queue);
-
-		/* check to see if we have maxed out the number of commands
-		 * that can be placed on the queue.
-		 */
-		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
-			if (curr_queue == start_queue) {
-				h->next_to_run =
-					(start_queue + 1) % (h->highest_lun + 1);
-				goto cleanup;
-			} else {
-				h->next_to_run = curr_queue;
-				goto cleanup;
-			}
-		} else {
-			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
-		}
-	}
-
-cleanup:
 	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
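The code moved into cciss_check_queues() restarts per-drive request queues round-robin, remembering in next_to_run where to resume so no queue is permanently starved when the command pool fills mid-scan. A userspace sketch of that rotation policy (the queue count and the slot accounting are simplified stand-ins, not the driver's real bookkeeping):

    #include <stdio.h>

    #define NQUEUES 4

    /* scan queues starting at *next_to_run; if command slots run out
     * mid-scan, remember where to resume so the same queue is not
     * always served first (mirrors the cciss next_to_run logic) */
    static void check_queues(int *next_to_run, int slots_left)
    {
            int start = *next_to_run;

            for (int i = 0; i < NQUEUES; i++) {
                    int curr = (start + i) % NQUEUES;

                    printf("starting queue %d\n", curr);
                    if (--slots_left == 0) {
                            *next_to_run = (curr == start)
                                    ? (start + 1) % NQUEUES : curr;
                            printf("pool full, next scan starts at %d\n",
                                   *next_to_run);
                            return;
                    }
            }
    }

    int main(void)
    {
            int next = 0;

            check_queues(&next, 2); /* only two free command slots */
            check_queues(&next, 2); /* resumes at the remembered queue */
            return 0;
    }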
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 6a0c2230f82f..e2d4beac7420 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -67,6 +67,8 @@ static int ignore = 0;
 static int ignore_dga = 0;
 static int ignore_csr = 0;
 static int ignore_sniffer = 0;
+static int disable_scofix = 0;
+static int force_scofix = 0;
 static int reset = 0;
 
 #ifdef CONFIG_BT_HCIUSB_SCO
@@ -107,9 +109,12 @@ static struct usb_device_id blacklist_ids[] = {
 	{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
 
 	/* Broadcom BCM2035 */
-	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_BROKEN_ISOC },
+	{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
 	{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
 
+	/* IBM/Lenovo ThinkPad with Broadcom chip */
+	{ USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
+
 	/* Microsoft Wireless Transceiver for Bluetooth 2.0 */
 	{ USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
 
@@ -119,11 +124,13 @@ static struct usb_device_id blacklist_ids[] = {
 	/* ISSC Bluetooth Adapter v3.1 */
 	{ USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
 
-	/* RTX Telecom based adapter with buggy SCO support */
+	/* RTX Telecom based adapters with buggy SCO support */
 	{ USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
+	{ USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC },
 
-	/* Belkin F8T012 */
+	/* Belkin F8T012 and F8T013 devices */
 	{ USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
+	{ USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_WRONG_SCO_MTU },
 
 	/* Digianswer devices */
 	{ USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
@@ -990,8 +997,10 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
 	if (reset || id->driver_info & HCI_RESET)
 		set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
 
-	if (id->driver_info & HCI_WRONG_SCO_MTU)
-		set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+	if (force_scofix || id->driver_info & HCI_WRONG_SCO_MTU) {
+		if (!disable_scofix)
+			set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+	}
 
 	if (id->driver_info & HCI_SNIFFER) {
 		if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
@@ -1161,6 +1170,12 @@ MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
 module_param(ignore_sniffer, bool, 0644);
 MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
 
+module_param(disable_scofix, bool, 0644);
+MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
+
+module_param(force_scofix, bool, 0644);
+MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffer size");
+
 module_param(reset, bool, 0644);
 MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d328186f774..bc1088d9b379 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -364,10 +364,12 @@ static ssize_t store_##file_name \
 	if (ret != 1)						\
 		return -EINVAL;					\
 								\
+	lock_cpu_hotplug();					\
 	mutex_lock(&policy->lock);				\
 	ret = __cpufreq_set_policy(policy, &new_policy);	\
 	policy->user_policy.object = policy->object;		\
 	mutex_unlock(&policy->lock);				\
+	unlock_cpu_hotplug();					\
 								\
 	return ret ? ret : count;				\
 }
@@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *********************************************************************/
 
 
+/* Must be called with lock_cpu_hotplug held */
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
 
-	lock_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
-	unlock_cpu_hotplug();
-
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
+	lock_cpu_hotplug();
 	mutex_lock(&policy->lock);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
 	mutex_unlock(&policy->lock);
+	unlock_cpu_hotplug();
 
 	cpufreq_cpu_put(policy);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ * when "event" is CPUFREQ_GOV_LIMITS
+ */
 
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 {
@@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
 }
 
 
-int cpufreq_governor(unsigned int cpu, unsigned int event)
-{
-	int ret = 0;
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
-	if (!policy)
-		return -EINVAL;
-
-	mutex_lock(&policy->lock);
-	ret = __cpufreq_governor(policy, event);
-	mutex_unlock(&policy->lock);
-
-	cpufreq_cpu_put(policy);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(cpufreq_governor);
-
-
 int cpufreq_register_governor(struct cpufreq_governor *governor)
 {
 	struct cpufreq_governor *t;
@@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ */
 static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
 {
 	int ret = 0;
@@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	if (!data)
 		return -EINVAL;
 
+	lock_cpu_hotplug();
+
 	/* lock this CPU */
 	mutex_lock(&data->lock);
 
@@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.governor = data->governor;
 
 	mutex_unlock(&data->lock);
+
+	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 
 	return ret;
@@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
+	lock_cpu_hotplug();
 	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
@@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	ret = __cpufreq_set_policy(data, &policy);
 
 	mutex_unlock(&data->lock);
-
+	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 	return ret;
 }
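The common thread of this cpufreq rework is a single lock ordering: take the CPU hotplug lock first, then the per-policy mutex, and move the hotplug lock out of __cpufreq_driver_target() into its callers so the order is identical on every path. A userspace sketch of the same discipline, using pthread mutexes as stand-ins for the kernel locks (names are illustrative, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    /* stand-ins for lock_cpu_hotplug() and policy->lock */
    static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t policy_lock = PTHREAD_MUTEX_INITIALIZER;

    /* like __cpufreq_driver_target(): assumes the caller already holds
     * the hotplug lock, so it never takes it itself */
    static void __driver_target(int freq)
    {
            printf("setting frequency to %d kHz\n", freq);
    }

    /* like cpufreq_driver_target(): every caller takes the locks in the
     * same order, hotplug lock first, then the policy mutex */
    static void driver_target(int freq)
    {
            pthread_mutex_lock(&hotplug_lock);
            pthread_mutex_lock(&policy_lock);
            __driver_target(freq);
            pthread_mutex_unlock(&policy_lock);
            pthread_mutex_unlock(&hotplug_lock);
    }

    int main(void)
    {
            driver_target(1800000);
            return 0;
    }

Keeping one acquisition order across all paths is what removes the deadlock potential that motivated pulling the lock out of the inner function.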
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b3ebc8f01975..c4c578defabf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 178f0c547eb7..52cf1f021825 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -309,7 +309,9 @@ static void do_dbs_timer(void *data)
 	if (!dbs_info->enable)
 		return;
 
+	lock_cpu_hotplug();
 	dbs_check_cpu(dbs_info);
+	unlock_cpu_hotplug();
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }
@@ -412,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -423,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					policy->min,
 					CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 44ae5e5b94cf..a06c204589cd 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -18,6 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
@@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
 	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
+	lock_cpu_hotplug();
 	mutex_lock(&userspace_mutex);
 	if (!cpu_is_managed[policy->cpu])
 		goto err;
@@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
  err:
 	mutex_unlock(&userspace_mutex);
+	unlock_cpu_hotplug();
 	return ret;
 }
 
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f712e4cfd9dc..7cf3eb023521 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive)
 	 * not available so we don't need to recheck that.
 	 */
 	capacity = idedisk_capacity(drive);
-	barrier = ide_id_has_flush_cache(id) &&
+	barrier = ide_id_has_flush_cache(id) && !drive->noflush &&
 		(drive->addressing == 0 || capacity <= (1ULL << 28) ||
 		 ide_id_has_flush_cache_ext(id));
 
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 98918fb6b2ce..7c3a13e1cf64 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive)
 			goto bug_dma_off;
 		printk(", DMA");
 	} else if (id->field_valid & 1) {
-		printk(", BUG");
+		goto bug_dma_off;
 	}
 	return;
 bug_dma_off:
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 05fbd9298db7..defd4b4bd374 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s)
 		const char *hd_words[] = {
 			"none", "noprobe", "nowerr", "cdrom", "serialize",
 			"autotune", "noautotune", "minus8", "swapdata", "bswap",
-			"minus11", "remap", "remap63", "scsi", NULL };
+			"noflush", "remap", "remap63", "scsi", NULL };
 		unit = s[2] - 'a';
 		hw   = unit / MAX_DRIVES;
 		unit = unit % MAX_DRIVES;
@@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s)
 			case -10: /* "bswap" */
 				drive->bswap = 1;
 				goto done;
+			case -11: /* noflush */
+				drive->noflush = 1;
+				goto done;
 			case -12: /* "remap" */
 				drive->remap_0_to_1 = 1;
 				goto done;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3cb04424d351..e9bad185968a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive)
 {
 	u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
 
-	config_it821x_chipset_for_pio(drive, !speed);
-	it821x_tune_chipset(drive, speed);
-	return ide_dma_enable(drive);
+	if (speed) {
+		config_it821x_chipset_for_pio(drive, 0);
+		it821x_tune_chipset(drive, speed);
+
+		return ide_dma_enable(drive);
+	}
+
+	return 0;
 }
 
 /**
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed4dab52a6f..1c3cfbbe6a97 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
 	return 0;
 }
 
+int ib_response_mad(struct ib_mad *mad)
+{
+	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
+		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
+		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+}
+EXPORT_SYMBOL(ib_response_mad);
+
 /*
  * ib_register_mad_agent - Register to send/receive MADs
  */
@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 }
 EXPORT_SYMBOL(ib_unregister_mad_agent);
 
-static inline int response_mad(struct ib_mad *mad)
-{
-	/* Trap represses are responses although response bit is reset */
-	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
-}
-
 static void dequeue_mad(struct ib_mad_list_head *mad_list)
 {
 	struct ib_mad_queue *mad_queue;
@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-		if (response_mad(&mad_priv->mad.mad) &&
+		if (ib_response_mad(&mad_priv->mad.mad) &&
 		    mad_agent_priv->agent.recv_handler) {
 			local->mad_priv = mad_priv;
 			local->recv_mad_agent = mad_agent_priv;
@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 	unsigned long flags;
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
-	if (response_mad(mad)) {
+	if (ib_response_mad(mad)) {
 		u32 hi_tid;
 		struct ib_mad_agent_private *entry;
 
@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	}
 
 	/* Complete corresponding request */
-	if (response_mad(mad_recv_wc->recv_buf.mad)) {
+	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
 		if (!mad_send_wr) {
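The exported ib_response_mad() extends the old static response_mad() with one more case: a BM-class MAD whose attribute modifier carries the response bit. A userspace sketch of the predicate with stub constants (the values mirror the IB management method encoding as far as I know them, and byte-order handling of attr_mod is omitted; treat all of them as illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* stub constants; the kernel's live in <rdma/ib_mad.h> */
    #define IB_MGMT_METHOD_RESP          0x80  /* response bit of method */
    #define IB_MGMT_METHOD_TRAP_REPRESS  0x07
    #define IB_MGMT_CLASS_BM             0x04
    #define IB_BM_ATTR_MOD_RESP          1u

    struct mad_hdr {
            uint8_t  method;
            uint8_t  mgmt_class;
            uint32_t attr_mod;
    };

    /* mirrors the new ib_response_mad() predicate */
    static int response_mad(const struct mad_hdr *h)
    {
            return (h->method & IB_MGMT_METHOD_RESP) ||
                   (h->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                   ((h->mgmt_class == IB_MGMT_CLASS_BM) &&
                    (h->attr_mod & IB_BM_ATTR_MOD_RESP));
    }

    int main(void)
    {
            struct mad_hdr get      = { 0x01, 0x01, 0 }; /* plain request */
            struct mad_hdr get_resp = { 0x81, 0x01, 0 }; /* response bit */
            struct mad_hdr bm_resp  = { 0x01, IB_MGMT_CLASS_BM, 1 };

            printf("get: %d, get_resp: %d, bm_resp: %d\n",
                   response_mad(&get), response_mad(&get_resp),
                   response_mad(&bm_resp));
            return 0;
    }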
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index afe70a549c2f..1273f8807e84 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -112,8 +112,10 @@ struct ib_umad_device {
 struct ib_umad_file {
 	struct ib_umad_port *port;
 	struct list_head recv_list;
+	struct list_head send_list;
 	struct list_head port_list;
 	spinlock_t recv_lock;
+	spinlock_t send_lock;
 	wait_queue_head_t recv_wait;
 	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
 	int agents_dead;
@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
 	return ret;
 }
 
+static void dequeue_send(struct ib_umad_file *file,
+			 struct ib_umad_packet *packet)
+{
+	spin_lock_irq(&file->send_lock);
+	list_del(&packet->list);
+	spin_unlock_irq(&file->send_lock);
+}
+
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
 	struct ib_umad_file *file = agent->context;
 	struct ib_umad_packet *packet = send_wc->send_buf->context[0];
 
+	dequeue_send(file, packet);
 	ib_destroy_ah(packet->msg->ah);
 	ib_free_send_mad(packet->msg);
 
@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
 	return 0;
 }
 
+static int same_destination(struct ib_user_mad_hdr *hdr1,
+			    struct ib_user_mad_hdr *hdr2)
+{
+	if (!hdr1->grh_present && !hdr2->grh_present)
+		return (hdr1->lid == hdr2->lid);
+
+	if (hdr1->grh_present && hdr2->grh_present)
+		return !memcmp(hdr1->gid, hdr2->gid, 16);
+
+	return 0;
+}
+
+static int is_duplicate(struct ib_umad_file *file,
+			struct ib_umad_packet *packet)
+{
+	struct ib_umad_packet *sent_packet;
+	struct ib_mad_hdr *sent_hdr, *hdr;
+
+	hdr = (struct ib_mad_hdr *) packet->mad.data;
+	list_for_each_entry(sent_packet, &file->send_list, list) {
+		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
+
+		if ((hdr->tid != sent_hdr->tid) ||
+		    (hdr->mgmt_class != sent_hdr->mgmt_class))
+			continue;
+
+		/*
+		 * No need to be overly clever here.  If two new operations have
+		 * the same TID, reject the second as a duplicate.  This is more
+		 * restrictive than required by the spec.
+		 */
+		if (!ib_response_mad((struct ib_mad *) hdr)) {
+			if (!ib_response_mad((struct ib_mad *) sent_hdr))
+				return 1;
+			continue;
+		} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
+			continue;
+
+		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
+			return 1;
+	}
+
+	return 0;
+}
+
 static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 			     size_t count, loff_t *pos)
 {
@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	struct ib_ah_attr ah_attr;
 	struct ib_ah *ah;
 	struct ib_rmpp_mad *rmpp_mad;
-	u8 method;
 	__be64 *tid;
 	int ret, data_len, hdr_len, copy_offset, rmpp_active;
 
@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	}
 
 	/*
-	 * If userspace is generating a request that will generate a
+	 * Set the high-order part of the transaction ID to make MADs from
477 | * response, we need to make sure the high-order part of the | 532 | * different agents unique, and allow routing responses back to the |
478 | * transaction ID matches the agent being used to send the | 533 | * original requestor. |
479 | * MAD. | ||
480 | */ | 534 | */ |
481 | method = ((struct ib_mad_hdr *) packet->msg->mad)->method; | 535 | if (!ib_response_mad(packet->msg->mad)) { |
482 | |||
483 | if (!(method & IB_MGMT_METHOD_RESP) && | ||
484 | method != IB_MGMT_METHOD_TRAP_REPRESS && | ||
485 | method != IB_MGMT_METHOD_SEND) { | ||
486 | tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; | 536 | tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; |
487 | *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | | 537 | *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | |
488 | (be64_to_cpup(tid) & 0xffffffff)); | 538 | (be64_to_cpup(tid) & 0xffffffff)); |
539 | rmpp_mad->mad_hdr.tid = *tid; | ||
540 | } | ||
541 | |||
542 | spin_lock_irq(&file->send_lock); | ||
543 | ret = is_duplicate(file, packet); | ||
544 | if (!ret) | ||
545 | list_add_tail(&packet->list, &file->send_list); | ||
546 | spin_unlock_irq(&file->send_lock); | ||
547 | if (ret) { | ||
548 | ret = -EINVAL; | ||
549 | goto err_msg; | ||
489 | } | 550 | } |
490 | 551 | ||
491 | ret = ib_post_send_mad(packet->msg, NULL); | 552 | ret = ib_post_send_mad(packet->msg, NULL); |
492 | if (ret) | 553 | if (ret) |
493 | goto err_msg; | 554 | goto err_send; |
494 | 555 | ||
495 | up_read(&file->port->mutex); | 556 | up_read(&file->port->mutex); |
496 | return count; | 557 | return count; |
497 | 558 | ||
559 | err_send: | ||
560 | dequeue_send(file, packet); | ||
498 | err_msg: | 561 | err_msg: |
499 | ib_free_send_mad(packet->msg); | 562 | ib_free_send_mad(packet->msg); |
500 | err_ah: | 563 | err_ah: |
@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp) | |||
657 | } | 720 | } |
658 | 721 | ||
659 | spin_lock_init(&file->recv_lock); | 722 | spin_lock_init(&file->recv_lock); |
723 | spin_lock_init(&file->send_lock); | ||
660 | INIT_LIST_HEAD(&file->recv_list); | 724 | INIT_LIST_HEAD(&file->recv_list); |
725 | INIT_LIST_HEAD(&file->send_list); | ||
661 | init_waitqueue_head(&file->recv_wait); | 726 | init_waitqueue_head(&file->recv_wait); |
662 | 727 | ||
663 | file->port = port; | 728 | file->port = port; |
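The ib_umad_write() flow above is careful to run is_duplicate() and the list_add_tail() under the same send_lock, so the duplicate test and the insertion are atomic with respect to concurrent writers. A toy user-space sketch of that check-then-insert shape — a pthread mutex stands in for the spinlock, the TID-only match is a simplification of the full request/response rule, and queue_if_unique plus the fixed-size array are illustrative only:

#include <pthread.h>
#include <stdio.h>

#define MAX_PENDING 16

static pthread_mutex_t send_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long pending_tid[MAX_PENDING];
static int npending;

/* The test and the insert happen under one lock, so two writers racing
 * with the same TID cannot both slip through. */
static int queue_if_unique(unsigned long long tid)
{
	int i, dup = 0;

	pthread_mutex_lock(&send_lock);
	for (i = 0; i < npending; i++)
		if (pending_tid[i] == tid)
			dup = 1;
	if (!dup && npending < MAX_PENDING)
		pending_tid[npending++] = tid;
	pthread_mutex_unlock(&send_lock);

	return dup ? -1 : 0;	/* -1 plays the role of -EINVAL */
}

int main(void)
{
	printf("first:  %d\n", queue_if_unique(0x1234));	/* 0 */
	printf("repeat: %d\n", queue_if_unique(0x1234));	/* -1 */
	return 0;
}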
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index bdf5d5098190..30923eb68ec7 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -42,6 +42,13 @@ | |||
42 | 42 | ||
43 | #include "uverbs.h" | 43 | #include "uverbs.h" |
44 | 44 | ||
45 | static struct lock_class_key pd_lock_key; | ||
46 | static struct lock_class_key mr_lock_key; | ||
47 | static struct lock_class_key cq_lock_key; | ||
48 | static struct lock_class_key qp_lock_key; | ||
49 | static struct lock_class_key ah_lock_key; | ||
50 | static struct lock_class_key srq_lock_key; | ||
51 | |||
45 | #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ | 52 | #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ |
46 | do { \ | 53 | do { \ |
47 | (udata)->inbuf = (void __user *) (ibuf); \ | 54 | (udata)->inbuf = (void __user *) (ibuf); \ |
@@ -76,12 +83,13 @@ | |||
76 | */ | 83 | */ |
77 | 84 | ||
78 | static void init_uobj(struct ib_uobject *uobj, u64 user_handle, | 85 | static void init_uobj(struct ib_uobject *uobj, u64 user_handle, |
79 | struct ib_ucontext *context) | 86 | struct ib_ucontext *context, struct lock_class_key *key) |
80 | { | 87 | { |
81 | uobj->user_handle = user_handle; | 88 | uobj->user_handle = user_handle; |
82 | uobj->context = context; | 89 | uobj->context = context; |
83 | kref_init(&uobj->ref); | 90 | kref_init(&uobj->ref); |
84 | init_rwsem(&uobj->mutex); | 91 | init_rwsem(&uobj->mutex); |
92 | lockdep_set_class(&uobj->mutex, key); | ||
85 | uobj->live = 0; | 93 | uobj->live = 0; |
86 | } | 94 | } |
87 | 95 | ||
@@ -470,7 +478,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, | |||
470 | if (!uobj) | 478 | if (!uobj) |
471 | return -ENOMEM; | 479 | return -ENOMEM; |
472 | 480 | ||
473 | init_uobj(uobj, 0, file->ucontext); | 481 | init_uobj(uobj, 0, file->ucontext, &pd_lock_key); |
474 | down_write(&uobj->mutex); | 482 | down_write(&uobj->mutex); |
475 | 483 | ||
476 | pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, | 484 | pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, |
@@ -591,7 +599,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, | |||
591 | if (!obj) | 599 | if (!obj) |
592 | return -ENOMEM; | 600 | return -ENOMEM; |
593 | 601 | ||
594 | init_uobj(&obj->uobject, 0, file->ucontext); | 602 | init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key); |
595 | down_write(&obj->uobject.mutex); | 603 | down_write(&obj->uobject.mutex); |
596 | 604 | ||
597 | /* | 605 | /* |
@@ -770,7 +778,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, | |||
770 | if (!obj) | 778 | if (!obj) |
771 | return -ENOMEM; | 779 | return -ENOMEM; |
772 | 780 | ||
773 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext); | 781 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key); |
774 | down_write(&obj->uobject.mutex); | 782 | down_write(&obj->uobject.mutex); |
775 | 783 | ||
776 | if (cmd.comp_channel >= 0) { | 784 | if (cmd.comp_channel >= 0) { |
@@ -1051,13 +1059,14 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1051 | if (!obj) | 1059 | if (!obj) |
1052 | return -ENOMEM; | 1060 | return -ENOMEM; |
1053 | 1061 | ||
1054 | init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext); | 1062 | init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key); |
1055 | down_write(&obj->uevent.uobject.mutex); | 1063 | down_write(&obj->uevent.uobject.mutex); |
1056 | 1064 | ||
1065 | srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; | ||
1057 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | 1066 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); |
1058 | scq = idr_read_cq(cmd.send_cq_handle, file->ucontext); | 1067 | scq = idr_read_cq(cmd.send_cq_handle, file->ucontext); |
1059 | rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext); | 1068 | rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? |
1060 | srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; | 1069 | scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext); |
1061 | 1070 | ||
1062 | if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { | 1071 | if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { |
1063 | ret = -EINVAL; | 1072 | ret = -EINVAL; |
@@ -1125,7 +1134,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1125 | 1134 | ||
1126 | put_pd_read(pd); | 1135 | put_pd_read(pd); |
1127 | put_cq_read(scq); | 1136 | put_cq_read(scq); |
1128 | put_cq_read(rcq); | 1137 | if (rcq != scq) |
1138 | put_cq_read(rcq); | ||
1129 | if (srq) | 1139 | if (srq) |
1130 | put_srq_read(srq); | 1140 | put_srq_read(srq); |
1131 | 1141 | ||
@@ -1150,7 +1160,7 @@ err_put: | |||
1150 | put_pd_read(pd); | 1160 | put_pd_read(pd); |
1151 | if (scq) | 1161 | if (scq) |
1152 | put_cq_read(scq); | 1162 | put_cq_read(scq); |
1153 | if (rcq) | 1163 | if (rcq && rcq != scq) |
1154 | put_cq_read(rcq); | 1164 | put_cq_read(rcq); |
1155 | if (srq) | 1165 | if (srq) |
1156 | put_srq_read(srq); | 1166 | put_srq_read(srq); |
@@ -1751,7 +1761,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, | |||
1751 | if (!uobj) | 1761 | if (!uobj) |
1752 | return -ENOMEM; | 1762 | return -ENOMEM; |
1753 | 1763 | ||
1754 | init_uobj(uobj, cmd.user_handle, file->ucontext); | 1764 | init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key); |
1755 | down_write(&uobj->mutex); | 1765 | down_write(&uobj->mutex); |
1756 | 1766 | ||
1757 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | 1767 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); |
@@ -1775,7 +1785,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, | |||
1775 | ah = ib_create_ah(pd, &attr); | 1785 | ah = ib_create_ah(pd, &attr); |
1776 | if (IS_ERR(ah)) { | 1786 | if (IS_ERR(ah)) { |
1777 | ret = PTR_ERR(ah); | 1787 | ret = PTR_ERR(ah); |
1778 | goto err; | 1788 | goto err_put; |
1779 | } | 1789 | } |
1780 | 1790 | ||
1781 | ah->uobject = uobj; | 1791 | ah->uobject = uobj; |
@@ -1811,6 +1821,9 @@ err_copy: | |||
1811 | err_destroy: | 1821 | err_destroy: |
1812 | ib_destroy_ah(ah); | 1822 | ib_destroy_ah(ah); |
1813 | 1823 | ||
1824 | err_put: | ||
1825 | put_pd_read(pd); | ||
1826 | |||
1814 | err: | 1827 | err: |
1815 | put_uobj_write(uobj); | 1828 | put_uobj_write(uobj); |
1816 | return ret; | 1829 | return ret; |
@@ -1963,7 +1976,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, | |||
1963 | if (!obj) | 1976 | if (!obj) |
1964 | return -ENOMEM; | 1977 | return -ENOMEM; |
1965 | 1978 | ||
1966 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext); | 1979 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key); |
1967 | down_write(&obj->uobject.mutex); | 1980 | down_write(&obj->uobject.mutex); |
1968 | 1981 | ||
1969 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | 1982 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); |
@@ -1984,7 +1997,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, | |||
1984 | srq = pd->device->create_srq(pd, &attr, &udata); | 1997 | srq = pd->device->create_srq(pd, &attr, &udata); |
1985 | if (IS_ERR(srq)) { | 1998 | if (IS_ERR(srq)) { |
1986 | ret = PTR_ERR(srq); | 1999 | ret = PTR_ERR(srq); |
1987 | goto err; | 2000 | goto err_put; |
1988 | } | 2001 | } |
1989 | 2002 | ||
1990 | srq->device = pd->device; | 2003 | srq->device = pd->device; |
@@ -2029,6 +2042,9 @@ err_copy: | |||
2029 | err_destroy: | 2042 | err_destroy: |
2030 | ib_destroy_srq(srq); | 2043 | ib_destroy_srq(srq); |
2031 | 2044 | ||
2045 | err_put: | ||
2046 | put_pd_read(pd); | ||
2047 | |||
2032 | err: | 2048 | err: |
2033 | put_uobj_write(&obj->uobject); | 2049 | put_uobj_write(&obj->uobject); |
2034 | return ret; | 2050 | return ret; |
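The lockdep annotation above follows the standard pattern for dynamically allocated locks that fall into a small number of static roles: one static lock_class_key per role, applied right after the lock is initialized. Condensed to its essentials — a sketch using only the kernel APIs already visible in this hunk, with init_obj_mutex as a hypothetical wrapper name:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static struct lock_class_key pd_lock_key;	/* one key per object type */

static void init_obj_mutex(struct rw_semaphore *sem,
			   struct lock_class_key *key)
{
	init_rwsem(sem);
	/* Re-key the lock so lockdep tracks PD, CQ, QP, ... mutexes as
	 * distinct classes instead of one class per init_rwsem() site. */
	lockdep_set_class(sem, key);
}

The usual motivation: every uobject mutex is initialized from the same init_uobj() call site, so without re-keying, lockdep would lump them into a single class and legitimate nesting (say, taking a QP's mutex while holding its PD's) would look like recursive locking.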
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 823131d58b34..f98518d912b5 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -859,6 +859,38 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail, | |||
859 | __ipath_layer_rcv_lid(dd, hdr); | 859 | __ipath_layer_rcv_lid(dd, hdr); |
860 | } | 860 | } |
861 | 861 | ||
862 | static void ipath_rcv_hdrerr(struct ipath_devdata *dd, | ||
863 | u32 eflags, | ||
864 | u32 l, | ||
865 | u32 etail, | ||
866 | u64 *rc) | ||
867 | { | ||
868 | char emsg[128]; | ||
869 | struct ipath_message_header *hdr; | ||
870 | |||
871 | get_rhf_errstring(eflags, emsg, sizeof emsg); | ||
872 | hdr = (struct ipath_message_header *)&rc[1]; | ||
873 | ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u " | ||
874 | "tlen=%x opcode=%x egridx=%x: %s\n", | ||
875 | eflags, l, | ||
876 | ipath_hdrget_rcv_type((__le32 *) rc), | ||
877 | ipath_hdrget_length_in_bytes((__le32 *) rc), | ||
878 | be32_to_cpu(hdr->bth[0]) >> 24, | ||
879 | etail, emsg); | ||
880 | |||
881 | /* Count local link integrity errors. */ | ||
882 | if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) { | ||
883 | u8 n = (dd->ipath_ibcctrl >> | ||
884 | INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & | ||
885 | INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; | ||
886 | |||
887 | if (++dd->ipath_lli_counter > n) { | ||
888 | dd->ipath_lli_counter = 0; | ||
889 | dd->ipath_lli_errors++; | ||
890 | } | ||
891 | } | ||
892 | } | ||
893 | |||
862 | /* | 894 | /* |
863 | * ipath_kreceive - receive a packet | 895 | * ipath_kreceive - receive a packet |
864 | * @dd: the infinipath device | 896 | * @dd: the infinipath device |
@@ -875,7 +907,6 @@ void ipath_kreceive(struct ipath_devdata *dd) | |||
875 | struct ipath_message_header *hdr; | 907 | struct ipath_message_header *hdr; |
876 | u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0; | 908 | u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0; |
877 | static u64 totcalls; /* stats, may eventually remove */ | 909 | static u64 totcalls; /* stats, may eventually remove */ |
878 | char emsg[128]; | ||
879 | 910 | ||
880 | if (!dd->ipath_hdrqtailptr) { | 911 | if (!dd->ipath_hdrqtailptr) { |
881 | ipath_dev_err(dd, | 912 | ipath_dev_err(dd, |
@@ -938,26 +969,9 @@ reloop: | |||
938 | "%x\n", etype); | 969 | "%x\n", etype); |
939 | } | 970 | } |
940 | 971 | ||
941 | if (eflags & ~(INFINIPATH_RHF_H_TIDERR | | 972 | if (unlikely(eflags)) |
942 | INFINIPATH_RHF_H_IHDRERR)) { | 973 | ipath_rcv_hdrerr(dd, eflags, l, etail, rc); |
943 | get_rhf_errstring(eflags, emsg, sizeof emsg); | 974 | else if (etype == RCVHQ_RCV_TYPE_NON_KD) { |
944 | ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u " | ||
945 | "tlen=%x opcode=%x egridx=%x: %s\n", | ||
946 | eflags, l, etype, tlen, bthbytes[0], | ||
947 | ipath_hdrget_index((__le32 *) rc), emsg); | ||
948 | /* Count local link integrity errors. */ | ||
949 | if (eflags & (INFINIPATH_RHF_H_ICRCERR | | ||
950 | INFINIPATH_RHF_H_VCRCERR)) { | ||
951 | u8 n = (dd->ipath_ibcctrl >> | ||
952 | INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & | ||
953 | INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; | ||
954 | |||
955 | if (++dd->ipath_lli_counter > n) { | ||
956 | dd->ipath_lli_counter = 0; | ||
957 | dd->ipath_lli_errors++; | ||
958 | } | ||
959 | } | ||
960 | } else if (etype == RCVHQ_RCV_TYPE_NON_KD) { | ||
961 | int ret = __ipath_verbs_rcv(dd, rc + 1, | 975 | int ret = __ipath_verbs_rcv(dd, rc + 1, |
962 | ebuf, tlen); | 976 | ebuf, tlen); |
963 | if (ret == -ENODEV) | 977 | if (ret == -ENODEV) |
@@ -981,25 +995,7 @@ reloop: | |||
981 | else if (etype == RCVHQ_RCV_TYPE_EXPECTED) | 995 | else if (etype == RCVHQ_RCV_TYPE_EXPECTED) |
982 | ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", | 996 | ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", |
983 | be32_to_cpu(hdr->bth[0]) & 0xff); | 997 | be32_to_cpu(hdr->bth[0]) & 0xff); |
984 | else if (eflags & (INFINIPATH_RHF_H_TIDERR | | 998 | else { |
985 | INFINIPATH_RHF_H_IHDRERR)) { | ||
986 | /* | ||
987 | * This is a type 3 packet, only the LRH is in the | ||
988 | * rcvhdrq, the rest of the header is in the eager | ||
989 | * buffer. | ||
990 | */ | ||
991 | u8 opcode; | ||
992 | if (ebuf) { | ||
993 | bthbytes = (u8 *) ebuf; | ||
994 | opcode = *bthbytes; | ||
995 | } | ||
996 | else | ||
997 | opcode = 0; | ||
998 | get_rhf_errstring(eflags, emsg, sizeof emsg); | ||
999 | ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, " | ||
1000 | "len %x\n", eflags, emsg, opcode, etail, | ||
1001 | tlen); | ||
1002 | } else { | ||
1003 | /* | 999 | /* |
1004 | * error packet, type of error unknown. | 1000 | * error packet, type of error unknown. |
1005 | * Probably type 3, but we don't know, so don't | 1001 | * Probably type 3, but we don't know, so don't |
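The local-link-integrity accounting that moved into ipath_rcv_hdrerr() above is a thresholded counter: individual CRC errors only become a reported error once more than a threshold of them accumulate (in the driver, the threshold comes from the PHYERRTHRESHOLD field of ipath_ibcctrl), and the counter then restarts. A standalone sketch of the same shape, with illustrative names:

#include <stdio.h>

struct lli_state {
	unsigned int counter;
	unsigned int errors;
};

/* Mirrors the ++counter > threshold test in ipath_rcv_hdrerr(). */
static void lli_account(struct lli_state *s, unsigned int threshold)
{
	if (++s->counter > threshold) {
		s->counter = 0;
		s->errors++;
	}
}

int main(void)
{
	struct lli_state s = { 0, 0 };
	int i;

	for (i = 0; i < 10; i++)
		lli_account(&s, 3);	/* threshold 3 -> 1 error per 4 hits */
	printf("errors after 10 hits: %u\n", s.errors);	/* 2 */
	return 0;
}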
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index 46773c673a1a..a5ca279370aa 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c | |||
@@ -197,6 +197,21 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, | |||
197 | size_t off; | 197 | size_t off; |
198 | int ret; | 198 | int ret; |
199 | 199 | ||
200 | /* | ||
201 | * We use RKEY == zero for physical addresses | ||
202 | * (see ipath_get_dma_mr). | ||
203 | */ | ||
204 | if (rkey == 0) { | ||
205 | sge->mr = NULL; | ||
206 | sge->vaddr = phys_to_virt(vaddr); | ||
207 | sge->length = len; | ||
208 | sge->sge_length = len; | ||
209 | ss->sg_list = NULL; | ||
210 | ss->num_sge = 1; | ||
211 | ret = 1; | ||
212 | goto bail; | ||
213 | } | ||
214 | |||
200 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; | 215 | mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))]; |
201 | if (unlikely(mr == NULL || mr->lkey != rkey)) { | 216 | if (unlikely(mr == NULL || mr->lkey != rkey)) { |
202 | ret = 0; | 217 | ret = 0; |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 56ac336dd1ec..d70a9b6b5239 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -191,10 +191,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length) | |||
191 | { | 191 | { |
192 | struct ipath_sge *sge = &ss->sge; | 192 | struct ipath_sge *sge = &ss->sge; |
193 | 193 | ||
194 | while (length > sge->sge_length) { | ||
195 | length -= sge->sge_length; | ||
196 | ss->sge = *ss->sg_list++; | ||
197 | } | ||
198 | while (length) { | 194 | while (length) { |
199 | u32 len = sge->length; | 195 | u32 len = sge->length; |
200 | 196 | ||
@@ -627,6 +623,7 @@ static int ipath_query_device(struct ib_device *ibdev, | |||
627 | props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | | 623 | props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | |
628 | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | | 624 | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | |
629 | IB_DEVICE_SYS_IMAGE_GUID; | 625 | IB_DEVICE_SYS_IMAGE_GUID; |
626 | props->page_size_cap = PAGE_SIZE; | ||
630 | props->vendor_id = ipath_layer_get_vendorid(dev->dd); | 627 | props->vendor_id = ipath_layer_get_vendorid(dev->dd); |
631 | props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); | 628 | props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); |
632 | props->hw_ver = ipath_layer_get_pcirev(dev->dd); | 629 | props->hw_ver = ipath_layer_get_pcirev(dev->dd); |
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index d0f7731802c9..deabc14b4ea4 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c | |||
@@ -778,11 +778,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) | |||
778 | ((dev->fw_ver & 0xffff0000ull) >> 16) | | 778 | ((dev->fw_ver & 0xffff0000ull) >> 16) | |
779 | ((dev->fw_ver & 0x0000ffffull) << 16); | 779 | ((dev->fw_ver & 0x0000ffffull) << 16); |
780 | 780 | ||
781 | MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); | ||
782 | dev->cmd.max_cmds = 1 << lg; | ||
783 | |||
781 | mthca_dbg(dev, "FW version %012llx, max commands %d\n", | 784 | mthca_dbg(dev, "FW version %012llx, max commands %d\n", |
782 | (unsigned long long) dev->fw_ver, dev->cmd.max_cmds); | 785 | (unsigned long long) dev->fw_ver, dev->cmd.max_cmds); |
783 | 786 | ||
784 | MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); | ||
785 | dev->cmd.max_cmds = 1 << lg; | ||
786 | MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET); | 787 | MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET); |
787 | MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET); | 788 | MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET); |
788 | 789 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index fab417c5cf43..b60a9d79ae54 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c | |||
@@ -370,7 +370,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | |||
370 | return -EINVAL; | 370 | return -EINVAL; |
371 | 371 | ||
372 | if (attr_mask & IB_SRQ_LIMIT) { | 372 | if (attr_mask & IB_SRQ_LIMIT) { |
373 | if (attr->srq_limit > srq->max) | 373 | u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max; |
374 | if (attr->srq_limit > max_wr) | ||
374 | return -EINVAL; | 375 | return -EINVAL; |
375 | 376 | ||
376 | mutex_lock(&srq->mutex); | 377 | mutex_lock(&srq->mutex); |
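The mthca_modify_srq() fix tightens the limit validation on mem-free HCAs, presumably because those HCAs keep one WQE in reserve, so the largest usable SRQ limit is srq->max - 1. A standalone sketch of the corrected bound; srq_limit_ok is an illustrative name and is_memfree stands in for mthca_is_memfree(dev):

#include <stdio.h>

static int srq_limit_ok(unsigned int srq_limit, unsigned int max,
			int is_memfree)
{
	/* Mem-free HCAs reserve one WQE, so the top slot is unusable. */
	unsigned int max_wr = is_memfree ? max - 1 : max;

	return srq_limit <= max_wr;	/* > max_wr would be -EINVAL */
}

int main(void)
{
	printf("%d\n", srq_limit_ok(16, 16, 0));	/* 1: Tavor-style */
	printf("%d\n", srq_limit_ok(16, 16, 1));	/* 0: mem-free */
	return 0;
}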
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 3f89f5e19036..474aa214ab57 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -212,6 +212,7 @@ struct ipoib_path { | |||
212 | 212 | ||
213 | struct ipoib_neigh { | 213 | struct ipoib_neigh { |
214 | struct ipoib_ah *ah; | 214 | struct ipoib_ah *ah; |
215 | union ib_gid dgid; | ||
215 | struct sk_buff_head queue; | 216 | struct sk_buff_head queue; |
216 | 217 | ||
217 | struct neighbour *neighbour; | 218 | struct neighbour *neighbour; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 1c6ea1c682a5..cf71d2a5515c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -404,6 +404,8 @@ static void path_rec_completion(int status, | |||
404 | list_for_each_entry(neigh, &path->neigh_list, list) { | 404 | list_for_each_entry(neigh, &path->neigh_list, list) { |
405 | kref_get(&path->ah->ref); | 405 | kref_get(&path->ah->ref); |
406 | neigh->ah = path->ah; | 406 | neigh->ah = path->ah; |
407 | memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw, | ||
408 | sizeof(union ib_gid)); | ||
407 | 409 | ||
408 | while ((skb = __skb_dequeue(&neigh->queue))) | 410 | while ((skb = __skb_dequeue(&neigh->queue))) |
409 | __skb_queue_tail(&skqueue, skb); | 411 | __skb_queue_tail(&skqueue, skb); |
@@ -510,6 +512,8 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) | |||
510 | if (path->ah) { | 512 | if (path->ah) { |
511 | kref_get(&path->ah->ref); | 513 | kref_get(&path->ah->ref); |
512 | neigh->ah = path->ah; | 514 | neigh->ah = path->ah; |
515 | memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw, | ||
516 | sizeof(union ib_gid)); | ||
513 | 517 | ||
514 | ipoib_send(dev, skb, path->ah, | 518 | ipoib_send(dev, skb, path->ah, |
515 | be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); | 519 | be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); |
@@ -633,6 +637,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
633 | neigh = *to_ipoib_neigh(skb->dst->neighbour); | 637 | neigh = *to_ipoib_neigh(skb->dst->neighbour); |
634 | 638 | ||
635 | if (likely(neigh->ah)) { | 639 | if (likely(neigh->ah)) { |
640 | if (unlikely(memcmp(&neigh->dgid.raw, | ||
641 | skb->dst->neighbour->ha + 4, | ||
642 | sizeof(union ib_gid)))) { | ||
643 | spin_lock(&priv->lock); | ||
644 | /* | ||
645 | * It's safe to call ipoib_put_ah() inside | ||
646 | * priv->lock here, because we know that | ||
647 | * path->ah will always hold one more reference, | ||
648 | * so ipoib_put_ah() will never do more than | ||
649 | * decrement the ref count. | ||
650 | */ | ||
651 | ipoib_put_ah(neigh->ah); | ||
652 | list_del(&neigh->list); | ||
653 | ipoib_neigh_free(neigh); | ||
654 | spin_unlock(&priv->lock); | ||
655 | ipoib_path_lookup(skb, dev); | ||
656 | goto out; | ||
657 | } | ||
658 | |||
636 | ipoib_send(dev, skb, neigh->ah, | 659 | ipoib_send(dev, skb, neigh->ah, |
637 | be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); | 660 | be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); |
638 | goto out; | 661 | goto out; |
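The new dgid field cached in struct ipoib_neigh enables the staleness test added to ipoib_start_xmit() above: the GID remembered when the path resolved is compared against the GID embedded in the neighbour hardware address (the 20-byte IPoIB address is a 4-byte QPN field followed by a 16-byte GID), and a mismatch means the IP address now maps to a different port, so the cached AH must be thrown away and the path re-looked-up. A standalone sketch of just the comparison; path_is_stale is an illustrative name:

#include <stdio.h>
#include <string.h>

#define IPOIB_HW_ADDR_LEN 20	/* 4-byte QPN field + 16-byte GID */

static int path_is_stale(const unsigned char *cached_gid,
			 const unsigned char *ha)
{
	/* Same memcmp as in ipoib_start_xmit(): GID starts at byte 4. */
	return memcmp(cached_gid, ha + 4, 16) != 0;
}

int main(void)
{
	unsigned char ha[IPOIB_HW_ADDR_LEN] = { 0 };
	unsigned char gid[16] = { 0 };

	printf("fresh: %d\n", path_is_stale(gid, ha));	/* 0 */
	ha[4 + 15] ^= 1;				/* remote GID changed */
	printf("stale: %d\n", path_is_stale(gid, ha));	/* 1 */
	return 0;
}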
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index ab40488182b3..b5e6a7be603d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -264,6 +264,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, | |||
264 | if (!ah) { | 264 | if (!ah) { |
265 | ipoib_warn(priv, "ib_address_create failed\n"); | 265 | ipoib_warn(priv, "ib_address_create failed\n"); |
266 | } else { | 266 | } else { |
267 | spin_lock_irq(&priv->lock); | ||
268 | mcast->ah = ah; | ||
269 | spin_unlock_irq(&priv->lock); | ||
270 | |||
267 | ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT | 271 | ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT |
268 | " AV %p, LID 0x%04x, SL %d\n", | 272 | " AV %p, LID 0x%04x, SL %d\n", |
269 | IPOIB_GID_ARG(mcast->mcmember.mgid), | 273 | IPOIB_GID_ARG(mcast->mcmember.mgid), |
@@ -271,10 +275,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, | |||
271 | be16_to_cpu(mcast->mcmember.mlid), | 275 | be16_to_cpu(mcast->mcmember.mlid), |
272 | mcast->mcmember.sl); | 276 | mcast->mcmember.sl); |
273 | } | 277 | } |
274 | |||
275 | spin_lock_irq(&priv->lock); | ||
276 | mcast->ah = ah; | ||
277 | spin_unlock_irq(&priv->lock); | ||
278 | } | 278 | } |
279 | 279 | ||
280 | /* actually send any queued packets */ | 280 | /* actually send any queued packets */ |
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 1ef9fd39a79a..0e3fdf7c6dd3 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c | |||
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void) | |||
1537 | { | 1537 | { |
1538 | if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || | 1538 | if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || |
1539 | (idprom->id_machtype == (SM_SUN4|SM_4_470))) { | 1539 | (idprom->id_machtype == (SM_SUN4|SM_4_470))) { |
1540 | memset(&sun4_sdev, 0, sizeof(sdev)); | 1540 | memset(&sun4_sdev, 0, sizeof(struct sbus_dev)); |
1541 | sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; | 1541 | sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; |
1542 | sun4_sdev.irqs[0] = 6; | 1542 | sun4_sdev.irqs[0] = 6; |
1543 | return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); | 1543 | return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); |
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void) | |||
1547 | 1547 | ||
1548 | static int __exit sunlance_sun4_remove(void) | 1548 | static int __exit sunlance_sun4_remove(void) |
1549 | { | 1549 | { |
1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); | 1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); |
1551 | struct net_device *net_dev = lp->dev; | 1551 | struct net_device *net_dev = lp->dev; |
1552 | 1552 | ||
1553 | unregister_netdevice(net_dev); | 1553 | unregister_netdevice(net_dev); |
1554 | 1554 | ||
1555 | lance_free_hwresources(root_lance_dev); | 1555 | lance_free_hwresources(lp); |
1556 | 1556 | ||
1557 | free_netdev(net_dev); | 1557 | free_netdev(net_dev); |
1558 | 1558 | ||
1559 | dev_set_drvdata(&sun4_sdev->dev, NULL); | 1559 | dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL); |
1560 | 1560 | ||
1561 | return 0; | 1561 | return 0; |
1562 | } | 1562 | } |
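The sparc_lance_init() memset fix above is worth dwelling on: sizeof(sdev) evidently resolved to something other than the structure size — the classic hazard being a pointer in scope — so only a handful of bytes were cleared. A standalone sketch of the pitfall, with a stand-in type since struct sbus_dev is not reproduced here:

#include <stdio.h>

struct sbus_dev_like {
	char pad[256];		/* stand-in for struct sbus_dev */
};

int main(void)
{
	struct sbus_dev_like sun4_sdev;
	struct sbus_dev_like *sdev = &sun4_sdev;

	/* sizeof(sdev) is the size of a pointer (4 or 8 bytes), not the
	 * size of the object it points to, so a memset sized this way
	 * leaves almost all of sun4_sdev uninitialized. */
	printf("sizeof(sdev)              = %zu\n", sizeof(sdev));
	printf("sizeof(struct ...)        = %zu\n",
	       sizeof(struct sbus_dev_like));
	return 0;
}

Spelling the type out, as the fix does, keeps the size correct even if the variable is later changed to a pointer.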
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index ce6f3be86da0..1b8138f641e3 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -68,8 +68,8 @@ | |||
68 | 68 | ||
69 | #define DRV_MODULE_NAME "tg3" | 69 | #define DRV_MODULE_NAME "tg3" |
70 | #define PFX DRV_MODULE_NAME ": " | 70 | #define PFX DRV_MODULE_NAME ": " |
71 | #define DRV_MODULE_VERSION "3.62" | 71 | #define DRV_MODULE_VERSION "3.63" |
72 | #define DRV_MODULE_RELDATE "June 30, 2006" | 72 | #define DRV_MODULE_RELDATE "July 25, 2006" |
73 | 73 | ||
74 | #define TG3_DEF_MAC_MODE 0 | 74 | #define TG3_DEF_MAC_MODE 0 |
75 | #define TG3_DEF_RX_MODE 0 | 75 | #define TG3_DEF_RX_MODE 0 |
@@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, | |||
3590 | static int tg3_init_hw(struct tg3 *, int); | 3590 | static int tg3_init_hw(struct tg3 *, int); |
3591 | static int tg3_halt(struct tg3 *, int, int); | 3591 | static int tg3_halt(struct tg3 *, int, int); |
3592 | 3592 | ||
3593 | /* Restart hardware after configuration changes, self-test, etc. | ||
3594 | * Invoked with tp->lock held. | ||
3595 | */ | ||
3596 | static int tg3_restart_hw(struct tg3 *tp, int reset_phy) | ||
3597 | { | ||
3598 | int err; | ||
3599 | |||
3600 | err = tg3_init_hw(tp, reset_phy); | ||
3601 | if (err) { | ||
3602 | printk(KERN_ERR PFX "%s: Failed to re-initialize device, " | ||
3603 | "aborting.\n", tp->dev->name); | ||
3604 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | ||
3605 | tg3_full_unlock(tp); | ||
3606 | del_timer_sync(&tp->timer); | ||
3607 | tp->irq_sync = 0; | ||
3608 | netif_poll_enable(tp->dev); | ||
3609 | dev_close(tp->dev); | ||
3610 | tg3_full_lock(tp, 0); | ||
3611 | } | ||
3612 | return err; | ||
3613 | } | ||
3614 | |||
3593 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3615 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3594 | static void tg3_poll_controller(struct net_device *dev) | 3616 | static void tg3_poll_controller(struct net_device *dev) |
3595 | { | 3617 | { |
@@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data) | |||
3630 | } | 3652 | } |
3631 | 3653 | ||
3632 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); | 3654 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); |
3633 | tg3_init_hw(tp, 1); | 3655 | if (tg3_init_hw(tp, 1)) |
3656 | goto out; | ||
3634 | 3657 | ||
3635 | tg3_netif_start(tp); | 3658 | tg3_netif_start(tp); |
3636 | 3659 | ||
3637 | if (restart_timer) | 3660 | if (restart_timer) |
3638 | mod_timer(&tp->timer, jiffies + 1); | 3661 | mod_timer(&tp->timer, jiffies + 1); |
3639 | 3662 | ||
3663 | out: | ||
3640 | tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; | 3664 | tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; |
3641 | 3665 | ||
3642 | tg3_full_unlock(tp); | 3666 | tg3_full_unlock(tp); |
@@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | |||
4124 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) | 4148 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) |
4125 | { | 4149 | { |
4126 | struct tg3 *tp = netdev_priv(dev); | 4150 | struct tg3 *tp = netdev_priv(dev); |
4151 | int err; | ||
4127 | 4152 | ||
4128 | if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) | 4153 | if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) |
4129 | return -EINVAL; | 4154 | return -EINVAL; |
@@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) | |||
4144 | 4169 | ||
4145 | tg3_set_mtu(dev, tp, new_mtu); | 4170 | tg3_set_mtu(dev, tp, new_mtu); |
4146 | 4171 | ||
4147 | tg3_init_hw(tp, 0); | 4172 | err = tg3_restart_hw(tp, 0); |
4148 | 4173 | ||
4149 | tg3_netif_start(tp); | 4174 | if (!err) |
4175 | tg3_netif_start(tp); | ||
4150 | 4176 | ||
4151 | tg3_full_unlock(tp); | 4177 | tg3_full_unlock(tp); |
4152 | 4178 | ||
4153 | return 0; | 4179 | return err; |
4154 | } | 4180 | } |
4155 | 4181 | ||
4156 | /* Free up pending packets in all rx/tx rings. | 4182 | /* Free up pending packets in all rx/tx rings. |
@@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp) | |||
4232 | * end up in the driver. tp->{tx,}lock are held and thus | 4258 | * end up in the driver. tp->{tx,}lock are held and thus |
4233 | * we may not sleep. | 4259 | * we may not sleep. |
4234 | */ | 4260 | */ |
4235 | static void tg3_init_rings(struct tg3 *tp) | 4261 | static int tg3_init_rings(struct tg3 *tp) |
4236 | { | 4262 | { |
4237 | u32 i; | 4263 | u32 i; |
4238 | 4264 | ||
@@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp) | |||
4281 | 4307 | ||
4282 | /* Now allocate fresh SKBs for each rx ring. */ | 4308 | /* Now allocate fresh SKBs for each rx ring. */ |
4283 | for (i = 0; i < tp->rx_pending; i++) { | 4309 | for (i = 0; i < tp->rx_pending; i++) { |
4284 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, | 4310 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { |
4285 | -1, i) < 0) | 4311 | printk(KERN_WARNING PFX |
4312 | "%s: Using a smaller RX standard ring, " | ||
4313 | "only %d out of %d buffers were allocated " | ||
4314 | "successfully.\n", | ||
4315 | tp->dev->name, i, tp->rx_pending); | ||
4316 | if (i == 0) | ||
4317 | return -ENOMEM; | ||
4318 | tp->rx_pending = i; | ||
4286 | break; | 4319 | break; |
4320 | } | ||
4287 | } | 4321 | } |
4288 | 4322 | ||
4289 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 4323 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { |
4290 | for (i = 0; i < tp->rx_jumbo_pending; i++) { | 4324 | for (i = 0; i < tp->rx_jumbo_pending; i++) { |
4291 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, | 4325 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, |
4292 | -1, i) < 0) | 4326 | -1, i) < 0) { |
4327 | printk(KERN_WARNING PFX | ||
4328 | "%s: Using a smaller RX jumbo ring, " | ||
4329 | "only %d out of %d buffers were " | ||
4330 | "allocated successfully.\n", | ||
4331 | tp->dev->name, i, tp->rx_jumbo_pending); | ||
4332 | if (i == 0) { | ||
4333 | tg3_free_rings(tp); | ||
4334 | return -ENOMEM; | ||
4335 | } | ||
4336 | tp->rx_jumbo_pending = i; | ||
4293 | break; | 4337 | break; |
4338 | } | ||
4294 | } | 4339 | } |
4295 | } | 4340 | } |
4341 | return 0; | ||
4296 | } | 4342 | } |
4297 | 4343 | ||
4298 | /* | 4344 | /* |
@@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5815 | { | 5861 | { |
5816 | struct tg3 *tp = netdev_priv(dev); | 5862 | struct tg3 *tp = netdev_priv(dev); |
5817 | struct sockaddr *addr = p; | 5863 | struct sockaddr *addr = p; |
5864 | int err = 0; | ||
5818 | 5865 | ||
5819 | if (!is_valid_ether_addr(addr->sa_data)) | 5866 | if (!is_valid_ether_addr(addr->sa_data)) |
5820 | return -EINVAL; | 5867 | return -EINVAL; |
@@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5832 | tg3_full_lock(tp, 1); | 5879 | tg3_full_lock(tp, 1); |
5833 | 5880 | ||
5834 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 5881 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
5835 | tg3_init_hw(tp, 0); | 5882 | err = tg3_restart_hw(tp, 0); |
5836 | 5883 | if (!err) | |
5837 | tg3_netif_start(tp); | 5884 | tg3_netif_start(tp); |
5838 | tg3_full_unlock(tp); | 5885 | tg3_full_unlock(tp); |
5839 | } else { | 5886 | } else { |
5840 | spin_lock_bh(&tp->lock); | 5887 | spin_lock_bh(&tp->lock); |
@@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5842 | spin_unlock_bh(&tp->lock); | 5889 | spin_unlock_bh(&tp->lock); |
5843 | } | 5890 | } |
5844 | 5891 | ||
5845 | return 0; | 5892 | return err; |
5846 | } | 5893 | } |
5847 | 5894 | ||
5848 | /* tp->lock is held. */ | 5895 | /* tp->lock is held. */ |
@@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
5942 | * can only do this after the hardware has been | 5989 | * can only do this after the hardware has been |
5943 | * successfully reset. | 5990 | * successfully reset. |
5944 | */ | 5991 | */ |
5945 | tg3_init_rings(tp); | 5992 | err = tg3_init_rings(tp); |
5993 | if (err) | ||
5994 | return err; | ||
5946 | 5995 | ||
5947 | /* This value is determined during the probe time DMA | 5996 | /* This value is determined during the probe time DMA |
5948 | * engine test, tg3_test_dma. | 5997 | * engine test, tg3_test_dma. |
@@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * | |||
7956 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 8005 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) |
7957 | { | 8006 | { |
7958 | struct tg3 *tp = netdev_priv(dev); | 8007 | struct tg3 *tp = netdev_priv(dev); |
7959 | int irq_sync = 0; | 8008 | int irq_sync = 0, err = 0; |
7960 | 8009 | ||
7961 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || | 8010 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || |
7962 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || | 8011 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || |
@@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e | |||
7980 | 8029 | ||
7981 | if (netif_running(dev)) { | 8030 | if (netif_running(dev)) { |
7982 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8031 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7983 | tg3_init_hw(tp, 1); | 8032 | err = tg3_restart_hw(tp, 1); |
7984 | tg3_netif_start(tp); | 8033 | if (!err) |
8034 | tg3_netif_start(tp); | ||
7985 | } | 8035 | } |
7986 | 8036 | ||
7987 | tg3_full_unlock(tp); | 8037 | tg3_full_unlock(tp); |
7988 | 8038 | ||
7989 | return 0; | 8039 | return err; |
7990 | } | 8040 | } |
7991 | 8041 | ||
7992 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 8042 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
@@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
8001 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 8051 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
8002 | { | 8052 | { |
8003 | struct tg3 *tp = netdev_priv(dev); | 8053 | struct tg3 *tp = netdev_priv(dev); |
8004 | int irq_sync = 0; | 8054 | int irq_sync = 0, err = 0; |
8005 | 8055 | ||
8006 | if (netif_running(dev)) { | 8056 | if (netif_running(dev)) { |
8007 | tg3_netif_stop(tp); | 8057 | tg3_netif_stop(tp); |
@@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
8025 | 8075 | ||
8026 | if (netif_running(dev)) { | 8076 | if (netif_running(dev)) { |
8027 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8077 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8028 | tg3_init_hw(tp, 1); | 8078 | err = tg3_restart_hw(tp, 1); |
8029 | tg3_netif_start(tp); | 8079 | if (!err) |
8080 | tg3_netif_start(tp); | ||
8030 | } | 8081 | } |
8031 | 8082 | ||
8032 | tg3_full_unlock(tp); | 8083 | tg3_full_unlock(tp); |
8033 | 8084 | ||
8034 | return 0; | 8085 | return err; |
8035 | } | 8086 | } |
8036 | 8087 | ||
8037 | static u32 tg3_get_rx_csum(struct net_device *dev) | 8088 | static u32 tg3_get_rx_csum(struct net_device *dev) |
@@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp) | |||
8666 | if (!netif_running(tp->dev)) | 8717 | if (!netif_running(tp->dev)) |
8667 | return TG3_LOOPBACK_FAILED; | 8718 | return TG3_LOOPBACK_FAILED; |
8668 | 8719 | ||
8669 | tg3_reset_hw(tp, 1); | 8720 | err = tg3_reset_hw(tp, 1); |
8721 | if (err) | ||
8722 | return TG3_LOOPBACK_FAILED; | ||
8670 | 8723 | ||
8671 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) | 8724 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) |
8672 | err |= TG3_MAC_LOOPBACK_FAILED; | 8725 | err |= TG3_MAC_LOOPBACK_FAILED; |
@@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
8740 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8793 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8741 | if (netif_running(dev)) { | 8794 | if (netif_running(dev)) { |
8742 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 8795 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
8743 | tg3_init_hw(tp, 1); | 8796 | if (!tg3_restart_hw(tp, 1)) |
8744 | tg3_netif_start(tp); | 8797 | tg3_netif_start(tp); |
8745 | } | 8798 | } |
8746 | 8799 | ||
8747 | tg3_full_unlock(tp); | 8800 | tg3_full_unlock(tp); |
@@ -11699,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
11699 | tg3_full_lock(tp, 0); | 11752 | tg3_full_lock(tp, 0); |
11700 | 11753 | ||
11701 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11754 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
11702 | tg3_init_hw(tp, 1); | 11755 | if (tg3_restart_hw(tp, 1)) |
11756 | goto out; | ||
11703 | 11757 | ||
11704 | tp->timer.expires = jiffies + tp->timer_offset; | 11758 | tp->timer.expires = jiffies + tp->timer_offset; |
11705 | add_timer(&tp->timer); | 11759 | add_timer(&tp->timer); |
@@ -11707,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
11707 | netif_device_attach(dev); | 11761 | netif_device_attach(dev); |
11708 | tg3_netif_start(tp); | 11762 | tg3_netif_start(tp); |
11709 | 11763 | ||
11764 | out: | ||
11710 | tg3_full_unlock(tp); | 11765 | tg3_full_unlock(tp); |
11711 | } | 11766 | } |
11712 | 11767 | ||
@@ -11733,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev) | |||
11733 | tg3_full_lock(tp, 0); | 11788 | tg3_full_lock(tp, 0); |
11734 | 11789 | ||
11735 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11790 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
11736 | tg3_init_hw(tp, 1); | 11791 | err = tg3_restart_hw(tp, 1); |
11792 | if (err) | ||
11793 | goto out; | ||
11737 | 11794 | ||
11738 | tp->timer.expires = jiffies + tp->timer_offset; | 11795 | tp->timer.expires = jiffies + tp->timer_offset; |
11739 | add_timer(&tp->timer); | 11796 | add_timer(&tp->timer); |
11740 | 11797 | ||
11741 | tg3_netif_start(tp); | 11798 | tg3_netif_start(tp); |
11742 | 11799 | ||
11800 | out: | ||
11743 | tg3_full_unlock(tp); | 11801 | tg3_full_unlock(tp); |
11744 | 11802 | ||
11745 | return 0; | 11803 | return err; |
11746 | } | 11804 | } |
11747 | 11805 | ||
11748 | static struct pci_driver tg3_driver = { | 11806 | static struct pci_driver tg3_driver = { |
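Two patterns recur through the tg3 hunks above: tg3_restart_hw() turns the formerly unchecked tg3_init_hw() into a call whose failure tears the device down cleanly, and tg3_init_rings() degrades rather than dies on a partial RX allocation, shrinking the ring to what was actually allocated and failing with -ENOMEM only when nothing could be allocated at all. A standalone sketch of the latter shape; fill_ring and the malloc stand-in are illustrative only:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int fill_ring(void **ring, int *pending)
{
	int i;

	for (i = 0; i < *pending; i++) {
		ring[i] = malloc(2048);		/* stand-in for an rx skb */
		if (!ring[i]) {
			fprintf(stderr, "using a smaller ring, "
				"only %d of %d buffers allocated\n",
				i, *pending);
			if (i == 0)
				return -ENOMEM;	/* nothing usable at all */
			*pending = i;		/* shrink and carry on */
			break;
		}
	}
	return 0;
}

int main(void)
{
	void *ring[64];
	int pending = 64;

	if (fill_ring(ring, &pending) == 0)
		printf("ring size after fill: %d\n", pending);
	while (pending--)
		free(ring[pending]);
	return 0;
}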
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index f26a2ee3aad8..3cba6c9fab11 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root, | |||
152 | struct ccwgroup_device *gdev; | 152 | struct ccwgroup_device *gdev; |
153 | int i; | 153 | int i; |
154 | int rc; | 154 | int rc; |
155 | int del_drvdata; | ||
156 | 155 | ||
157 | if (argc > 256) /* disallow dumb users */ | 156 | if (argc > 256) /* disallow dumb users */ |
158 | return -EINVAL; | 157 | return -EINVAL; |
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root, | |||
163 | 162 | ||
164 | atomic_set(&gdev->onoff, 0); | 163 | atomic_set(&gdev->onoff, 0); |
165 | 164 | ||
166 | del_drvdata = 0; | ||
167 | for (i = 0; i < argc; i++) { | 165 | for (i = 0; i < argc; i++) { |
168 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); | 166 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); |
169 | 167 | ||
@@ -180,10 +178,8 @@ ccwgroup_create(struct device *root, | |||
180 | rc = -EINVAL; | 178 | rc = -EINVAL; |
181 | goto free_dev; | 179 | goto free_dev; |
182 | } | 180 | } |
183 | } | ||
184 | for (i = 0; i < argc; i++) | ||
185 | gdev->cdev[i]->dev.driver_data = gdev; | 181 | gdev->cdev[i]->dev.driver_data = gdev; |
186 | del_drvdata = 1; | 182 | } |
187 | 183 | ||
188 | gdev->creator_id = creator_id; | 184 | gdev->creator_id = creator_id; |
189 | gdev->count = argc; | 185 | gdev->count = argc; |
@@ -226,9 +222,9 @@ error: | |||
226 | free_dev: | 222 | free_dev: |
227 | for (i = 0; i < argc; i++) | 223 | for (i = 0; i < argc; i++) |
228 | if (gdev->cdev[i]) { | 224 | if (gdev->cdev[i]) { |
229 | put_device(&gdev->cdev[i]->dev); | 225 | if (gdev->cdev[i]->dev.driver_data == gdev) |
230 | if (del_drvdata) | ||
231 | gdev->cdev[i]->dev.driver_data = NULL; | 226 | gdev->cdev[i]->dev.driver_data = NULL; |
227 | put_device(&gdev->cdev[i]->dev); | ||
232 | } | 228 | } |
233 | kfree(gdev); | 229 | kfree(gdev); |
234 | return rc; | 230 | return rc; |
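The ccwgroup_create() rework above replaces the del_drvdata flag with a simpler invariant: each cdev is claimed (reference taken, back-pointer set) in a single loop step, and the error path unwinds only the slots whose driver_data actually points back at this group — clearing the pointer before the put, since the put may free the device. A toy sketch of that unwind rule, with illustrative names throughout:

#include <stdio.h>

struct dev { void *driver_data; int refs; };

static void cleanup(struct dev **slots, int n, void *group)
{
	int i;

	for (i = 0; i < n; i++)
		if (slots[i]) {
			/* Reset the back-pointer only where it really
			 * points at this group, then drop the ref. */
			if (slots[i]->driver_data == group)
				slots[i]->driver_data = NULL;
			slots[i]->refs--;	/* put_device() stand-in */
		}
}

int main(void)
{
	struct dev a = { 0, 1 }, b = { 0, 1 };
	struct dev *slots[2] = { &a, &b };
	int group;

	a.driver_data = &group;	/* claimed by this group */
	b.driver_data = &b;	/* owned by someone else */

	cleanup(slots, 2, &group);
	printf("a: data=%p refs=%d\n", a.driver_data, a.refs); /* NULL, 0 */
	printf("b: data=%p refs=%d\n", b.driver_data, b.refs); /* &b, 0 */
	return 0;
}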
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index ac6e0c7e43d9..7a39e0b0386c 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
152 | if (cdev->private->iretry) { | 152 | if (cdev->private->iretry) { |
153 | cdev->private->iretry--; | 153 | cdev->private->iretry--; |
154 | ret = cio_halt(sch); | 154 | ret = cio_halt(sch); |
155 | return (ret == 0) ? -EBUSY : ret; | 155 | if (ret != -EBUSY) |
156 | return (ret == 0) ? -EBUSY : ret; | ||
156 | } | 157 | } |
157 | /* halt io unsuccessful. */ | 158 | /* halt io unsuccessful. */ |
158 | cdev->private->iretry = 255; /* 255 clear retries. */ | 159 | cdev->private->iretry = 255; /* 255 clear retries. */ |
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c index 085db4826e0e..bdc6bb262bce 100644 --- a/drivers/scsi/NCR53C9x.c +++ b/drivers/scsi/NCR53C9x.c | |||
@@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp, | |||
2152 | */ | 2152 | */ |
2153 | static int esp_should_clear_sync(Scsi_Cmnd *sp) | 2153 | static int esp_should_clear_sync(Scsi_Cmnd *sp) |
2154 | { | 2154 | { |
2155 | unchar cmd1 = sp->cmnd[0]; | 2155 | unchar cmd = sp->cmnd[0]; |
2156 | unchar cmd2 = sp->data_cmnd[0]; | ||
2157 | 2156 | ||
2158 | /* These cases are for spinning up a disk and | 2157 | /* These cases are for spinning up a disk and |
2159 | * waiting for that spinup to complete. | 2158 | * waiting for that spinup to complete. |
2160 | */ | 2159 | */ |
2161 | if(cmd1 == START_STOP || | 2160 | if(cmd == START_STOP) |
2162 | cmd2 == START_STOP) | ||
2163 | return 0; | 2161 | return 0; |
2164 | 2162 | ||
2165 | if(cmd1 == TEST_UNIT_READY || | 2163 | if(cmd == TEST_UNIT_READY) |
2166 | cmd2 == TEST_UNIT_READY) | ||
2167 | return 0; | 2164 | return 0; |
2168 | 2165 | ||
2169 | /* One more special case for SCSI tape drives, | 2166 | /* One more special case for SCSI tape drives, |
2170 | * this is what is used to probe the device for | 2167 | * this is what is used to probe the device for |
2171 | * completion of a rewind or tape load operation. | 2168 | * completion of a rewind or tape load operation. |
2172 | */ | 2169 | */ |
2173 | if(sp->device->type == TYPE_TAPE) { | 2170 | if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE) |
2174 | if(cmd1 == MODE_SENSE || | 2171 | return 0; |
2175 | cmd2 == MODE_SENSE) | ||
2176 | return 0; | ||
2177 | } | ||
2178 | 2172 | ||
2179 | return 1; | 2173 | return 1; |
2180 | } | 2174 | } |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 3e1053f111dc..4cf7afc31cc7 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt) | |||
2427 | info->stats.aborts += 1; | 2427 | info->stats.aborts += 1; |
2428 | 2428 | ||
2429 | printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); | 2429 | printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); |
2430 | __scsi_print_command(SCpnt->data_cmnd); | 2430 | __scsi_print_command(SCpnt->cmnd); |
2431 | 2431 | ||
2432 | print_debug_list(); | 2432 | print_debug_list(); |
2433 | fas216_dumpstate(info); | 2433 | fas216_dumpstate(info); |
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c index eaf64c7e54e7..98bd22714d0d 100644 --- a/drivers/scsi/esp.c +++ b/drivers/scsi/esp.c | |||
@@ -2754,18 +2754,15 @@ static int esp_do_data_finale(struct esp *esp) | |||
2754 | */ | 2754 | */ |
2755 | static int esp_should_clear_sync(struct scsi_cmnd *sp) | 2755 | static int esp_should_clear_sync(struct scsi_cmnd *sp) |
2756 | { | 2756 | { |
2757 | u8 cmd1 = sp->cmnd[0]; | 2757 | u8 cmd = sp->cmnd[0]; |
2758 | u8 cmd2 = sp->data_cmnd[0]; | ||
2759 | 2758 | ||
2760 | /* These cases are for spinning up a disk and | 2759 | /* These cases are for spinning up a disk and |
2761 | * waiting for that spinup to complete. | 2760 | * waiting for that spinup to complete. |
2762 | */ | 2761 | */ |
2763 | if (cmd1 == START_STOP || | 2762 | if (cmd == START_STOP) |
2764 | cmd2 == START_STOP) | ||
2765 | return 0; | 2763 | return 0; |
2766 | 2764 | ||
2767 | if (cmd1 == TEST_UNIT_READY || | 2765 | if (cmd == TEST_UNIT_READY) |
2768 | cmd2 == TEST_UNIT_READY) | ||
2769 | return 0; | 2766 | return 0; |
2770 | 2767 | ||
2771 | /* One more special case for SCSI tape drives, | 2768 | /* One more special case for SCSI tape drives, |
@@ -2773,8 +2770,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp) | |||
2773 | * completion of a rewind or tape load operation. | 2770 | * completion of a rewind or tape load operation. |
2774 | */ | 2771 | */ |
2775 | if (sp->device->type == TYPE_TAPE) { | 2772 | if (sp->device->type == TYPE_TAPE) { |
2776 | if (cmd1 == MODE_SENSE || | 2773 | if (cmd == MODE_SENSE) |
2777 | cmd2 == MODE_SENSE) | ||
2778 | return 0; | 2774 | return 0; |
2779 | } | 2775 | } |
2780 | 2776 | ||
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index a89c4115cfba..32293f451669 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c | |||
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, | |||
110 | sshdr.asc, sshdr.ascq); | 110 | sshdr.asc, sshdr.ascq); |
111 | break; | 111 | break; |
112 | case NOT_READY: /* This happens if there is no disc in drive */ | 112 | case NOT_READY: /* This happens if there is no disc in drive */ |
113 | if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) { | 113 | if (sdev->removable) |
114 | printk(KERN_INFO "Device not ready. Make sure" | ||
115 | " there is a disc in the drive.\n"); | ||
116 | break; | 114 | break; |
117 | } | ||
118 | case UNIT_ATTENTION: | 115 | case UNIT_ATTENTION: |
119 | if (sdev->removable) { | 116 | if (sdev->removable) { |
120 | sdev->changed = 1; | 117 | sdev->changed = 1; |
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index ceda3a2859d2..7858703ed84c 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h | |||
@@ -246,8 +246,8 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); | |||
246 | #define BUF_BUSY XBF_DONT_BLOCK | 246 | #define BUF_BUSY XBF_DONT_BLOCK |
247 | 247 | ||
248 | #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) | 248 | #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) |
249 | #define XFS_BUF_ZEROFLAGS(bp) \ | 249 | #define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ |
250 | ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI)) | 250 | ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) |
251 | 251 | ||
252 | #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) | 252 | #define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) |
253 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) | 253 | #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) |
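The XFS_BUF_ZEROFLAGS widening above matters because log buffers are reused: if XBF_ORDERED is not cleared along with the other state flags, a barrier requested for one write leaks into the next (see the xlog_sync() changes further down, which add ZEROFLAGS calls before re-flagging each buffer). A standalone sketch of the mask-clearing idiom; the bit values here are chosen for illustration only, the real XBF_* constants live in xfs_buf.h:

#include <stdio.h>

#define XBF_READ    (1 << 0)
#define XBF_WRITE   (1 << 1)
#define XBF_ASYNC   (1 << 2)
#define XBF_DELWRI  (1 << 3)
#define XBF_ORDERED (1 << 4)

/* Clear every per-I/O state flag in one shot, XBF_ORDERED included. */
#define ZEROFLAGS(f) \
	((f) &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_ORDERED))

int main(void)
{
	unsigned int flags = XBF_WRITE | XBF_ORDERED;

	ZEROFLAGS(flags);
	printf("ordered still set: %d\n", !!(flags & XBF_ORDERED)); /* 0 */
	return 0;
}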
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9bdef9d51900..4754f342a5d3 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -314,6 +314,13 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp) | |||
314 | return; | 314 | return; |
315 | } | 315 | } |
316 | 316 | ||
317 | if (xfs_readonly_buftarg(mp->m_ddev_targp)) { | ||
318 | xfs_fs_cmn_err(CE_NOTE, mp, | ||
319 | "Disabling barriers, underlying device is readonly"); | ||
320 | mp->m_flags &= ~XFS_MOUNT_BARRIER; | ||
321 | return; | ||
322 | } | ||
323 | |||
317 | error = xfs_barrier_test(mp); | 324 | error = xfs_barrier_test(mp); |
318 | if (error) { | 325 | if (error) { |
319 | xfs_fs_cmn_err(CE_NOTE, mp, | 326 | xfs_fs_cmn_err(CE_NOTE, mp, |
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index e95e99f7168f..f137856c3261 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c | |||
@@ -217,17 +217,24 @@ xfs_qm_statvfs( | |||
217 | return 0; | 217 | return 0; |
218 | dp = &dqp->q_core; | 218 | dp = &dqp->q_core; |
219 | 219 | ||
220 | limit = dp->d_blk_softlimit ? dp->d_blk_softlimit : dp->d_blk_hardlimit; | 220 | limit = dp->d_blk_softlimit ? |
221 | be64_to_cpu(dp->d_blk_softlimit) : | ||
222 | be64_to_cpu(dp->d_blk_hardlimit); | ||
221 | if (limit && statp->f_blocks > limit) { | 223 | if (limit && statp->f_blocks > limit) { |
222 | statp->f_blocks = limit; | 224 | statp->f_blocks = limit; |
223 | statp->f_bfree = (statp->f_blocks > dp->d_bcount) ? | 225 | statp->f_bfree = |
224 | (statp->f_blocks - dp->d_bcount) : 0; | 226 | (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? |
227 | (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; | ||
225 | } | 228 | } |
226 | limit = dp->d_ino_softlimit ? dp->d_ino_softlimit : dp->d_ino_hardlimit; | 229 | |
230 | limit = dp->d_ino_softlimit ? | ||
231 | be64_to_cpu(dp->d_ino_softlimit) : | ||
232 | be64_to_cpu(dp->d_ino_hardlimit); | ||
227 | if (limit && statp->f_files > limit) { | 233 | if (limit && statp->f_files > limit) { |
228 | statp->f_files = limit; | 234 | statp->f_files = limit; |
229 | statp->f_ffree = (statp->f_files > dp->d_icount) ? | 235 | statp->f_ffree = |
230 | (statp->f_ffree - dp->d_icount) : 0; | 236 | (statp->f_files > be64_to_cpu(dp->d_icount)) ? |
237 | (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0; | ||
231 | } | 238 | } |
232 | 239 | ||
233 | xfs_qm_dqput(dqp); | 240 | xfs_qm_dqput(dqp); |
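The xfs_qm_statvfs() fixes above all have the same cause: the on-disk quota fields (d_blk_softlimit and friends) are stored big-endian, so comparing them raw against CPU-endian counters is only accidentally correct on big-endian machines. A standalone demonstration; be64toh/htobe64 come from glibc's <endian.h>, standing in for the kernel's be64_to_cpu/cpu_to_be64:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t disk_limit = htobe64(1000);	/* as stored on disk */
	uint64_t used = 2000;			/* CPU-endian count */

	/* Raw compare: on little-endian, htobe64(1000) looks huge, so a
	 * quota that is actually exceeded appears to be under limit. */
	printf("raw compare:   %d\n", (int)(used > disk_limit));          /* 0 on LE: bogus */
	printf("be64toh first: %d\n", (int)(used > be64toh(disk_limit))); /* 1: over limit */
	return 0;
}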
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 86c1bf0bba9e..1f8ecff8553a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -334,10 +334,9 @@ xfs_itobp( | |||
334 | #if !defined(__KERNEL__) | 334 | #if !defined(__KERNEL__) |
335 | ni = 0; | 335 | ni = 0; |
336 | #elif defined(DEBUG) | 336 | #elif defined(DEBUG) |
337 | ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : | 337 | ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; |
338 | (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog); | ||
339 | #else /* usual case */ | 338 | #else /* usual case */ |
340 | ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1; | 339 | ni = 1; |
341 | #endif | 340 | #endif |
342 | 341 | ||
343 | for (i = 0; i < ni; i++) { | 342 | for (i = 0; i < ni; i++) { |
@@ -348,11 +347,15 @@ xfs_itobp( | |||
348 | (i << mp->m_sb.sb_inodelog)); | 347 | (i << mp->m_sb.sb_inodelog)); |
349 | di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && | 348 | di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC && |
350 | XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); | 349 | XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT)); |
351 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, | 350 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, |
352 | XFS_RANDOM_ITOBP_INOTOBP))) { | 351 | XFS_ERRTAG_ITOBP_INOTOBP, |
352 | XFS_RANDOM_ITOBP_INOTOBP))) { | ||
353 | if (imap_flags & XFS_IMAP_BULKSTAT) { | ||
354 | xfs_trans_brelse(tp, bp); | ||
355 | return XFS_ERROR(EINVAL); | ||
356 | } | ||
353 | #ifdef DEBUG | 357 | #ifdef DEBUG |
354 | if (!(imap_flags & XFS_IMAP_BULKSTAT)) | 358 | cmn_err(CE_ALERT, |
355 | cmn_err(CE_ALERT, | ||
356 | "Device %s - bad inode magic/vsn " | 359 | "Device %s - bad inode magic/vsn " |
357 | "daddr %lld #%d (magic=%x)", | 360 | "daddr %lld #%d (magic=%x)", |
358 | XFS_BUFTARG_NAME(mp->m_ddev_targp), | 361 | XFS_BUFTARG_NAME(mp->m_ddev_targp), |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index e730328636c3..21ac1a67e3e0 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -1413,7 +1413,7 @@ xlog_sync(xlog_t *log, | |||
1413 | ops = iclog->ic_header.h_num_logops; | 1413 | ops = iclog->ic_header.h_num_logops; |
1414 | INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); | 1414 | INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); |
1415 | 1415 | ||
1416 | bp = iclog->ic_bp; | 1416 | bp = iclog->ic_bp; |
1417 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); | 1417 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); |
1418 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); | 1418 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); |
1419 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); | 1419 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); |
@@ -1430,15 +1430,14 @@ xlog_sync(xlog_t *log, | |||
1430 | } | 1430 | } |
1431 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); | 1431 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); |
1432 | XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ | 1432 | XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ |
1433 | XFS_BUF_ZEROFLAGS(bp); | ||
1433 | XFS_BUF_BUSY(bp); | 1434 | XFS_BUF_BUSY(bp); |
1434 | XFS_BUF_ASYNC(bp); | 1435 | XFS_BUF_ASYNC(bp); |
1435 | /* | 1436 | /* |
1436 | * Do an ordered write for the log block. | 1437 | * Do an ordered write for the log block. |
1437 | 	 * | 1438 | 	 * It's unnecessary to flush the first split block in the log wrap case. |
1438 | 	 * It may not be needed to flush the first split block in the log wrap | ||
1439 | 	 * case, but do it anyways to be safe -AK | ||
1440 | */ | 1439 | */ |
1441 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) | 1440 | if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER)) |
1442 | XFS_BUF_ORDERED(bp); | 1441 | XFS_BUF_ORDERED(bp); |
1443 | 1442 | ||
1444 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); | 1443 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); |
@@ -1460,7 +1459,7 @@ xlog_sync(xlog_t *log, | |||
1460 | return error; | 1459 | return error; |
1461 | } | 1460 | } |
1462 | if (split) { | 1461 | if (split) { |
1463 | bp = iclog->ic_log->l_xbuf; | 1462 | bp = iclog->ic_log->l_xbuf; |
1464 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == | 1463 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == |
1465 | (unsigned long)1); | 1464 | (unsigned long)1); |
1466 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); | 1465 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); |
@@ -1468,6 +1467,7 @@ xlog_sync(xlog_t *log, | |||
1468 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ | 1467 | XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ |
1469 | (__psint_t)count), split); | 1468 | (__psint_t)count), split); |
1470 | XFS_BUF_SET_FSPRIVATE(bp, iclog); | 1469 | XFS_BUF_SET_FSPRIVATE(bp, iclog); |
1470 | XFS_BUF_ZEROFLAGS(bp); | ||
1471 | XFS_BUF_BUSY(bp); | 1471 | XFS_BUF_BUSY(bp); |
1472 | XFS_BUF_ASYNC(bp); | 1472 | XFS_BUF_ASYNC(bp); |
1473 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) | 1473 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) |
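
Two things change in xlog_sync() above: XFS_BUF_ZEROFLAGS() now clears stale flags before each (re)submission of the iclog buffer, and the ordered-write barrier is skipped for the first part of a wrapped log write (the split remainder still gets one). A generic userspace sketch of the buffer-reuse pattern, with illustrative flag names:

    #include <stdio.h>

    #define F_BUSY    (1u << 0)
    #define F_ASYNC   (1u << 1)
    #define F_ORDERED (1u << 2)

    struct buf { unsigned flags; };

    /* Reset state before reuse so flags from the previous submission
     * (here, F_ORDERED) cannot leak into the next one - the role
     * XFS_BUF_ZEROFLAGS() plays above. */
    static void prep_for_reuse(struct buf *b)
    {
        b->flags = 0;
        b->flags |= F_BUSY | F_ASYNC;
    }

    int main(void)
    {
        struct buf b = { .flags = F_ORDERED };   /* stale from last I/O */
        prep_for_reuse(&b);
        printf("ordered still set? %d\n", !!(b.flags & F_ORDERED)); /* 0 */
        return 0;
    }
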
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 6c96391f3f1a..b427d220a169 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c | |||
@@ -515,7 +515,7 @@ xfs_mount( | |||
515 | if (error) | 515 | if (error) |
516 | goto error2; | 516 | goto error2; |
517 | 517 | ||
518 | if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY)) | 518 | if (mp->m_flags & XFS_MOUNT_BARRIER) |
519 | xfs_mountfs_check_barriers(mp); | 519 | xfs_mountfs_check_barriers(mp); |
520 | 520 | ||
521 | error = XFS_IOINIT(vfsp, args, flags); | 521 | error = XFS_IOINIT(vfsp, args, flags); |
diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h index 0ae5084c427b..d03a21c97abb 100644 --- a/include/asm-sparc/signal.h +++ b/include/asm-sparc/signal.h | |||
@@ -168,7 +168,7 @@ struct sigstack { | |||
168 | * statically allocated data.. which is NOT GOOD. | 168 | * statically allocated data.. which is NOT GOOD. |
169 | * | 169 | * |
170 | */ | 170 | */ |
171 | #define SA_STATIC_ALLOC 0x80 | 171 | #define SA_STATIC_ALLOC 0x8000 |
172 | #endif | 172 | #endif |
173 | 173 | ||
174 | #include <asm-generic/signal.h> | 174 | #include <asm-generic/signal.h> |
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 03f5bc9b6bec..1ba19eb34ce3 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h | |||
@@ -339,7 +339,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) | |||
339 | " .section .sun4v_2insn_patch, \"ax\"\n" | 339 | " .section .sun4v_2insn_patch, \"ax\"\n" |
340 | " .word 661b\n" | 340 | " .word 661b\n" |
341 | " andn %0, %4, %0\n" | 341 | " andn %0, %4, %0\n" |
342 | " or %0, %3, %0\n" | 342 | " or %0, %5, %0\n" |
343 | " .previous\n" | 343 | " .previous\n" |
344 | : "=r" (val) | 344 | : "=r" (val) |
345 | : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), | 345 | : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), |
diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h index 5015bb8d6c32..89d42431efb5 100644 --- a/include/asm-sparc64/sfp-machine.h +++ b/include/asm-sparc64/sfp-machine.h | |||
@@ -34,7 +34,7 @@ | |||
34 | #define _FP_MUL_MEAT_D(R,X,Y) \ | 34 | #define _FP_MUL_MEAT_D(R,X,Y) \ |
35 | _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) | 35 | _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) |
36 | #define _FP_MUL_MEAT_Q(R,X,Y) \ | 36 | #define _FP_MUL_MEAT_Q(R,X,Y) \ |
37 | _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) | 37 | _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) |
38 | 38 | ||
39 | #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) | 39 | #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) |
40 | #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) | 40 | #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y) |
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index f7bf875aae40..10f346165cab 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #define EXCEPTION_STACK_ORDER 0 | 19 | #define EXCEPTION_STACK_ORDER 0 |
20 | #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) | 20 | #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) |
21 | 21 | ||
22 | #define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER | 22 | #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) |
23 | #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) | 23 | #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) |
24 | 24 | ||
25 | #define IRQSTACK_ORDER 2 | 25 | #define IRQSTACK_ORDER 2 |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 35e137636b0b..4ea39fee99c7 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -172,9 +172,6 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy, | |||
172 | unsigned int relation); | 172 | unsigned int relation); |
173 | 173 | ||
174 | 174 | ||
175 | /* pass an event to the cpufreq governor */ | ||
176 | int cpufreq_governor(unsigned int cpu, unsigned int event); | ||
177 | |||
178 | int cpufreq_register_governor(struct cpufreq_governor *governor); | 175 | int cpufreq_register_governor(struct cpufreq_governor *governor); |
179 | void cpufreq_unregister_governor(struct cpufreq_governor *governor); | 176 | void cpufreq_unregister_governor(struct cpufreq_governor *governor); |
180 | 177 | ||
diff --git a/include/linux/futex.h b/include/linux/futex.h index 34c3a215f2cd..d097b5b72bc6 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h | |||
@@ -96,7 +96,8 @@ struct robust_list_head { | |||
96 | long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, | 96 | long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout, |
97 | u32 __user *uaddr2, u32 val2, u32 val3); | 97 | u32 __user *uaddr2, u32 val2, u32 val3); |
98 | 98 | ||
99 | extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); | 99 | extern int |
100 | handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); | ||
100 | 101 | ||
101 | #ifdef CONFIG_FUTEX | 102 | #ifdef CONFIG_FUTEX |
102 | extern void exit_robust_list(struct task_struct *curr); | 103 | extern void exit_robust_list(struct task_struct *curr); |
diff --git a/include/linux/ide.h b/include/linux/ide.h index dc7abef10965..99620451d958 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -571,6 +571,7 @@ typedef struct ide_drive_s { | |||
571 | u8 waiting_for_dma; /* dma currently in progress */ | 571 | u8 waiting_for_dma; /* dma currently in progress */ |
572 | u8 unmask; /* okay to unmask other irqs */ | 572 | u8 unmask; /* okay to unmask other irqs */ |
573 | u8 bswap; /* byte swap data */ | 573 | u8 bswap; /* byte swap data */ |
574 | u8 noflush; /* don't attempt flushes */ | ||
574 | u8 dsc_overlap; /* DSC overlap */ | 575 | u8 dsc_overlap; /* DSC overlap */ |
575 | u8 nice1; /* give potential excess bandwidth */ | 576 | u8 nice1; /* give potential excess bandwidth */ |
576 | 577 | ||
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 87764022cc67..31f02ba036ce 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h | |||
@@ -79,6 +79,8 @@ struct bridge_skb_cb { | |||
79 | __u32 ipv4; | 79 | __u32 ipv4; |
80 | } daddr; | 80 | } daddr; |
81 | }; | 81 | }; |
82 | |||
83 | extern int brnf_deferred_hooks; | ||
82 | #endif /* CONFIG_BRIDGE_NETFILTER */ | 84 | #endif /* CONFIG_BRIDGE_NETFILTER */ |
83 | 85 | ||
84 | #endif /* __KERNEL__ */ | 86 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0bf31b83578c..4307e764ef0a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -1066,9 +1066,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) | |||
1066 | kfree_skb(skb); | 1066 | kfree_skb(skb); |
1067 | } | 1067 | } |
1068 | 1068 | ||
1069 | #ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB | ||
1070 | /** | 1069 | /** |
1071 | * __dev_alloc_skb - allocate an skbuff for sending | 1070 | * __dev_alloc_skb - allocate an skbuff for receiving |
1072 | * @length: length to allocate | 1071 | * @length: length to allocate |
1073 | * @gfp_mask: get_free_pages mask, passed to alloc_skb | 1072 | * @gfp_mask: get_free_pages mask, passed to alloc_skb |
1074 | * | 1073 | * |
@@ -1087,12 +1086,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, | |||
1087 | skb_reserve(skb, NET_SKB_PAD); | 1086 | skb_reserve(skb, NET_SKB_PAD); |
1088 | return skb; | 1087 | return skb; |
1089 | } | 1088 | } |
1090 | #else | ||
1091 | extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask); | ||
1092 | #endif | ||
1093 | 1089 | ||
1094 | /** | 1090 | /** |
1095 | * dev_alloc_skb - allocate an skbuff for sending | 1091 | * dev_alloc_skb - allocate an skbuff for receiving |
1096 | * @length: length to allocate | 1092 | * @length: length to allocate |
1097 | * | 1093 | * |
1098 | * Allocate a new &sk_buff and assign it a usage count of one. The | 1094 | * Allocate a new &sk_buff and assign it a usage count of one. The |
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 1925c65e617b..f6afee73235d 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h | |||
@@ -169,23 +169,17 @@ psched_tod_diff(int delta_sec, int bound) | |||
169 | 169 | ||
170 | #define PSCHED_TADD2(tv, delta, tv_res) \ | 170 | #define PSCHED_TADD2(tv, delta, tv_res) \ |
171 | ({ \ | 171 | ({ \ |
172 | int __delta = (delta); \ | 172 | int __delta = (tv).tv_usec + (delta); \ |
173 | (tv_res) = (tv); \ | 173 | (tv_res).tv_sec = (tv).tv_sec; \ |
174 | while(__delta >= USEC_PER_SEC){ \ | 174 | while (__delta >= USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \ |
175 | (tv_res).tv_sec++; \ | ||
176 | __delta -= USEC_PER_SEC; \ | ||
177 | } \ | ||
178 | (tv_res).tv_usec = __delta; \ | 175 | (tv_res).tv_usec = __delta; \ |
179 | }) | 176 | }) |
180 | 177 | ||
181 | #define PSCHED_TADD(tv, delta) \ | 178 | #define PSCHED_TADD(tv, delta) \ |
182 | ({ \ | 179 | ({ \ |
183 | int __delta = (delta); \ | 180 | (tv).tv_usec += (delta); \ |
184 | while(__delta >= USEC_PER_SEC){ \ | 181 | while ((tv).tv_usec >= USEC_PER_SEC) { (tv).tv_sec++; \ |
185 | (tv).tv_sec++; \ | 182 | (tv).tv_usec -= USEC_PER_SEC; } \ |
186 | __delta -= USEC_PER_SEC; \ | ||
187 | } \ | ||
188 | (tv).tv_usec = __delta; \ | ||
189 | }) | 183 | }) |
190 | 184 | ||
191 | /* Set/check that time is in the "past perfect"; | 185 | /* Set/check that time is in the "past perfect"; |
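
The reworked macros fix a real bug visible in the old left-hand side: PSCHED_TADD/PSCHED_TADD2 normalized delta alone and then overwrote tv_usec with the remainder, discarding the microseconds already stored in the timeval and missing overflow when usec + delta crossed a second boundary. A standalone sketch of the corrected add-then-fold order:

    #include <stdio.h>

    #define USEC_PER_SEC 1000000

    struct tv { long sec; long usec; };

    /* Sum first, then fold whole seconds out of the microsecond field,
     * as the fixed PSCHED_TADD2 now does. */
    static struct tv tadd2(struct tv t, int delta)
    {
        struct tv r = { t.sec, 0 };
        int u = t.usec + delta;            /* may exceed one second */
        while (u >= USEC_PER_SEC) { r.sec++; u -= USEC_PER_SEC; }
        r.usec = u;
        return r;
    }

    int main(void)
    {
        struct tv t = { 10, 900000 };
        struct tv r = tadd2(t, 250000);
        printf("%ld.%06ld\n", r.sec, r.usec);   /* 11.150000 */
        return 0;
    }
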
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 5ff77558013b..585d28e960dd 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h | |||
@@ -75,6 +75,7 @@ | |||
75 | #define IB_MGMT_METHOD_TRAP_REPRESS 0x07 | 75 | #define IB_MGMT_METHOD_TRAP_REPRESS 0x07 |
76 | 76 | ||
77 | #define IB_MGMT_METHOD_RESP 0x80 | 77 | #define IB_MGMT_METHOD_RESP 0x80 |
78 | #define IB_BM_ATTR_MOD_RESP cpu_to_be32(1) | ||
78 | 79 | ||
79 | #define IB_MGMT_MAX_METHODS 128 | 80 | #define IB_MGMT_MAX_METHODS 128 |
80 | 81 | ||
@@ -247,6 +248,12 @@ struct ib_mad_send_buf { | |||
247 | }; | 248 | }; |
248 | 249 | ||
249 | /** | 250 | /** |
251 | * ib_response_mad - Returns if the specified MAD has been generated in | ||
252 | * response to a sent request or trap. | ||
253 | */ | ||
254 | int ib_response_mad(struct ib_mad *mad); | ||
255 | |||
256 | /** | ||
250 | * ib_get_rmpp_resptime - Returns the RMPP response time. | 257 | * ib_get_rmpp_resptime - Returns the RMPP response time. |
251 | * @rmpp_hdr: An RMPP header. | 258 | * @rmpp_hdr: An RMPP header. |
252 | */ | 259 | */ |
diff --git a/kernel/futex.c b/kernel/futex.c index cf0c8e21d1ab..dda2049692a2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -415,15 +415,15 @@ out_unlock: | |||
415 | */ | 415 | */ |
416 | void exit_pi_state_list(struct task_struct *curr) | 416 | void exit_pi_state_list(struct task_struct *curr) |
417 | { | 417 | { |
418 | struct futex_hash_bucket *hb; | ||
419 | struct list_head *next, *head = &curr->pi_state_list; | 418 | struct list_head *next, *head = &curr->pi_state_list; |
420 | struct futex_pi_state *pi_state; | 419 | struct futex_pi_state *pi_state; |
420 | struct futex_hash_bucket *hb; | ||
421 | union futex_key key; | 421 | union futex_key key; |
422 | 422 | ||
423 | /* | 423 | /* |
424 | * We are a ZOMBIE and nobody can enqueue itself on | 424 | * We are a ZOMBIE and nobody can enqueue itself on |
425 | * pi_state_list anymore, but we have to be careful | 425 | * pi_state_list anymore, but we have to be careful |
426 | * versus waiters unqueueing themselfs | 426 | * versus waiters unqueueing themselves: |
427 | */ | 427 | */ |
428 | spin_lock_irq(&curr->pi_lock); | 428 | spin_lock_irq(&curr->pi_lock); |
429 | while (!list_empty(head)) { | 429 | while (!list_empty(head)) { |
@@ -431,21 +431,24 @@ void exit_pi_state_list(struct task_struct *curr) | |||
431 | next = head->next; | 431 | next = head->next; |
432 | pi_state = list_entry(next, struct futex_pi_state, list); | 432 | pi_state = list_entry(next, struct futex_pi_state, list); |
433 | key = pi_state->key; | 433 | key = pi_state->key; |
434 | hb = hash_futex(&key); | ||
434 | spin_unlock_irq(&curr->pi_lock); | 435 | spin_unlock_irq(&curr->pi_lock); |
435 | 436 | ||
436 | hb = hash_futex(&key); | ||
437 | spin_lock(&hb->lock); | 437 | spin_lock(&hb->lock); |
438 | 438 | ||
439 | spin_lock_irq(&curr->pi_lock); | 439 | spin_lock_irq(&curr->pi_lock); |
440 | /* | ||
441 | * We dropped the pi-lock, so re-check whether this | ||
442 | * task still owns the PI-state: | ||
443 | */ | ||
440 | if (head->next != next) { | 444 | if (head->next != next) { |
441 | spin_unlock(&hb->lock); | 445 | spin_unlock(&hb->lock); |
442 | continue; | 446 | continue; |
443 | } | 447 | } |
444 | 448 | ||
445 | list_del_init(&pi_state->list); | ||
446 | |||
447 | WARN_ON(pi_state->owner != curr); | 449 | WARN_ON(pi_state->owner != curr); |
448 | 450 | WARN_ON(list_empty(&pi_state->list)); | |
451 | list_del_init(&pi_state->list); | ||
449 | pi_state->owner = NULL; | 452 | pi_state->owner = NULL; |
450 | spin_unlock_irq(&curr->pi_lock); | 453 | spin_unlock_irq(&curr->pi_lock); |
451 | 454 | ||
@@ -470,7 +473,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | |||
470 | head = &hb->chain; | 473 | head = &hb->chain; |
471 | 474 | ||
472 | list_for_each_entry_safe(this, next, head, list) { | 475 | list_for_each_entry_safe(this, next, head, list) { |
473 | if (match_futex (&this->key, &me->key)) { | 476 | if (match_futex(&this->key, &me->key)) { |
474 | /* | 477 | /* |
475 | * Another waiter already exists - bump up | 478 | * Another waiter already exists - bump up |
476 | * the refcount and return its pi_state: | 479 | * the refcount and return its pi_state: |
@@ -482,6 +485,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | |||
482 | if (unlikely(!pi_state)) | 485 | if (unlikely(!pi_state)) |
483 | return -EINVAL; | 486 | return -EINVAL; |
484 | 487 | ||
488 | WARN_ON(!atomic_read(&pi_state->refcount)); | ||
489 | |||
485 | atomic_inc(&pi_state->refcount); | 490 | atomic_inc(&pi_state->refcount); |
486 | me->pi_state = pi_state; | 491 | me->pi_state = pi_state; |
487 | 492 | ||
@@ -490,10 +495,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | |||
490 | } | 495 | } |
491 | 496 | ||
492 | /* | 497 | /* |
493 | * We are the first waiter - try to look up the real owner and | 498 | * We are the first waiter - try to look up the real owner and attach |
494 | * attach the new pi_state to it: | 499 | * the new pi_state to it, but bail out when the owner died bit is set |
500 | * and TID = 0: | ||
495 | */ | 501 | */ |
496 | pid = uval & FUTEX_TID_MASK; | 502 | pid = uval & FUTEX_TID_MASK; |
503 | if (!pid && (uval & FUTEX_OWNER_DIED)) | ||
504 | return -ESRCH; | ||
497 | p = futex_find_get_task(pid); | 505 | p = futex_find_get_task(pid); |
498 | if (!p) | 506 | if (!p) |
499 | return -ESRCH; | 507 | return -ESRCH; |
@@ -510,6 +518,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me) | |||
510 | pi_state->key = me->key; | 518 | pi_state->key = me->key; |
511 | 519 | ||
512 | spin_lock_irq(&p->pi_lock); | 520 | spin_lock_irq(&p->pi_lock); |
521 | WARN_ON(!list_empty(&pi_state->list)); | ||
513 | list_add(&pi_state->list, &p->pi_state_list); | 522 | list_add(&pi_state->list, &p->pi_state_list); |
514 | pi_state->owner = p; | 523 | pi_state->owner = p; |
515 | spin_unlock_irq(&p->pi_lock); | 524 | spin_unlock_irq(&p->pi_lock); |
@@ -573,20 +582,29 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) | |||
573 | * kept enabled while there is PI state around. We must also | 582 | * kept enabled while there is PI state around. We must also |
574 | * preserve the owner died bit.) | 583 | * preserve the owner died bit.) |
575 | */ | 584 | */ |
576 | newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid; | 585 | if (!(uval & FUTEX_OWNER_DIED)) { |
586 | newval = FUTEX_WAITERS | new_owner->pid; | ||
577 | 587 | ||
578 | inc_preempt_count(); | 588 | inc_preempt_count(); |
579 | curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); | 589 | curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); |
580 | dec_preempt_count(); | 590 | dec_preempt_count(); |
591 | if (curval == -EFAULT) | ||
592 | return -EFAULT; | ||
593 | if (curval != uval) | ||
594 | return -EINVAL; | ||
595 | } | ||
581 | 596 | ||
582 | if (curval == -EFAULT) | 597 | spin_lock_irq(&pi_state->owner->pi_lock); |
583 | return -EFAULT; | 598 | WARN_ON(list_empty(&pi_state->list)); |
584 | if (curval != uval) | 599 | list_del_init(&pi_state->list); |
585 | return -EINVAL; | 600 | spin_unlock_irq(&pi_state->owner->pi_lock); |
586 | 601 | ||
587 | list_del_init(&pi_state->owner->pi_state_list); | 602 | spin_lock_irq(&new_owner->pi_lock); |
603 | WARN_ON(!list_empty(&pi_state->list)); | ||
588 | list_add(&pi_state->list, &new_owner->pi_state_list); | 604 | list_add(&pi_state->list, &new_owner->pi_state_list); |
589 | pi_state->owner = new_owner; | 605 | pi_state->owner = new_owner; |
606 | spin_unlock_irq(&new_owner->pi_lock); | ||
607 | |||
590 | rt_mutex_unlock(&pi_state->pi_mutex); | 608 | rt_mutex_unlock(&pi_state->pi_mutex); |
591 | 609 | ||
592 | return 0; | 610 | return 0; |
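
The wake_futex_pi() rework above also repairs the ownership handover: the old code called list_del_init() on the owner's list head rather than on the pi_state entry, and mutated both lists without ever taking the new owner's pi_lock. The fixed sequence touches each list only under its own owner's lock, old owner first, new owner second. A pthread sketch of that shape (names and counters illustrative; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct owner { pthread_mutex_t lock; int nstates; };

    /* Each owner's state list is modified only under that owner's
     * own lock, mirroring the pi_lock discipline above. */
    static void transfer(struct owner *from, struct owner *to)
    {
        pthread_mutex_lock(&from->lock);
        from->nstates--;                 /* list_del_init(&pi_state->list) */
        pthread_mutex_unlock(&from->lock);

        pthread_mutex_lock(&to->lock);
        to->nstates++;                   /* list_add(...); owner = new */
        pthread_mutex_unlock(&to->lock);
    }

    int main(void)
    {
        struct owner a = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct owner b = { PTHREAD_MUTEX_INITIALIZER, 0 };
        transfer(&a, &b);
        printf("a=%d b=%d\n", a.nstates, b.nstates);   /* a=0 b=1 */
        return 0;
    }
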
@@ -1236,6 +1254,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, | |||
1236 | /* Owner died? */ | 1254 | /* Owner died? */ |
1237 | if (q.pi_state->owner != NULL) { | 1255 | if (q.pi_state->owner != NULL) { |
1238 | spin_lock_irq(&q.pi_state->owner->pi_lock); | 1256 | spin_lock_irq(&q.pi_state->owner->pi_lock); |
1257 | WARN_ON(list_empty(&q.pi_state->list)); | ||
1239 | list_del_init(&q.pi_state->list); | 1258 | list_del_init(&q.pi_state->list); |
1240 | spin_unlock_irq(&q.pi_state->owner->pi_lock); | 1259 | spin_unlock_irq(&q.pi_state->owner->pi_lock); |
1241 | } else | 1260 | } else |
@@ -1244,6 +1263,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock, | |||
1244 | q.pi_state->owner = current; | 1263 | q.pi_state->owner = current; |
1245 | 1264 | ||
1246 | spin_lock_irq(¤t->pi_lock); | 1265 | spin_lock_irq(¤t->pi_lock); |
1266 | WARN_ON(!list_empty(&q.pi_state->list)); | ||
1247 | list_add(&q.pi_state->list, ¤t->pi_state_list); | 1267 | list_add(&q.pi_state->list, ¤t->pi_state_list); |
1248 | spin_unlock_irq(¤t->pi_lock); | 1268 | spin_unlock_irq(¤t->pi_lock); |
1249 | 1269 | ||
@@ -1427,9 +1447,11 @@ retry_locked: | |||
1427 | * again. If it succeeds then we can return without waking | 1447 | * again. If it succeeds then we can return without waking |
1428 | * anyone else up: | 1448 | * anyone else up: |
1429 | */ | 1449 | */ |
1430 | inc_preempt_count(); | 1450 | if (!(uval & FUTEX_OWNER_DIED)) { |
1431 | uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); | 1451 | inc_preempt_count(); |
1432 | dec_preempt_count(); | 1452 | uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0); |
1453 | dec_preempt_count(); | ||
1454 | } | ||
1433 | 1455 | ||
1434 | if (unlikely(uval == -EFAULT)) | 1456 | if (unlikely(uval == -EFAULT)) |
1435 | goto pi_faulted; | 1457 | goto pi_faulted; |
@@ -1462,9 +1484,11 @@ retry_locked: | |||
1462 | /* | 1484 | /* |
1463 | * No waiters - kernel unlocks the futex: | 1485 | * No waiters - kernel unlocks the futex: |
1464 | */ | 1486 | */ |
1465 | ret = unlock_futex_pi(uaddr, uval); | 1487 | if (!(uval & FUTEX_OWNER_DIED)) { |
1466 | if (ret == -EFAULT) | 1488 | ret = unlock_futex_pi(uaddr, uval); |
1467 | goto pi_faulted; | 1489 | if (ret == -EFAULT) |
1490 | goto pi_faulted; | ||
1491 | } | ||
1468 | 1492 | ||
1469 | out_unlock: | 1493 | out_unlock: |
1470 | spin_unlock(&hb->lock); | 1494 | spin_unlock(&hb->lock); |
@@ -1683,9 +1707,9 @@ err_unlock: | |||
1683 | * Process a futex-list entry, check whether it's owned by the | 1707 | * Process a futex-list entry, check whether it's owned by the |
1684 | * dying task, and do notification if so: | 1708 | * dying task, and do notification if so: |
1685 | */ | 1709 | */ |
1686 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr) | 1710 | int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) |
1687 | { | 1711 | { |
1688 | u32 uval, nval; | 1712 | u32 uval, nval, mval; |
1689 | 1713 | ||
1690 | retry: | 1714 | retry: |
1691 | if (get_user(uval, uaddr)) | 1715 | if (get_user(uval, uaddr)) |
@@ -1702,21 +1726,45 @@ retry: | |||
1702 | * thread-death.) The rest of the cleanup is done in | 1726 | * thread-death.) The rest of the cleanup is done in |
1703 | * userspace. | 1727 | * userspace. |
1704 | */ | 1728 | */ |
1705 | nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, | 1729 | mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; |
1706 | uval | FUTEX_OWNER_DIED); | 1730 | nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); |
1731 | |||
1707 | if (nval == -EFAULT) | 1732 | if (nval == -EFAULT) |
1708 | return -1; | 1733 | return -1; |
1709 | 1734 | ||
1710 | if (nval != uval) | 1735 | if (nval != uval) |
1711 | goto retry; | 1736 | goto retry; |
1712 | 1737 | ||
1713 | if (uval & FUTEX_WAITERS) | 1738 | /* |
1714 | futex_wake(uaddr, 1); | 1739 | * Wake robust non-PI futexes here. The wakeup of |
1740 | 	 * PI futexes happens in exit_pi_state_list(): | ||
1741 | */ | ||
1742 | if (!pi) { | ||
1743 | if (uval & FUTEX_WAITERS) | ||
1744 | futex_wake(uaddr, 1); | ||
1745 | } | ||
1715 | } | 1746 | } |
1716 | return 0; | 1747 | return 0; |
1717 | } | 1748 | } |
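
handle_futex_death() above now writes (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED instead of uval | FUTEX_OWNER_DIED: the dead owner's TID is cleared so a later lock attempt cannot mistake the word for a live owner, while the waiters bit survives so they can still be found. A sketch using the futex word bit layout of this era (FUTEX_WAITERS 0x80000000, FUTEX_OWNER_DIED 0x40000000):

    #include <stdio.h>
    #include <stdint.h>

    #define FUTEX_WAITERS    0x80000000u
    #define FUTEX_OWNER_DIED 0x40000000u
    #define FUTEX_TID_MASK   0x3fffffffu

    /* Keep WAITERS, set OWNER_DIED, drop the dead owner's TID. */
    static uint32_t death_value(uint32_t uval)
    {
        return (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
    }

    int main(void)
    {
        uint32_t uval = FUTEX_WAITERS | 1234;   /* TID 1234, waiters queued */
        uint32_t mval = death_value(uval);
        printf("tid=%u waiters=%d died=%d\n",
               mval & FUTEX_TID_MASK,            /* 0 */
               !!(mval & FUTEX_WAITERS),         /* 1 */
               !!(mval & FUTEX_OWNER_DIED));     /* 1 */
        return 0;
    }
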
1718 | 1749 | ||
1719 | /* | 1750 | /* |
1751 | * Fetch a robust-list pointer. Bit 0 signals PI futexes: | ||
1752 | */ | ||
1753 | static inline int fetch_robust_entry(struct robust_list __user **entry, | ||
1754 | struct robust_list __user **head, int *pi) | ||
1755 | { | ||
1756 | unsigned long uentry; | ||
1757 | |||
1758 | if (get_user(uentry, (unsigned long *)head)) | ||
1759 | return -EFAULT; | ||
1760 | |||
1761 | *entry = (void *)(uentry & ~1UL); | ||
1762 | *pi = uentry & 1; | ||
1763 | |||
1764 | return 0; | ||
1765 | } | ||
1766 | |||
1767 | /* | ||
1720 | * Walk curr->robust_list (very carefully, it's a userspace list!) | 1768 | * Walk curr->robust_list (very carefully, it's a userspace list!) |
1721 | * and mark any locks found there dead, and notify any waiters. | 1769 | * and mark any locks found there dead, and notify any waiters. |
1722 | * | 1770 | * |
@@ -1726,14 +1774,14 @@ void exit_robust_list(struct task_struct *curr) | |||
1726 | { | 1774 | { |
1727 | struct robust_list_head __user *head = curr->robust_list; | 1775 | struct robust_list_head __user *head = curr->robust_list; |
1728 | struct robust_list __user *entry, *pending; | 1776 | struct robust_list __user *entry, *pending; |
1729 | unsigned int limit = ROBUST_LIST_LIMIT; | 1777 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
1730 | unsigned long futex_offset; | 1778 | unsigned long futex_offset; |
1731 | 1779 | ||
1732 | /* | 1780 | /* |
1733 | * Fetch the list head (which was registered earlier, via | 1781 | * Fetch the list head (which was registered earlier, via |
1734 | * sys_set_robust_list()): | 1782 | * sys_set_robust_list()): |
1735 | */ | 1783 | */ |
1736 | if (get_user(entry, &head->list.next)) | 1784 | if (fetch_robust_entry(&entry, &head->list.next, &pi)) |
1737 | return; | 1785 | return; |
1738 | /* | 1786 | /* |
1739 | * Fetch the relative futex offset: | 1787 | * Fetch the relative futex offset: |
@@ -1744,10 +1792,11 @@ void exit_robust_list(struct task_struct *curr) | |||
1744 | * Fetch any possibly pending lock-add first, and handle it | 1792 | * Fetch any possibly pending lock-add first, and handle it |
1745 | * if it exists: | 1793 | * if it exists: |
1746 | */ | 1794 | */ |
1747 | if (get_user(pending, &head->list_op_pending)) | 1795 | if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) |
1748 | return; | 1796 | return; |
1797 | |||
1749 | if (pending) | 1798 | if (pending) |
1750 | handle_futex_death((void *)pending + futex_offset, curr); | 1799 | handle_futex_death((void *)pending + futex_offset, curr, pip); |
1751 | 1800 | ||
1752 | while (entry != &head->list) { | 1801 | while (entry != &head->list) { |
1753 | /* | 1802 | /* |
@@ -1756,12 +1805,12 @@ void exit_robust_list(struct task_struct *curr) | |||
1756 | */ | 1805 | */ |
1757 | if (entry != pending) | 1806 | if (entry != pending) |
1758 | if (handle_futex_death((void *)entry + futex_offset, | 1807 | if (handle_futex_death((void *)entry + futex_offset, |
1759 | curr)) | 1808 | curr, pi)) |
1760 | return; | 1809 | return; |
1761 | /* | 1810 | /* |
1762 | * Fetch the next entry in the list: | 1811 | * Fetch the next entry in the list: |
1763 | */ | 1812 | */ |
1764 | if (get_user(entry, &entry->next)) | 1813 | if (fetch_robust_entry(&entry, &entry->next, &pi)) |
1765 | return; | 1814 | return; |
1766 | /* | 1815 | /* |
1767 | * Avoid excessively long or circular lists: | 1816 | * Avoid excessively long or circular lists: |
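
The new fetch_robust_entry() (and its compat twin in the next file) relies on pointer tagging: robust-list entries are at least word-aligned, so bit 0 of each user pointer is free to mark the entry as a PI futex. A userspace sketch of the encoding:

    #include <stdio.h>
    #include <stdint.h>

    struct robust_list { struct robust_list *next; };

    /* Strip the tag bit to recover the pointer; report it separately. */
    static struct robust_list *decode(uintptr_t uentry, int *pi)
    {
        *pi = uentry & 1;
        return (struct robust_list *)(uentry & ~(uintptr_t)1);
    }

    int main(void)
    {
        struct robust_list node;
        uintptr_t tagged = (uintptr_t)&node | 1;   /* mark as PI */
        int pi;
        struct robust_list *e = decode(tagged, &pi);
        printf("ptr ok=%d pi=%d\n", e == &node, pi);   /* 1 1 */
        return 0;
    }
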
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index d1d92b441fb7..d1aab1a452cc 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -12,6 +12,23 @@ | |||
12 | 12 | ||
13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
14 | 14 | ||
15 | |||
16 | /* | ||
17 | * Fetch a robust-list pointer. Bit 0 signals PI futexes: | ||
18 | */ | ||
19 | static inline int | ||
20 | fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, | ||
21 | compat_uptr_t *head, int *pi) | ||
22 | { | ||
23 | if (get_user(*uentry, head)) | ||
24 | return -EFAULT; | ||
25 | |||
26 | *entry = compat_ptr((*uentry) & ~1); | ||
27 | *pi = (unsigned int)(*uentry) & 1; | ||
28 | |||
29 | return 0; | ||
30 | } | ||
31 | |||
15 | /* | 32 | /* |
16 | * Walk curr->robust_list (very carefully, it's a userspace list!) | 33 | * Walk curr->robust_list (very carefully, it's a userspace list!) |
17 | * and mark any locks found there dead, and notify any waiters. | 34 | * and mark any locks found there dead, and notify any waiters. |
@@ -22,17 +39,16 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
22 | { | 39 | { |
23 | struct compat_robust_list_head __user *head = curr->compat_robust_list; | 40 | struct compat_robust_list_head __user *head = curr->compat_robust_list; |
24 | struct robust_list __user *entry, *pending; | 41 | struct robust_list __user *entry, *pending; |
42 | unsigned int limit = ROBUST_LIST_LIMIT, pi; | ||
25 | compat_uptr_t uentry, upending; | 43 | compat_uptr_t uentry, upending; |
26 | unsigned int limit = ROBUST_LIST_LIMIT; | ||
27 | compat_long_t futex_offset; | 44 | compat_long_t futex_offset; |
28 | 45 | ||
29 | /* | 46 | /* |
30 | * Fetch the list head (which was registered earlier, via | 47 | * Fetch the list head (which was registered earlier, via |
31 | * sys_set_robust_list()): | 48 | * sys_set_robust_list()): |
32 | */ | 49 | */ |
33 | if (get_user(uentry, &head->list.next)) | 50 | if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) |
34 | return; | 51 | return; |
35 | entry = compat_ptr(uentry); | ||
36 | /* | 52 | /* |
37 | * Fetch the relative futex offset: | 53 | * Fetch the relative futex offset: |
38 | */ | 54 | */ |
@@ -42,11 +58,11 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
42 | * Fetch any possibly pending lock-add first, and handle it | 58 | * Fetch any possibly pending lock-add first, and handle it |
43 | * if it exists: | 59 | * if it exists: |
44 | */ | 60 | */ |
45 | if (get_user(upending, &head->list_op_pending)) | 61 | if (fetch_robust_entry(&upending, &pending, |
62 | &head->list_op_pending, &pi)) | ||
46 | return; | 63 | return; |
47 | pending = compat_ptr(upending); | ||
48 | if (upending) | 64 | if (upending) |
49 | handle_futex_death((void *)pending + futex_offset, curr); | 65 | handle_futex_death((void *)pending + futex_offset, curr, pi); |
50 | 66 | ||
51 | while (compat_ptr(uentry) != &head->list) { | 67 | while (compat_ptr(uentry) != &head->list) { |
52 | /* | 68 | /* |
@@ -55,15 +71,15 @@ void compat_exit_robust_list(struct task_struct *curr) | |||
55 | */ | 71 | */ |
56 | if (entry != pending) | 72 | if (entry != pending) |
57 | if (handle_futex_death((void *)entry + futex_offset, | 73 | if (handle_futex_death((void *)entry + futex_offset, |
58 | curr)) | 74 | curr, pi)) |
59 | return; | 75 | return; |
60 | 76 | ||
61 | /* | 77 | /* |
62 | * Fetch the next entry in the list: | 78 | * Fetch the next entry in the list: |
63 | */ | 79 | */ |
64 | if (get_user(uentry, (compat_uptr_t *)&entry->next)) | 80 | if (fetch_robust_entry(&uentry, &entry, |
81 | (compat_uptr_t *)&entry->next, &pi)) | ||
65 | return; | 82 | return; |
66 | entry = compat_ptr(uentry); | ||
67 | /* | 83 | /* |
68 | * Avoid excessively long or circular lists: | 84 | * Avoid excessively long or circular lists: |
69 | */ | 85 | */ |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 0ade0c63fdf6..18fcb9fa518d 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -67,10 +67,6 @@ static struct packet_type vlan_packet_type = { | |||
67 | .func = vlan_skb_recv, /* VLAN receive method */ | 67 | .func = vlan_skb_recv, /* VLAN receive method */ |
68 | }; | 68 | }; |
69 | 69 | ||
70 | /* Bits of netdev state that are propagated from real device to virtual */ | ||
71 | #define VLAN_LINK_STATE_MASK \ | ||
72 | ((1<<__LINK_STATE_PRESENT)|(1<<__LINK_STATE_NOCARRIER)|(1<<__LINK_STATE_DORMANT)) | ||
73 | |||
74 | /* End of global variables definitions. */ | 70 | /* End of global variables definitions. */ |
75 | 71 | ||
76 | /* | 72 | /* |
@@ -479,7 +475,9 @@ static struct net_device *register_vlan_device(const char *eth_IF_name, | |||
479 | new_dev->flags = real_dev->flags; | 475 | new_dev->flags = real_dev->flags; |
480 | new_dev->flags &= ~IFF_UP; | 476 | new_dev->flags &= ~IFF_UP; |
481 | 477 | ||
482 | new_dev->state = real_dev->state & ~(1<<__LINK_STATE_START); | 478 | new_dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | |
479 | (1<<__LINK_STATE_DORMANT))) | | ||
480 | (1<<__LINK_STATE_PRESENT); | ||
483 | 481 | ||
484 | /* need 4 bytes for extra VLAN header info, | 482 | /* need 4 bytes for extra VLAN header info, |
485 | * hope the underlying device can handle it. | 483 | * hope the underlying device can handle it. |
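
register_vlan_device() previously copied the real device's whole state word minus __LINK_STATE_START, dragging along bits that make no sense on a freshly created virtual device. The fix whitelists only the carrier and dormant bits and forces PRESENT. A sketch of the same mask logic with illustrative bit names:

    #include <stdio.h>

    enum {
        LS_START     = 1 << 0,
        LS_PRESENT   = 1 << 1,
        LS_NOCARRIER = 1 << 2,
        LS_DORMANT   = 1 << 3,
    };

    /* Propagate only carrier/dormant state; the new device is present
     * but not started. */
    static unsigned vlan_initial_state(unsigned real_state)
    {
        return (real_state & (LS_NOCARRIER | LS_DORMANT)) | LS_PRESENT;
    }

    int main(void)
    {
        unsigned real = LS_START | LS_PRESENT | LS_NOCARRIER;
        printf("0x%x\n", vlan_initial_state(real)); /* NOCARRIER|PRESENT only */
        return 0;
    }
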
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 77eab8f4c7fd..332dd8f436ea 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #define VERSION "1.8" | 55 | #define VERSION "1.8" |
56 | 56 | ||
57 | static int disable_cfc = 0; | 57 | static int disable_cfc = 0; |
58 | static int channel_mtu = -1; | ||
58 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; | 59 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; |
59 | 60 | ||
60 | static struct task_struct *rfcomm_thread; | 61 | static struct task_struct *rfcomm_thread; |
@@ -812,7 +813,10 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d | |||
812 | pn->credits = 0; | 813 | pn->credits = 0; |
813 | } | 814 | } |
814 | 815 | ||
815 | pn->mtu = htobs(d->mtu); | 816 | if (cr && channel_mtu >= 0) |
817 | pn->mtu = htobs(channel_mtu); | ||
818 | else | ||
819 | pn->mtu = htobs(d->mtu); | ||
816 | 820 | ||
817 | *ptr = __fcs(buf); ptr++; | 821 | *ptr = __fcs(buf); ptr++; |
818 | 822 | ||
@@ -1243,7 +1247,10 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn) | |||
1243 | 1247 | ||
1244 | d->priority = pn->priority; | 1248 | d->priority = pn->priority; |
1245 | 1249 | ||
1246 | d->mtu = s->mtu = btohs(pn->mtu); | 1250 | d->mtu = btohs(pn->mtu); |
1251 | |||
1252 | if (cr && d->mtu > s->mtu) | ||
1253 | d->mtu = s->mtu; | ||
1247 | 1254 | ||
1248 | return 0; | 1255 | return 0; |
1249 | } | 1256 | } |
@@ -1770,6 +1777,11 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s) | |||
1770 | s = rfcomm_session_add(nsock, BT_OPEN); | 1777 | s = rfcomm_session_add(nsock, BT_OPEN); |
1771 | if (s) { | 1778 | if (s) { |
1772 | rfcomm_session_hold(s); | 1779 | rfcomm_session_hold(s); |
1780 | |||
1781 | 		/* We should adjust the MTU on incoming sessions: | ||
1782 | 		 * L2CAP MTU minus the UIH header and FCS. */ | ||
1783 | s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5; | ||
1784 | |||
1773 | rfcomm_schedule(RFCOMM_SCHED_RX); | 1785 | rfcomm_schedule(RFCOMM_SCHED_RX); |
1774 | } else | 1786 | } else |
1775 | sock_release(nsock); | 1787 | sock_release(nsock); |
@@ -2087,6 +2099,9 @@ module_exit(rfcomm_exit); | |||
2087 | module_param(disable_cfc, bool, 0644); | 2099 | module_param(disable_cfc, bool, 0644); |
2088 | MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); | 2100 | MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); |
2089 | 2101 | ||
2102 | module_param(channel_mtu, int, 0644); | ||
2103 | MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel"); | ||
2104 | |||
2090 | module_param(l2cap_mtu, uint, 0644); | 2105 | module_param(l2cap_mtu, uint, 0644); |
2091 | MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); | 2106 | MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); |
2092 | 2107 | ||
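
The RFCOMM changes above do three related things: derive an incoming session's MTU from the smaller of the two L2CAP MTUs minus 5 bytes of UIH header and FCS, stop letting a peer's PN exchange raise the DLC MTU past the session MTU, and add a channel_mtu module parameter for advertising a fixed value. A toy calculation of the clamping, with made-up numbers:

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int omtu = 1013, imtu = 672;                /* L2CAP MTUs */
        int session_mtu = min_int(omtu, imtu) - 5;  /* UIH hdr + FCS */
        int proposed = 1024;                        /* peer's PN value */
        int dlc_mtu = min_int(proposed, session_mtu);
        printf("session=%d dlc=%d\n", session_mtu, dlc_mtu); /* 667 667 */
        return 0;
    }
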
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index cbc8a389a0a8..05b3de888243 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -61,6 +61,9 @@ static int brnf_filter_vlan_tagged = 1; | |||
61 | #define brnf_filter_vlan_tagged 1 | 61 | #define brnf_filter_vlan_tagged 1 |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | int brnf_deferred_hooks; | ||
65 | EXPORT_SYMBOL_GPL(brnf_deferred_hooks); | ||
66 | |||
64 | static __be16 inline vlan_proto(const struct sk_buff *skb) | 67 | static __be16 inline vlan_proto(const struct sk_buff *skb) |
65 | { | 68 | { |
66 | return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; | 69 | return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; |
@@ -890,6 +893,8 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb, | |||
890 | return NF_ACCEPT; | 893 | return NF_ACCEPT; |
891 | else if (ip->version == 6 && !brnf_call_ip6tables) | 894 | else if (ip->version == 6 && !brnf_call_ip6tables) |
892 | return NF_ACCEPT; | 895 | return NF_ACCEPT; |
896 | else if (!brnf_deferred_hooks) | ||
897 | return NF_ACCEPT; | ||
893 | #endif | 898 | #endif |
894 | if (hook == NF_IP_POST_ROUTING) | 899 | if (hook == NF_IP_POST_ROUTING) |
895 | return NF_ACCEPT; | 900 | return NF_ACCEPT; |
diff --git a/net/dccp/feat.h b/net/dccp/feat.h index 6048373c7186..b44c45504fb6 100644 --- a/net/dccp/feat.h +++ b/net/dccp/feat.h | |||
@@ -26,4 +26,6 @@ extern void dccp_feat_clean(struct dccp_minisock *dmsk); | |||
26 | extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); | 26 | extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); |
27 | extern int dccp_feat_init(struct dccp_minisock *dmsk); | 27 | extern int dccp_feat_init(struct dccp_minisock *dmsk); |
28 | 28 | ||
29 | extern int dccp_feat_default_sequence_window; | ||
30 | |||
29 | #endif /* _DCCP_FEAT_H */ | 31 | #endif /* _DCCP_FEAT_H */ |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index c3073e7e81d3..7f56f7e8f571 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -504,8 +504,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
504 | ireq = inet_rsk(req); | 504 | ireq = inet_rsk(req); |
505 | ireq->loc_addr = daddr; | 505 | ireq->loc_addr = daddr; |
506 | ireq->rmt_addr = saddr; | 506 | ireq->rmt_addr = saddr; |
507 | req->rcv_wnd = 100; /* Fake, option parsing will get the | 507 | req->rcv_wnd = dccp_feat_default_sequence_window; |
508 | right value */ | ||
509 | ireq->opt = NULL; | 508 | ireq->opt = NULL; |
510 | 509 | ||
511 | /* | 510 | /* |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index ff42bc43263d..9f3d4d7cd0bf 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include "dccp.h" | 32 | #include "dccp.h" |
33 | #include "ipv6.h" | 33 | #include "ipv6.h" |
34 | #include "feat.h" | ||
34 | 35 | ||
35 | /* Socket used for sending RSTs and ACKs */ | 36 | /* Socket used for sending RSTs and ACKs */ |
36 | static struct socket *dccp_v6_ctl_socket; | 37 | static struct socket *dccp_v6_ctl_socket; |
@@ -707,8 +708,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
707 | ireq = inet_rsk(req); | 708 | ireq = inet_rsk(req); |
708 | ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); | 709 | ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); |
709 | ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); | 710 | ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); |
710 | req->rcv_wnd = 100; /* Fake, option parsing will get the | 711 | req->rcv_wnd = dccp_feat_default_sequence_window; |
711 | right value */ | ||
712 | ireq6->pktopts = NULL; | 712 | ireq6->pktopts = NULL; |
713 | 713 | ||
714 | if (ipv6_opt_accepted(sk, skb) || | 714 | if (ipv6_opt_accepted(sk, skb) || |
diff --git a/net/dccp/options.c b/net/dccp/options.c index c3cda1e39aa8..daf72bb671f0 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -29,6 +29,8 @@ int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO; | |||
29 | int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; | 29 | int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; |
30 | int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; | 30 | int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; |
31 | 31 | ||
32 | EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window); | ||
33 | |||
32 | void dccp_minisock_init(struct dccp_minisock *dmsk) | 34 | void dccp_minisock_init(struct dccp_minisock *dmsk) |
33 | { | 35 | { |
34 | dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window; | 36 | dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window; |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 184c78ca79e6..212734ca238f 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -429,7 +429,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
429 | } | 429 | } |
430 | 430 | ||
431 | /* Remove any debris in the socket control block */ | 431 | /* Remove any debris in the socket control block */ |
432 | memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); | 432 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); |
433 | 433 | ||
434 | return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, | 434 | return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, |
435 | ip_rcv_finish); | 435 | ip_rcv_finish); |
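
ip_rcv() previously cleared only the options part of the skb control block, so anything else a lower layer had scribbled there survived into IP processing. The fix (mirrored for IPv6 by the IP6CB() memset a few hunks down) wipes the entire per-protocol scratch area. A sketch of the difference, with an illustrative layout:

    #include <string.h>
    #include <stdio.h>

    struct opts { int optlen; };
    struct cb   { int flags; struct opts opt; };   /* illustrative layout */

    int main(void)
    {
        struct cb cb = { .flags = 0xdead, .opt = { 4 } };  /* stale debris */

        memset(&cb.opt, 0, sizeof(cb.opt));  /* old form: flags keeps debris */
        printf("partial: flags=0x%x\n", cb.flags);

        memset(&cb, 0, sizeof(cb));          /* fixed form: whole block wiped */
        printf("full:    flags=0x%x optlen=%d\n", cb.flags, cb.opt.optlen);
        return 0;
    }
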
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9ccacf57f08b..85893eef6b16 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1578,6 +1578,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) | |||
1578 | cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); | 1578 | cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); |
1579 | 1579 | ||
1580 | if (cache==NULL) { | 1580 | if (cache==NULL) { |
1581 | struct sk_buff *skb2; | ||
1581 | struct net_device *dev; | 1582 | struct net_device *dev; |
1582 | int vif; | 1583 | int vif; |
1583 | 1584 | ||
@@ -1591,12 +1592,18 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) | |||
1591 | read_unlock(&mrt_lock); | 1592 | read_unlock(&mrt_lock); |
1592 | return -ENODEV; | 1593 | return -ENODEV; |
1593 | } | 1594 | } |
1594 | skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); | 1595 | skb2 = skb_clone(skb, GFP_ATOMIC); |
1595 | skb->nh.iph->ihl = sizeof(struct iphdr)>>2; | 1596 | if (!skb2) { |
1596 | skb->nh.iph->saddr = rt->rt_src; | 1597 | read_unlock(&mrt_lock); |
1597 | skb->nh.iph->daddr = rt->rt_dst; | 1598 | return -ENOMEM; |
1598 | skb->nh.iph->version = 0; | 1599 | } |
1599 | err = ipmr_cache_unresolved(vif, skb); | 1600 | |
1601 | skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr)); | ||
1602 | skb2->nh.iph->ihl = sizeof(struct iphdr)>>2; | ||
1603 | skb2->nh.iph->saddr = rt->rt_src; | ||
1604 | skb2->nh.iph->daddr = rt->rt_dst; | ||
1605 | skb2->nh.iph->version = 0; | ||
1606 | err = ipmr_cache_unresolved(vif, skb2); | ||
1600 | read_unlock(&mrt_lock); | 1607 | read_unlock(&mrt_lock); |
1601 | return err; | 1608 | return err; |
1602 | } | 1609 | } |
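
ipmr_get_route() used to push a fake IP header onto the caller's skb before queueing it for resolution, corrupting the netlink reply buffer it was borrowed from. The fix clones first and mutates only the clone, failing with -ENOMEM when no clone can be made. A userspace sketch of the copy-before-mutate discipline:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char orig[16] = "rtnl reply";
        char *copy = malloc(sizeof(orig));
        if (!copy)
            return 1;                  /* like the added -ENOMEM path */
        memcpy(copy, orig, sizeof(orig));

        memcpy(copy, "fakehdr", 7);    /* rewrite the copy, not the original */
        printf("orig=%s copy=%.7s\n", orig, copy);
        free(copy);
        return 0;
    }
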
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c index af35235672d5..9a39e2969712 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c | |||
@@ -1200,7 +1200,7 @@ static struct ip_conntrack_expect *find_expect(struct ip_conntrack *ct, | |||
1200 | tuple.dst.protonum = IPPROTO_TCP; | 1200 | tuple.dst.protonum = IPPROTO_TCP; |
1201 | 1201 | ||
1202 | exp = __ip_conntrack_expect_find(&tuple); | 1202 | exp = __ip_conntrack_expect_find(&tuple); |
1203 | if (exp->master == ct) | 1203 | if (exp && exp->master == ct) |
1204 | return exp; | 1204 | return exp; |
1205 | return NULL; | 1205 | return NULL; |
1206 | } | 1206 | } |
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index 7bd3c22003a2..7a9fa04a467a 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c | |||
@@ -534,6 +534,8 @@ static struct nf_hook_ops ip_conntrack_ops[] = { | |||
534 | 534 | ||
535 | /* Sysctl support */ | 535 | /* Sysctl support */ |
536 | 536 | ||
537 | int ip_conntrack_checksum = 1; | ||
538 | |||
537 | #ifdef CONFIG_SYSCTL | 539 | #ifdef CONFIG_SYSCTL |
538 | 540 | ||
539 | /* From ip_conntrack_core.c */ | 541 | /* From ip_conntrack_core.c */ |
@@ -568,8 +570,6 @@ extern unsigned int ip_ct_generic_timeout; | |||
568 | static int log_invalid_proto_min = 0; | 570 | static int log_invalid_proto_min = 0; |
569 | static int log_invalid_proto_max = 255; | 571 | static int log_invalid_proto_max = 255; |
570 | 572 | ||
571 | int ip_conntrack_checksum = 1; | ||
572 | |||
573 | static struct ctl_table_header *ip_ct_sysctl_header; | 573 | static struct ctl_table_header *ip_ct_sysctl_header; |
574 | 574 | ||
575 | static ctl_table ip_ct_sysctl_table[] = { | 575 | static ctl_table ip_ct_sysctl_table[] = { |
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c index 0b1b416759cc..18b7fbdccb61 100644 --- a/net/ipv4/netfilter/ip_nat_snmp_basic.c +++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c | |||
@@ -1255,9 +1255,9 @@ static int help(struct sk_buff **pskb, | |||
1255 | struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); | 1255 | struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); |
1256 | 1256 | ||
1257 | /* SNMP replies and originating SNMP traps get mangled */ | 1257 | /* SNMP replies and originating SNMP traps get mangled */ |
1258 | if (udph->source == ntohs(SNMP_PORT) && dir != IP_CT_DIR_REPLY) | 1258 | if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY) |
1259 | return NF_ACCEPT; | 1259 | return NF_ACCEPT; |
1260 | if (udph->dest == ntohs(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) | 1260 | if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) |
1261 | return NF_ACCEPT; | 1261 | return NF_ACCEPT; |
1262 | 1262 | ||
1263 | /* No NAT? */ | 1263 | /* No NAT? */ |
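
The SNMP NAT helper compared network-order UDP port fields against ntohs(constant). On 16-bit values ntohs() and htons() perform the identical byte swap, so the generated code does not change; the fix is about expressing the intended direction (convert the host constant to wire order) and keeping endianness annotations honest. Demonstrably:

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        unsigned short wire_port = htons(161);  /* field as it sits on the wire */

        /* Both comparisons yield the same bits, but htons(161) states
         * what is meant: a host constant converted to network order. */
        printf("%d %d\n", wire_port == htons(161), wire_port == ntohs(161));
        return 0;
    }
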
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index bd221ec3f81e..62b2762a2420 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -609,6 +609,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
609 | if (sin) { | 609 | if (sin) { |
610 | sin->sin_family = AF_INET; | 610 | sin->sin_family = AF_INET; |
611 | sin->sin_addr.s_addr = skb->nh.iph->saddr; | 611 | sin->sin_addr.s_addr = skb->nh.iph->saddr; |
612 | sin->sin_port = 0; | ||
612 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); | 613 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); |
613 | } | 614 | } |
614 | if (inet->cmsg_flags) | 615 | if (inet->cmsg_flags) |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index df8f051c0fce..25c2a9e03895 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -71,6 +71,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
71 | goto out; | 71 | goto out; |
72 | } | 72 | } |
73 | 73 | ||
74 | memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); | ||
75 | |||
74 | /* | 76 | /* |
75 | * Store incoming device index. When the packet will | 77 | * Store incoming device index. When the packet will |
76 | * be queued, we cannot refer to skb->dev anymore. | 78 | * be queued, we cannot refer to skb->dev anymore. |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index fa1ce0ae123e..d57e61ce4a7d 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -411,6 +411,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
411 | /* Copy the address. */ | 411 | /* Copy the address. */ |
412 | if (sin6) { | 412 | if (sin6) { |
413 | sin6->sin6_family = AF_INET6; | 413 | sin6->sin6_family = AF_INET6; |
414 | sin6->sin6_port = 0; | ||
414 | ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); | 415 | ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); |
415 | sin6->sin6_flowinfo = 0; | 416 | sin6->sin6_flowinfo = 0; |
416 | sin6->sin6_scope_id = 0; | 417 | sin6->sin6_scope_id = 0; |
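
Both raw recvmsg() hunks plug the same small infoleak: the sockaddr handed back to userspace had every field filled in except the port, so a couple of bytes of stale kernel stack escaped with each recvfrom(). Raw sockets carry no port, hence the explicit zero. A sketch with a stand-in struct:

    #include <stdio.h>
    #include <string.h>

    struct sockaddr_in_sketch {       /* stand-in, not the real layout */
        short          family;
        unsigned short port;
        unsigned int   addr;
        char           zero[8];
    };

    int main(void)
    {
        struct sockaddr_in_sketch sin;
        memset(&sin, 0x41, sizeof(sin)); /* stand-in for stale stack bytes */

        sin.family = 2;                  /* AF_INET */
        sin.addr   = 0x0100007f;         /* 127.0.0.1 */
        sin.port   = 0;                  /* without this, 0x4141 leaks out */
        memset(sin.zero, 0, sizeof(sin.zero));

        printf("port=%u\n", sin.port);   /* 0 */
        return 0;
    }
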
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 6b44fe8516c3..c8f9369c2a87 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -31,27 +31,6 @@ | |||
31 | #include <linux/icmpv6.h> | 31 | #include <linux/icmpv6.h> |
32 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
33 | 33 | ||
34 | #ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG | ||
35 | # define X6TDEBUG 3 | ||
36 | #else | ||
37 | # define X6TDEBUG 1 | ||
38 | #endif | ||
39 | |||
40 | #define X6TPRINTK(fmt, args...) printk(fmt, ## args) | ||
41 | #define X6TNOPRINTK(fmt, args...) do { ; } while(0) | ||
42 | |||
43 | #if X6TDEBUG >= 1 | ||
44 | # define X6TPRINTK1 X6TPRINTK | ||
45 | #else | ||
46 | # define X6TPRINTK1 X6TNOPRINTK | ||
47 | #endif | ||
48 | |||
49 | #if X6TDEBUG >= 3 | ||
50 | # define X6TPRINTK3 X6TPRINTK | ||
51 | #else | ||
52 | # define X6TPRINTK3 X6TNOPRINTK | ||
53 | #endif | ||
54 | |||
55 | /* | 34 | /* |
56 | * xfrm_tunnel_spi things are for allocating unique id ("spi") | 35 | * xfrm_tunnel_spi things are for allocating unique id ("spi") |
57 | * per xfrm_address_t. | 36 | * per xfrm_address_t. |
@@ -62,15 +41,8 @@ struct xfrm6_tunnel_spi { | |||
62 | xfrm_address_t addr; | 41 | xfrm_address_t addr; |
63 | u32 spi; | 42 | u32 spi; |
64 | atomic_t refcnt; | 43 | atomic_t refcnt; |
65 | #ifdef XFRM6_TUNNEL_SPI_MAGIC | ||
66 | u32 magic; | ||
67 | #endif | ||
68 | }; | 44 | }; |
69 | 45 | ||
70 | #ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG | ||
71 | # define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef | ||
72 | #endif | ||
73 | |||
74 | static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); | 46 | static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock); |
75 | 47 | ||
76 | static u32 xfrm6_tunnel_spi; | 48 | static u32 xfrm6_tunnel_spi; |
@@ -86,43 +58,15 @@ static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly; | |||
86 | static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; | 58 | static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; |
87 | static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; | 59 | static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; |
88 | 60 | ||
89 | #ifdef XFRM6_TUNNEL_SPI_MAGIC | ||
90 | static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi, | ||
91 | const char *name) | ||
92 | { | ||
93 | if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) { | ||
94 | X6TPRINTK3(KERN_DEBUG "%s(): x6spi object " | ||
95 | "at %p has corrupted magic %08x " | ||
96 | "(should be %08x)\n", | ||
97 | name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC); | ||
98 | return -1; | ||
99 | } | ||
100 | return 0; | ||
101 | } | ||
102 | #else | ||
103 | static int inline x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi, | ||
104 | const char *name) | ||
105 | { | ||
106 | return 0; | ||
107 | } | ||
108 | #endif | ||
109 | |||
110 | #define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__) | ||
111 | |||
112 | |||
113 | static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) | 61 | static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) |
114 | { | 62 | { |
115 | unsigned h; | 63 | unsigned h; |
116 | 64 | ||
117 | X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr); | ||
118 | |||
119 | h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]; | 65 | h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]; |
120 | h ^= h >> 16; | 66 | h ^= h >> 16; |
121 | h ^= h >> 8; | 67 | h ^= h >> 8; |
122 | h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; | 68 | h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; |
123 | 69 | ||
124 | X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h); | ||
125 | |||
126 | return h; | 70 | return h; |
127 | } | 71 | } |
128 | 72 | ||
@@ -136,19 +80,13 @@ static int xfrm6_tunnel_spi_init(void) | |||
136 | { | 80 | { |
137 | int i; | 81 | int i; |
138 | 82 | ||
139 | X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); | ||
140 | |||
141 | xfrm6_tunnel_spi = 0; | 83 | xfrm6_tunnel_spi = 0; |
142 | xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", | 84 | xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", |
143 | sizeof(struct xfrm6_tunnel_spi), | 85 | sizeof(struct xfrm6_tunnel_spi), |
144 | 0, SLAB_HWCACHE_ALIGN, | 86 | 0, SLAB_HWCACHE_ALIGN, |
145 | NULL, NULL); | 87 | NULL, NULL); |
146 | if (!xfrm6_tunnel_spi_kmem) { | 88 | if (!xfrm6_tunnel_spi_kmem) |
147 | X6TPRINTK1(KERN_ERR | ||
148 | "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n", | ||
149 | __FUNCTION__); | ||
150 | return -ENOMEM; | 89 | return -ENOMEM; |
151 | } | ||
152 | 90 | ||
153 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) | 91 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) |
154 | INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]); | 92 | INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]); |
@@ -161,22 +99,16 @@ static void xfrm6_tunnel_spi_fini(void) | |||
161 | { | 99 | { |
162 | int i; | 100 | int i; |
163 | 101 | ||
164 | X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); | ||
165 | |||
166 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) { | 102 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) { |
167 | if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i])) | 103 | if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i])) |
168 | goto err; | 104 | return; |
169 | } | 105 | } |
170 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) { | 106 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) { |
171 | if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) | 107 | if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) |
172 | goto err; | 108 | return; |
173 | } | 109 | } |
174 | kmem_cache_destroy(xfrm6_tunnel_spi_kmem); | 110 | kmem_cache_destroy(xfrm6_tunnel_spi_kmem); |
175 | xfrm6_tunnel_spi_kmem = NULL; | 111 | xfrm6_tunnel_spi_kmem = NULL; |
176 | return; | ||
177 | err: | ||
178 | X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__); | ||
179 | return; | ||
180 | } | 112 | } |
181 | 113 | ||
182 | static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | 114 | static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) |
@@ -184,19 +116,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | |||
184 | struct xfrm6_tunnel_spi *x6spi; | 116 | struct xfrm6_tunnel_spi *x6spi; |
185 | struct hlist_node *pos; | 117 | struct hlist_node *pos; |
186 | 118 | ||
187 | X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); | ||
188 | |||
189 | hlist_for_each_entry(x6spi, pos, | 119 | hlist_for_each_entry(x6spi, pos, |
190 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], | 120 | &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], |
191 | list_byaddr) { | 121 | list_byaddr) { |
192 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { | 122 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) |
193 | X6SPI_CHECK_MAGIC(x6spi); | ||
194 | X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi); | ||
195 | return x6spi; | 123 | return x6spi; |
196 | } | ||
197 | } | 124 | } |
198 | 125 | ||
199 | X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__); | ||
200 | return NULL; | 126 | return NULL; |
201 | } | 127 | } |
202 | 128 | ||
@@ -205,8 +131,6 @@ u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | |||
205 | struct xfrm6_tunnel_spi *x6spi; | 131 | struct xfrm6_tunnel_spi *x6spi; |
206 | u32 spi; | 132 | u32 spi; |
207 | 133 | ||
208 | X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); | ||
209 | |||
210 | read_lock_bh(&xfrm6_tunnel_spi_lock); | 134 | read_lock_bh(&xfrm6_tunnel_spi_lock); |
211 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); | 135 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); |
212 | spi = x6spi ? x6spi->spi : 0; | 136 | spi = x6spi ? x6spi->spi : 0; |
@@ -223,8 +147,6 @@ static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) | |||
223 | struct hlist_node *pos; | 147 | struct hlist_node *pos; |
224 | unsigned index; | 148 | unsigned index; |
225 | 149 | ||
226 | X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); | ||
227 | |||
228 | if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || | 150 | if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || |
229 | xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) | 151 | xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) |
230 | xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; | 152 | xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; |
@@ -258,18 +180,10 @@ try_next_2:; | |||
258 | spi = 0; | 180 | spi = 0; |
259 | goto out; | 181 | goto out; |
260 | alloc_spi: | 182 | alloc_spi: |
261 | X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n", | ||
262 | __FUNCTION__, | ||
263 | NIP6(*(struct in6_addr *)saddr)); | ||
264 | x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); | 183 | x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); |
265 | if (!x6spi) { | 184 | if (!x6spi) |
266 | X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n", | ||
267 | __FUNCTION__); | ||
268 | goto out; | 185 | goto out; |
269 | } | 186 | |
270 | #ifdef XFRM6_TUNNEL_SPI_MAGIC | ||
271 | x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC; | ||
272 | #endif | ||
273 | memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); | 187 | memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); |
274 | x6spi->spi = spi; | 188 | x6spi->spi = spi; |
275 | atomic_set(&x6spi->refcnt, 1); | 189 | atomic_set(&x6spi->refcnt, 1); |
@@ -278,9 +192,7 @@ alloc_spi: | |||
278 | 192 | ||
279 | index = xfrm6_tunnel_spi_hash_byaddr(saddr); | 193 | index = xfrm6_tunnel_spi_hash_byaddr(saddr); |
280 | hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); | 194 | hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); |
281 | X6SPI_CHECK_MAGIC(x6spi); | ||
282 | out: | 195 | out: |
283 | X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi); | ||
284 | return spi; | 196 | return spi; |
285 | } | 197 | } |
286 | 198 | ||
@@ -289,8 +201,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) | |||
289 | struct xfrm6_tunnel_spi *x6spi; | 201 | struct xfrm6_tunnel_spi *x6spi; |
290 | u32 spi; | 202 | u32 spi; |
291 | 203 | ||
292 | X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); | ||
293 | |||
294 | write_lock_bh(&xfrm6_tunnel_spi_lock); | 204 | write_lock_bh(&xfrm6_tunnel_spi_lock); |
295 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); | 205 | x6spi = __xfrm6_tunnel_spi_lookup(saddr); |
296 | if (x6spi) { | 206 | if (x6spi) { |
@@ -300,8 +210,6 @@ u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) | |||
300 | spi = __xfrm6_tunnel_alloc_spi(saddr); | 210 | spi = __xfrm6_tunnel_alloc_spi(saddr); |
301 | write_unlock_bh(&xfrm6_tunnel_spi_lock); | 211 | write_unlock_bh(&xfrm6_tunnel_spi_lock); |
302 | 212 | ||
303 | X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi); | ||
304 | |||
305 | return spi; | 213 | return spi; |
306 | } | 214 | } |
307 | 215 | ||
@@ -312,8 +220,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) | |||
312 | struct xfrm6_tunnel_spi *x6spi; | 220 | struct xfrm6_tunnel_spi *x6spi; |
313 | struct hlist_node *pos, *n; | 221 | struct hlist_node *pos, *n; |
314 | 222 | ||
315 | X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); | ||
316 | |||
317 | write_lock_bh(&xfrm6_tunnel_spi_lock); | 223 | write_lock_bh(&xfrm6_tunnel_spi_lock); |
318 | 224 | ||
319 | hlist_for_each_entry_safe(x6spi, pos, n, | 225 | hlist_for_each_entry_safe(x6spi, pos, n, |
@@ -321,12 +227,6 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) | |||
321 | list_byaddr) | 227 | list_byaddr) |
322 | { | 228 | { |
323 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { | 229 | if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { |
324 | X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT | ||
325 | " found at %p\n", | ||
326 | __FUNCTION__, | ||
327 | NIP6(*(struct in6_addr *)saddr), | ||
328 | x6spi); | ||
329 | X6SPI_CHECK_MAGIC(x6spi); | ||
330 | if (atomic_dec_and_test(&x6spi->refcnt)) { | 230 | if (atomic_dec_and_test(&x6spi->refcnt)) { |
331 | hlist_del(&x6spi->list_byaddr); | 231 | hlist_del(&x6spi->list_byaddr); |
332 | hlist_del(&x6spi->list_byspi); | 232 | hlist_del(&x6spi->list_byspi); |
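Across these hunks the SPI entries are plainly reference-counted: the allocation path either reuses an existing entry or creates a fresh one with its refcnt set to 1, and xfrm6_tunnel_free_spi() unlinks an entry from both hash tables only once the count drops to zero. A sketch of that lifecycle follows; the refcnt increment on the reuse path and the final kmem_cache_free() call are not visible in the hunks and are assumed here:

	/* allocation side, under write_lock_bh(&xfrm6_tunnel_spi_lock) */
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	if (x6spi)
		atomic_inc(&x6spi->refcnt);	/* assumed: reuse bumps the count */
	else
		spi = __xfrm6_tunnel_alloc_spi(saddr);	/* new entry, refcnt = 1 */

	/* release side, also under the write lock */
	if (atomic_dec_and_test(&x6spi->refcnt)) {
		hlist_del(&x6spi->list_byaddr);
		hlist_del(&x6spi->list_byspi);
		kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);	/* assumed */
	}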
@@ -377,20 +277,14 @@ static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
377 | case ICMPV6_ADDR_UNREACH: | 277 | case ICMPV6_ADDR_UNREACH: |
378 | case ICMPV6_PORT_UNREACH: | 278 | case ICMPV6_PORT_UNREACH: |
379 | default: | 279 | default: |
380 | X6TPRINTK3(KERN_DEBUG | ||
381 | "xfrm6_tunnel: Destination Unreach.\n"); | ||
382 | break; | 280 | break; |
383 | } | 281 | } |
384 | break; | 282 | break; |
385 | case ICMPV6_PKT_TOOBIG: | 283 | case ICMPV6_PKT_TOOBIG: |
386 | X6TPRINTK3(KERN_DEBUG | ||
387 | "xfrm6_tunnel: Packet Too Big.\n"); | ||
388 | break; | 284 | break; |
389 | case ICMPV6_TIME_EXCEED: | 285 | case ICMPV6_TIME_EXCEED: |
390 | switch (code) { | 286 | switch (code) { |
391 | case ICMPV6_EXC_HOPLIMIT: | 287 | case ICMPV6_EXC_HOPLIMIT: |
392 | X6TPRINTK3(KERN_DEBUG | ||
393 | "xfrm6_tunnel: Too small Hoplimit.\n"); | ||
394 | break; | 288 | break; |
395 | case ICMPV6_EXC_FRAGTIME: | 289 | case ICMPV6_EXC_FRAGTIME: |
396 | default: | 290 | default: |
@@ -447,22 +341,14 @@ static struct xfrm6_tunnel xfrm6_tunnel_handler = { | |||
447 | 341 | ||
448 | static int __init xfrm6_tunnel_init(void) | 342 | static int __init xfrm6_tunnel_init(void) |
449 | { | 343 | { |
450 | X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); | 344 | if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) |
451 | |||
452 | if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) { | ||
453 | X6TPRINTK1(KERN_ERR | ||
454 | "xfrm6_tunnel init: can't add xfrm type\n"); | ||
455 | return -EAGAIN; | 345 | return -EAGAIN; |
456 | } | 346 | |
457 | if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) { | 347 | if (xfrm6_tunnel_register(&xfrm6_tunnel_handler)) { |
458 | X6TPRINTK1(KERN_ERR | ||
459 | "xfrm6_tunnel init(): can't add handler\n"); | ||
460 | xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); | 348 | xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); |
461 | return -EAGAIN; | 349 | return -EAGAIN; |
462 | } | 350 | } |
463 | if (xfrm6_tunnel_spi_init() < 0) { | 351 | if (xfrm6_tunnel_spi_init() < 0) { |
464 | X6TPRINTK1(KERN_ERR | ||
465 | "xfrm6_tunnel init: failed to initialize spi\n"); | ||
466 | xfrm6_tunnel_deregister(&xfrm6_tunnel_handler); | 352 | xfrm6_tunnel_deregister(&xfrm6_tunnel_handler); |
467 | xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); | 353 | xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); |
468 | return -EAGAIN; | 354 | return -EAGAIN; |
@@ -472,15 +358,9 @@ static int __init xfrm6_tunnel_init(void) | |||
472 | 358 | ||
473 | static void __exit xfrm6_tunnel_fini(void) | 359 | static void __exit xfrm6_tunnel_fini(void) |
474 | { | 360 | { |
475 | X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); | ||
476 | |||
477 | xfrm6_tunnel_spi_fini(); | 361 | xfrm6_tunnel_spi_fini(); |
478 | if (xfrm6_tunnel_deregister(&xfrm6_tunnel_handler)) | 362 | xfrm6_tunnel_deregister(&xfrm6_tunnel_handler); |
479 | X6TPRINTK1(KERN_ERR | 363 | xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); |
480 | "xfrm6_tunnel close: can't remove handler\n"); | ||
481 | if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0) | ||
482 | X6TPRINTK1(KERN_ERR | ||
483 | "xfrm6_tunnel close: can't remove xfrm type\n"); | ||
484 | } | 364 | } |
485 | 365 | ||
486 | module_init(xfrm6_tunnel_init); | 366 | module_init(xfrm6_tunnel_init); |
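Taken together, the xfrm6_tunnel changes strip the module-private X6TPRINTK debug macros, the magic-number checking, and the error labels that existed only to print diagnostics, leaving plain error returns. As a sketch of the net effect, xfrm6_tunnel_spi_init() reduces to roughly the following; the cache name and size arguments to kmem_cache_create() are not visible in the hunk and are assumed:

	static int xfrm6_tunnel_spi_init(void)
	{
		int i;

		/* leading arguments assumed; only the tail appears in the hunk */
		xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
						sizeof(struct xfrm6_tunnel_spi),
						0, SLAB_HWCACHE_ALIGN,
						NULL, NULL);
		if (!xfrm6_tunnel_spi_kmem)
			return -ENOMEM;		/* no debug printk, just the code */

		for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
			INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
		/* ... the byspi hash table is initialised the same way ... */
		return 0;
	}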
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 42a178aa30f9..a9894ddfd72a 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -386,8 +386,8 @@ config NETFILTER_XT_MATCH_REALM | |||
386 | <file:Documentation/modules.txt>. If unsure, say `N'. | 386 | <file:Documentation/modules.txt>. If unsure, say `N'. |
387 | 387 | ||
388 | config NETFILTER_XT_MATCH_SCTP | 388 | config NETFILTER_XT_MATCH_SCTP |
389 | tristate '"sctp" protocol match support' | 389 | tristate '"sctp" protocol match support (EXPERIMENTAL)' |
390 | depends on NETFILTER_XTABLES | 390 | depends on NETFILTER_XTABLES && EXPERIMENTAL |
391 | help | 391 | help |
392 | With this option enabled, you will be able to use the | 392 | With this option enabled, you will be able to use the |
393 | `sctp' match in order to match on SCTP source/destination ports | 393 | `sctp' match in order to match on SCTP source/destination ports |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 5fcab2ef231f..4ef836699962 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -428,6 +428,8 @@ static struct file_operations ct_cpu_seq_fops = { | |||
428 | 428 | ||
429 | /* Sysctl support */ | 429 | /* Sysctl support */ |
430 | 430 | ||
431 | int nf_conntrack_checksum = 1; | ||
432 | |||
431 | #ifdef CONFIG_SYSCTL | 433 | #ifdef CONFIG_SYSCTL |
432 | 434 | ||
433 | /* From nf_conntrack_core.c */ | 435 | /* From nf_conntrack_core.c */ |
@@ -459,8 +461,6 @@ extern unsigned int nf_ct_generic_timeout; | |||
459 | static int log_invalid_proto_min = 0; | 461 | static int log_invalid_proto_min = 0; |
460 | static int log_invalid_proto_max = 255; | 462 | static int log_invalid_proto_max = 255; |
461 | 463 | ||
462 | int nf_conntrack_checksum = 1; | ||
463 | |||
464 | static struct ctl_table_header *nf_ct_sysctl_header; | 464 | static struct ctl_table_header *nf_ct_sysctl_header; |
465 | 465 | ||
466 | static ctl_table nf_ct_sysctl_table[] = { | 466 | static ctl_table nf_ct_sysctl_table[] = { |
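Moving nf_conntrack_checksum above the #ifdef is a build correctness fix rather than a behaviour change: the flag is tested by conntrack code outside this block regardless of CONFIG_SYSCTL, so defining it only inside the sysctl section would leave !CONFIG_SYSCTL builds with an undefined reference. The resulting layout, as a sketch:

	/* always defined, whatever the config */
	int nf_conntrack_checksum = 1;

	#ifdef CONFIG_SYSCTL
	/* ... ctl_table entry whose .data points at &nf_conntrack_checksum,
	 * exposing the flag as a tunable only when sysctl support is in ... */
	#endif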
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index bb6fcee452ca..662a869593bf 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -219,21 +219,20 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info, | |||
219 | 219 | ||
220 | switch (verdict & NF_VERDICT_MASK) { | 220 | switch (verdict & NF_VERDICT_MASK) { |
221 | case NF_ACCEPT: | 221 | case NF_ACCEPT: |
222 | case NF_STOP: | ||
222 | info->okfn(skb); | 223 | info->okfn(skb); |
224 | case NF_STOLEN: | ||
223 | break; | 225 | break; |
224 | |||
225 | case NF_QUEUE: | 226 | case NF_QUEUE: |
226 | if (!nf_queue(&skb, elem, info->pf, info->hook, | 227 | if (!nf_queue(&skb, elem, info->pf, info->hook, |
227 | info->indev, info->outdev, info->okfn, | 228 | info->indev, info->outdev, info->okfn, |
228 | verdict >> NF_VERDICT_BITS)) | 229 | verdict >> NF_VERDICT_BITS)) |
229 | goto next_hook; | 230 | goto next_hook; |
230 | break; | 231 | break; |
232 | default: | ||
233 | kfree_skb(skb); | ||
231 | } | 234 | } |
232 | rcu_read_unlock(); | 235 | rcu_read_unlock(); |
233 | |||
234 | if (verdict == NF_DROP) | ||
235 | kfree_skb(skb); | ||
236 | |||
237 | kfree(info); | 236 | kfree(info); |
238 | return; | 237 | return; |
239 | } | 238 | } |
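The rewritten verdict handling in nf_reinject() folds the old trailing "if (verdict == NF_DROP) kfree_skb(skb)" into the switch itself: NF_STOP is now reinjected just like NF_ACCEPT, NF_STOLEN deliberately falls through to the break without touching the skb (its ownership has passed elsewhere), and every unrecognised verdict, NF_DROP included, frees the packet instead of leaking it. Reduced to a sketch:

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		info->okfn(skb);	/* hand the packet back to the stack */
		/* fall through */
	case NF_STOLEN:
		break;			/* skb owned elsewhere; do not free */
	case NF_QUEUE:
		if (!nf_queue(&skb, elem, info->pf, info->hook,
			      info->indev, info->outdev, info->okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;	/* queueing failed; continue traversal */
		break;
	default:
		kfree_skb(skb);		/* NF_DROP and anything unrecognised */
	}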
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c index 5fe4c9df17f5..a9f4f6f3c628 100644 --- a/net/netfilter/xt_physdev.c +++ b/net/netfilter/xt_physdev.c | |||
@@ -113,6 +113,21 @@ checkentry(const char *tablename, | |||
113 | if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || | 113 | if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || |
114 | info->bitmask & ~XT_PHYSDEV_OP_MASK) | 114 | info->bitmask & ~XT_PHYSDEV_OP_MASK) |
115 | return 0; | 115 | return 0; |
116 | if (brnf_deferred_hooks == 0 && | ||
117 | info->bitmask & XT_PHYSDEV_OP_OUT && | ||
118 | (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || | ||
119 | info->invert & XT_PHYSDEV_OP_BRIDGED) && | ||
120 | hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) | | ||
121 | (1 << NF_IP_POST_ROUTING))) { | ||
122 | printk(KERN_WARNING "physdev match: using --physdev-out in the " | ||
123 | "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " | ||
124 | "traffic is deprecated and breaks other things, it will " | ||
125 | "be removed in January 2007. See Documentation/" | ||
126 | "feature-removal-schedule.txt for details. This doesn't " | ||
127 | "affect you in case you're using it for purely bridged " | ||
128 | "traffic.\n"); | ||
129 | brnf_deferred_hooks = 1; | ||
130 | } | ||
116 | return 1; | 131 | return 1; |
117 | } | 132 | } |
118 | 133 | ||
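The guard on the new physdev warning packs several conditions into one expression. A commented restatement (identifiers exactly as in the hunk, the printk body elided): the warning fires, and hook deferral is switched on, the first time a rule is added that uses --physdev-out, does not restrict itself to bridged traffic, and sits in a chain where the bridge output port is only known via the deferred hooks.

	if (brnf_deferred_hooks == 0 &&			/* not yet enabled */
	    info->bitmask & XT_PHYSDEV_OP_OUT &&	/* --physdev-out given */
	    (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
	     info->invert & XT_PHYSDEV_OP_BRIDGED) &&	/* not bridged-only */
	    hook_mask & ((1 << NF_IP_LOCAL_OUT) |
			 (1 << NF_IP_FORWARD) |
			 (1 << NF_IP_POST_ROUTING))) {
		/* ... print the deprecation warning once ... */
		brnf_deferred_hooks = 1;
	}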
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c index 3ac703b5cb8f..d2f5320a80bf 100644 --- a/net/netfilter/xt_pkttype.c +++ b/net/netfilter/xt_pkttype.c | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <linux/skbuff.h> | 9 | #include <linux/skbuff.h> |
10 | #include <linux/if_ether.h> | 10 | #include <linux/if_ether.h> |
11 | #include <linux/if_packet.h> | 11 | #include <linux/if_packet.h> |
12 | #include <linux/in.h> | ||
13 | #include <linux/ip.h> | ||
12 | 14 | ||
13 | #include <linux/netfilter/xt_pkttype.h> | 15 | #include <linux/netfilter/xt_pkttype.h> |
14 | #include <linux/netfilter/x_tables.h> | 16 | #include <linux/netfilter/x_tables.h> |
@@ -28,9 +30,17 @@ static int match(const struct sk_buff *skb, | |||
28 | unsigned int protoff, | 30 | unsigned int protoff, |
29 | int *hotdrop) | 31 | int *hotdrop) |
30 | { | 32 | { |
33 | u_int8_t type; | ||
31 | const struct xt_pkttype_info *info = matchinfo; | 34 | const struct xt_pkttype_info *info = matchinfo; |
32 | 35 | ||
33 | return (skb->pkt_type == info->pkttype) ^ info->invert; | 36 | if (skb->pkt_type == PACKET_LOOPBACK) |
37 | type = (MULTICAST(skb->nh.iph->daddr) | ||
38 | ? PACKET_MULTICAST | ||
39 | : PACKET_BROADCAST); | ||
40 | else | ||
41 | type = skb->pkt_type; | ||
42 | |||
43 | return (type == info->pkttype) ^ info->invert; | ||
34 | } | 44 | } |
35 | 45 | ||
36 | static struct xt_match pkttype_match = { | 46 | static struct xt_match pkttype_match = { |
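The xt_pkttype change addresses packets delivered back to the sending host: locally generated multicast and broadcast traffic arrives with skb->pkt_type set to PACKET_LOOPBACK, so a rule matching on PACKET_MULTICAST or PACKET_BROADCAST would otherwise never see it. The match now derives an effective type from the IPv4 destination address, which is what the new <linux/in.h> and <linux/ip.h> includes supply; MULTICAST() tests for a 224.0.0.0/4 address, and note the IPv4-only assumption in reading skb->nh.iph:

	u_int8_t type;

	if (skb->pkt_type == PACKET_LOOPBACK)
		/* looped-back skbs carry no useful pkt_type; classify by
		 * destination: 224.0.0.0/4 is multicast, anything else is
		 * treated as broadcast */
		type = MULTICAST(skb->nh.iph->daddr) ? PACKET_MULTICAST
						     : PACKET_BROADCAST;
	else
		type = skb->pkt_type;

	return (type == info->pkttype) ^ info->invert;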