-rw-r--r--  Documentation/DocBook/Makefile | 2
-rw-r--r--  Documentation/DocBook/kernel-api.tmpl | 1
-rw-r--r--  Documentation/acpi-hotkey.txt | 2
-rw-r--r--  Documentation/fujitsu/frv/kernel-ABI.txt | 192
-rw-r--r--  Documentation/kernel-parameters.txt | 34
-rw-r--r--  Documentation/networking/packet_mmap.txt | 2
-rw-r--r--  Documentation/networking/tuntap.txt | 2
-rw-r--r--  arch/i386/kernel/crash.c | 2
-rw-r--r--  block/ll_rw_blk.c | 2
-rw-r--r--  drivers/md/dm-target.c | 3
-rw-r--r--  drivers/md/raid1.c | 6
-rw-r--r--  drivers/md/raid10.c | 6
-rw-r--r--  drivers/md/raid5.c | 34
-rw-r--r--  drivers/md/raid6main.c | 29
-rw-r--r--  drivers/mtd/chips/Kconfig | 21
-rw-r--r--  drivers/net/8139cp.c | 12
-rw-r--r--  drivers/net/arcnet/arcnet.c | 3
-rw-r--r--  drivers/net/b44.c | 3
-rw-r--r--  drivers/net/chelsio/sge.c | 3
-rw-r--r--  drivers/net/e1000/e1000_main.c | 3
-rw-r--r--  drivers/net/eql.c | 3
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 3
-rw-r--r--  drivers/net/ne2k-pci.c | 4
-rw-r--r--  drivers/net/ns83820.c | 3
-rw-r--r--  drivers/net/starfire.c | 3
-rw-r--r--  drivers/net/tg3.c | 15
-rw-r--r--  drivers/net/tokenring/abyss.c | 3
-rw-r--r--  drivers/net/tokenring/madgemc.c | 3
-rw-r--r--  drivers/net/wireless/ipw2200.c | 9
-rw-r--r--  drivers/net/yellowfin.c | 3
-rw-r--r--  drivers/s390/block/dasd_erp.c | 8
-rw-r--r--  drivers/s390/char/sclp_rw.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 13
-rw-r--r--  drivers/s390/net/lcs.c | 13
-rw-r--r--  drivers/scsi/aic7xxx/Kconfig.aic7xxx | 2
-rw-r--r--  drivers/serial/jsm/jsm.h | 2
-rw-r--r--  drivers/serial/jsm/jsm_driver.c | 2
-rw-r--r--  drivers/serial/jsm/jsm_neo.c | 2
-rw-r--r--  fs/direct-io.c | 3
-rw-r--r--  fs/dquot.c | 6
-rw-r--r--  fs/exec.c | 2
-rw-r--r--  fs/fcntl.c | 3
-rw-r--r--  fs/freevxfs/vxfs_olt.c | 9
-rw-r--r--  fs/hfsplus/bnode.c | 6
-rw-r--r--  fs/hfsplus/btree.c | 3
-rw-r--r--  fs/inode.c | 15
-rw-r--r--  fs/jffs2/background.c | 3
-rw-r--r--  fs/smbfs/file.c | 6
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/sysfs/inode.c | 3
-rw-r--r--  fs/sysv/dir.c | 6
-rw-r--r--  fs/udf/inode.c | 6
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  ipc/shm.c | 15
-rw-r--r--  ipc/util.c | 6
-rw-r--r--  kernel/power/Kconfig | 2
-rw-r--r--  kernel/printk.c | 6
-rw-r--r--  kernel/ptrace.c | 3
-rw-r--r--  kernel/signal.c | 6
-rw-r--r--  kernel/time.c | 8
-rw-r--r--  kernel/timer.c | 3
-rw-r--r--  mm/highmem.c | 15
-rw-r--r--  mm/mmap.c | 9
-rw-r--r--  mm/page-writeback.c | 2
-rw-r--r--  mm/slab.c | 18
-rw-r--r--  mm/swap_state.c | 3
-rw-r--r--  mm/vmalloc.c | 3
68 files changed, 262 insertions, 369 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 7d87dd73cbe..5a2882d275b 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -2,7 +2,7 @@
2# This makefile is used to generate the kernel documentation, 2# This makefile is used to generate the kernel documentation,
3# primarily based on in-line comments in various source files. 3# primarily based on in-line comments in various source files.
4# See Documentation/kernel-doc-nano-HOWTO.txt for instruction in how 4# See Documentation/kernel-doc-nano-HOWTO.txt for instruction in how
5# to ducument the SRC - and how to read it. 5# to document the SRC - and how to read it.
6# To add a new book the only step required is to add the book to the 6# To add a new book the only step required is to add the book to the
7# list of DOCBOOKS. 7# list of DOCBOOKS.
8 8
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 8c9c6704e85..ca02e04a906 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -322,7 +322,6 @@ X!Earch/i386/kernel/mca.c
322 <chapter id="sysfs"> 322 <chapter id="sysfs">
323 <title>The Filesystem for Exporting Kernel Objects</title> 323 <title>The Filesystem for Exporting Kernel Objects</title>
324!Efs/sysfs/file.c 324!Efs/sysfs/file.c
325!Efs/sysfs/dir.c
326!Efs/sysfs/symlink.c 325!Efs/sysfs/symlink.c
327!Efs/sysfs/bin.c 326!Efs/sysfs/bin.c
328 </chapter> 327 </chapter>
diff --git a/Documentation/acpi-hotkey.txt b/Documentation/acpi-hotkey.txt
index 744f1aec655..38040fa3764 100644
--- a/Documentation/acpi-hotkey.txt
+++ b/Documentation/acpi-hotkey.txt
@@ -30,7 +30,7 @@ specific hotkey(event))
30echo "event_num:event_type:event_argument" > 30echo "event_num:event_type:event_argument" >
31 /proc/acpi/hotkey/action. 31 /proc/acpi/hotkey/action.
32The result of the execution of this aml method is 32The result of the execution of this aml method is
33attached to /proc/acpi/hotkey/poll_method, which is dnyamically 33attached to /proc/acpi/hotkey/poll_method, which is dynamically
34created. Please use command "cat /proc/acpi/hotkey/polling_method" 34created. Please use command "cat /proc/acpi/hotkey/polling_method"
35to retrieve it. 35to retrieve it.
36 36
diff --git a/Documentation/fujitsu/frv/kernel-ABI.txt b/Documentation/fujitsu/frv/kernel-ABI.txt
index 0ed9b0a779b..8b0a5fc8bfd 100644
--- a/Documentation/fujitsu/frv/kernel-ABI.txt
+++ b/Documentation/fujitsu/frv/kernel-ABI.txt
@@ -1,17 +1,19 @@
1 ================================= 1 =================================
2 INTERNAL KERNEL ABI FOR FR-V ARCH 2 INTERNAL KERNEL ABI FOR FR-V ARCH
3 ================================= 3 =================================
4 4
5The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers 5The internal FRV kernel ABI is not quite the same as the userspace ABI. A
6are used for special purposed, and the ABI is not consistent between modules vs core, and MMU vs 6number of the registers are used for special purposed, and the ABI is not
7no-MMU. 7consistent between modules vs core, and MMU vs no-MMU.
8 8
9This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and 9This partly stems from the fact that FRV CPUs do not have a separate
10most of them do not have any scratch registers, thus requiring at least one general purpose 10supervisor stack pointer, and most of them do not have any scratch
11register to be clobbered in such an event. Also, within the kernel core, it is possible to simply 11registers, thus requiring at least one general purpose register to be
12jump or call directly between functions using a relative offset. This cannot be extended to modules 12clobbered in such an event. Also, within the kernel core, it is possible to
13for the displacement is likely to be too far. Thus in modules the address of a function to call 13simply jump or call directly between functions using a relative offset.
14must be calculated in a register and then used, requiring two extra instructions. 14This cannot be extended to modules for the displacement is likely to be too
15far. Thus in modules the address of a function to call must be calculated
16in a register and then used, requiring two extra instructions.
15 17
16This document has the following sections: 18This document has the following sections:
17 19
@@ -39,7 +41,8 @@ When a system call is made, the following registers are effective:
39CPU OPERATING MODES 41CPU OPERATING MODES
40=================== 42===================
41 43
42The FR-V CPU has three basic operating modes. In order of increasing capability: 44The FR-V CPU has three basic operating modes. In order of increasing
45capability:
43 46
44 (1) User mode. 47 (1) User mode.
45 48
@@ -47,42 +50,46 @@ The FR-V CPU has three basic operating modes. In order of increasing capability:
47 50
48 (2) Kernel mode. 51 (2) Kernel mode.
49 52
50 Normal kernel mode. There are many additional control registers available that may be 53 Normal kernel mode. There are many additional control registers
51 accessed in this mode, in addition to all the stuff available to user mode. This has two 54 available that may be accessed in this mode, in addition to all the
52 submodes: 55 stuff available to user mode. This has two submodes:
53 56
54 (a) Exceptions enabled (PSR.T == 1). 57 (a) Exceptions enabled (PSR.T == 1).
55 58
56 Exceptions will invoke the appropriate normal kernel mode handler. On entry to the 59 Exceptions will invoke the appropriate normal kernel mode
57 handler, the PSR.T bit will be cleared. 60 handler. On entry to the handler, the PSR.T bit will be cleared.
58 61
59 (b) Exceptions disabled (PSR.T == 0). 62 (b) Exceptions disabled (PSR.T == 0).
60 63
61 No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to 64 No exceptions or interrupts may happen. Any mandatory exceptions
62 halt unless the CPU is told to jump into debug mode instead. 65 will cause the CPU to halt unless the CPU is told to jump into
66 debug mode instead.
63 67
64 (3) Debug mode. 68 (3) Debug mode.
65 69
66 No exceptions may happen in this mode. Memory protection and management exceptions will be 70 No exceptions may happen in this mode. Memory protection and
67 flagged for later consideration, but the exception handler won't be invoked. Debugging traps 71 management exceptions will be flagged for later consideration, but
68 such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by 72 the exception handler won't be invoked. Debugging traps such as
69 debugging events obtained from the other two modes. 73 hardware breakpoints and watchpoints will be ignored. This mode is
74 entered only by debugging events obtained from the other two modes.
70 75
71 All kernel mode registers may be accessed, plus a few extra debugging specific registers. 76 All kernel mode registers may be accessed, plus a few extra debugging
77 specific registers.
72 78
73 79
74================================= 80=================================
75INTERNAL KERNEL-MODE REGISTER ABI 81INTERNAL KERNEL-MODE REGISTER ABI
76================================= 82=================================
77 83
78There are a number of permanent register assignments that are set up by entry.S in the exception 84There are a number of permanent register assignments that are set up by
79prologue. Note that there is a complete set of exception prologues for each of user->kernel 85entry.S in the exception prologue. Note that there is a complete set of
80transition and kernel->kernel transition. There are also user->debug and kernel->debug mode 86exception prologues for each of user->kernel transition and kernel->kernel
81transition prologues. 87transition. There are also user->debug and kernel->debug mode transition
88prologues.
82 89
83 90
84 REGISTER FLAVOUR USE 91 REGISTER FLAVOUR USE
85 =============== ======= ==================================================== 92 =============== ======= ==============================================
86 GR1 Supervisor stack pointer 93 GR1 Supervisor stack pointer
87 GR15 Current thread info pointer 94 GR15 Current thread info pointer
88 GR16 GP-Rel base register for small data 95 GR16 GP-Rel base register for small data
@@ -92,10 +99,12 @@ transition prologues.
92 GR31 NOMMU Destroyed by debug mode entry 99 GR31 NOMMU Destroyed by debug mode entry
93 GR31 MMU Destroyed by TLB miss kernel mode entry 100 GR31 MMU Destroyed by TLB miss kernel mode entry
94 CCR.ICC2 Virtual interrupt disablement tracking 101 CCR.ICC2 Virtual interrupt disablement tracking
95 CCCR.CC3 Cleared by exception prologue (atomic op emulation) 102 CCCR.CC3 Cleared by exception prologue
103 (atomic op emulation)
96 SCR0 MMU See mmu-layout.txt. 104 SCR0 MMU See mmu-layout.txt.
97 SCR1 MMU See mmu-layout.txt. 105 SCR1 MMU See mmu-layout.txt.
98 SCR2 MMU Save for EAR0 (destroyed by icache insns in debug mode) 106 SCR2 MMU Save for EAR0 (destroyed by icache insns
107 in debug mode)
99 SCR3 MMU Save for GR31 during debug exceptions 108 SCR3 MMU Save for GR31 during debug exceptions
100 DAMR/IAMR NOMMU Fixed memory protection layout. 109 DAMR/IAMR NOMMU Fixed memory protection layout.
101 DAMR/IAMR MMU See mmu-layout.txt. 110 DAMR/IAMR MMU See mmu-layout.txt.
@@ -104,18 +113,21 @@ transition prologues.
104Certain registers are also used or modified across function calls: 113Certain registers are also used or modified across function calls:
105 114
106 REGISTER CALL RETURN 115 REGISTER CALL RETURN
107 =============== =============================== =============================== 116 =============== =============================== ======================
108 GR0 Fixed Zero - 117 GR0 Fixed Zero -
109 GR2 Function call frame pointer 118 GR2 Function call frame pointer
110 GR3 Special Preserved 119 GR3 Special Preserved
111 GR3-GR7 - Clobbered 120 GR3-GR7 - Clobbered
112 GR8 Function call arg #1 Return value (or clobbered) 121 GR8 Function call arg #1 Return value
113 GR9 Function call arg #2 Return value MSW (or clobbered) 122 (or clobbered)
123 GR9 Function call arg #2 Return value MSW
124 (or clobbered)
114 GR10-GR13 Function call arg #3-#6 Clobbered 125 GR10-GR13 Function call arg #3-#6 Clobbered
115 GR14 - Clobbered 126 GR14 - Clobbered
116 GR15-GR16 Special Preserved 127 GR15-GR16 Special Preserved
117 GR17-GR27 - Preserved 128 GR17-GR27 - Preserved
118 GR28-GR31 Special Only accessed explicitly 129 GR28-GR31 Special Only accessed
130 explicitly
119 LR Return address after CALL Clobbered 131 LR Return address after CALL Clobbered
120 CCR/CCCR - Mostly Clobbered 132 CCR/CCCR - Mostly Clobbered
121 133
@@ -124,46 +136,53 @@ Certain registers are also used or modified across function calls:
124INTERNAL DEBUG-MODE REGISTER ABI 136INTERNAL DEBUG-MODE REGISTER ABI
125================================ 137================================
126 138
127This is the same as the kernel-mode register ABI for functions calls. The difference is that in 139This is the same as the kernel-mode register ABI for functions calls. The
128debug-mode there's a different stack and a different exception frame. Almost all the global 140difference is that in debug-mode there's a different stack and a different
129registers from kernel-mode (including the stack pointer) may be changed. 141exception frame. Almost all the global registers from kernel-mode
142(including the stack pointer) may be changed.
130 143
131 REGISTER FLAVOUR USE 144 REGISTER FLAVOUR USE
132 =============== ======= ==================================================== 145 =============== ======= ==============================================
133 GR1 Debug stack pointer 146 GR1 Debug stack pointer
134 GR16 GP-Rel base register for small data 147 GR16 GP-Rel base register for small data
135 GR31 Current debug exception frame pointer (__debug_frame) 148 GR31 Current debug exception frame pointer
149 (__debug_frame)
136 SCR3 MMU Saved value of GR31 150 SCR3 MMU Saved value of GR31
137 151
138 152
139Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be 153Note that debug mode is able to interfere with the kernel's emulated atomic
140exceedingly careful not to do any that would interact with the main kernel in this regard. Hence 154ops, so it must be exceedingly careful not to do any that would interact
141the debug mode code (gdbstub) is almost completely self-contained. The only external code used is 155with the main kernel in this regard. Hence the debug mode code (gdbstub) is
142the sprintf family of functions. 156almost completely self-contained. The only external code used is the
157sprintf family of functions.
143 158
144Futhermore, break.S is so complicated because single-step mode does not switch off on entry to an 159Futhermore, break.S is so complicated because single-step mode does not
145exception. That means unless manually disabled, single-stepping will blithely go on stepping into 160switch off on entry to an exception. That means unless manually disabled,
146things like interrupts. See gdbstub.txt for more information. 161single-stepping will blithely go on stepping into things like interrupts.
162See gdbstub.txt for more information.
147 163
148 164
149========================== 165==========================
150VIRTUAL INTERRUPT HANDLING 166VIRTUAL INTERRUPT HANDLING
151========================== 167==========================
152 168
153Because accesses to the PSR is so slow, and to disable interrupts we have to access it twice (once 169Because accesses to the PSR is so slow, and to disable interrupts we have
154to read and once to write), we don't actually disable interrupts at all if we don't have to. What 170to access it twice (once to read and once to write), we don't actually
155we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we 171disable interrupts at all if we don't have to. What we do instead is use
156then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume 172the ICC2 condition code flags to note virtual disablement, such that if we
157execution at the point the interrupt happened. Setting condition flags as a side effect of an 173then do take an interrupt, we note the flag, really disable interrupts, set
158arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the 174another flag and resume execution at the point the interrupt happened.
175Setting condition flags as a side effect of an arithmetic or logical
176instruction is really fast. This use of the ICC2 only occurs within the
159kernel - it does not affect userspace. 177kernel - it does not affect userspace.
160 178
161The flags we use are: 179The flags we use are:
162 180
163 (*) CCR.ICC2.Z [Zero flag] 181 (*) CCR.ICC2.Z [Zero flag]
164 182
165 Set to virtually disable interrupts, clear when interrupts are virtually enabled. Can be 183 Set to virtually disable interrupts, clear when interrupts are
166 modified by logical instructions without affecting the Carry flag. 184 virtually enabled. Can be modified by logical instructions without
185 affecting the Carry flag.
167 186
168 (*) CCR.ICC2.C [Carry flag] 187 (*) CCR.ICC2.C [Carry flag]
169 188
@@ -176,8 +195,9 @@ What happens is this:
176 195
177 ICC2.Z is 0, ICC2.C is 1. 196 ICC2.Z is 0, ICC2.C is 1.
178 197
179 (2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs 198 (2) An interrupt occurs. The exception prologue examines ICC2.Z and
180 doing. This is done simply with an unlikely BEQ instruction. 199 determines that nothing needs doing. This is done simply with an
200 unlikely BEQ instruction.
181 201
182 (3) The interrupts are disabled (local_irq_disable) 202 (3) The interrupts are disabled (local_irq_disable)
183 203
@@ -187,48 +207,56 @@ What happens is this:
187 207
188 ICC2.Z would be set to 0. 208 ICC2.Z would be set to 0.
189 209
190 A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if 210 A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would
191 interrupts were now virtually enabled, but physically disabled - which they're not, so the 211 be used to trap if interrupts were now virtually enabled, but
192 trap isn't taken. The kernel would then be back to state (1). 212 physically disabled - which they're not, so the trap isn't taken. The
213 kernel would then be back to state (1).
193 214
194 (5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt 215 (5) An interrupt occurs. The exception prologue examines ICC2.Z and
195 shouldn't actually have happened. It jumps aside, and there disabled interrupts by setting 216 determines that the interrupt shouldn't actually have happened. It
196 PSR.PIL to 14 and then it clears ICC2.C. 217 jumps aside, and there disabled interrupts by setting PSR.PIL to 14
218 and then it clears ICC2.C.
197 219
198 (6) If interrupts were then saved and disabled again (local_irq_save): 220 (6) If interrupts were then saved and disabled again (local_irq_save):
199 221
200 ICC2.Z would be shifted into the save variable and masked off (giving a 1). 222 ICC2.Z would be shifted into the save variable and masked off
223 (giving a 1).
201 224
202 ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0). 225 ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be
226 unaffected (ie: 0).
203 227
204 (7) If interrupts were then restored from state (6) (local_irq_restore): 228 (7) If interrupts were then restored from state (6) (local_irq_restore):
205 229
206 ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which 230 ICC2.Z would be set to indicate the result of XOR'ing the saved
207 gives a result of 0 - thus leaving ICC2.Z set. 231 value (ie: 1) with 1, which gives a result of 0 - thus leaving
232 ICC2.Z set.
208 233
209 ICC2.C would remain unaffected (ie: 0). 234 ICC2.C would remain unaffected (ie: 0).
210 235
211 A TIHI #2 instruction would be used to again assay the current state, but this would do 236 A TIHI #2 instruction would be used to again assay the current state,
212 nothing as Z==1. 237 but this would do nothing as Z==1.
213 238
214 (8) If interrupts were then enabled (local_irq_enable): 239 (8) If interrupts were then enabled (local_irq_enable):
215 240
216 ICC2.Z would be cleared. ICC2.C would be left unaffected. Both flags would now be 0. 241 ICC2.Z would be cleared. ICC2.C would be left unaffected. Both
242 flags would now be 0.
217 243
218 A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0 244 A TIHI #2 instruction again issued to assay the current state would
219 [interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true. 245 then trap as both Z==0 [interrupts virtually enabled] and C==0
246 [interrupts really disabled] would then be true.
220 247
221 (9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to 248 (9) The trap #2 handler would simply enable hardware interrupts
222 1 and return. 249 (set PSR.PIL to 0), set ICC2.C to 1 and return.
223 250
224(10) Immediately upon returning, the pending interrupt would be taken. 251(10) Immediately upon returning, the pending interrupt would be taken.
225 252
226(11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is 253(11) The interrupt handler would take the path of actually processing the
227 clear, BEQ fails as per step (2)). 254 interrupt (ICC2.Z is clear, BEQ fails as per step (2)).
228 255
229(12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely 256(12) The interrupt handler would then set ICC2.C to 1 since hardware
230 enabled - or else the kernel wouldn't be here. 257 interrupts are definitely enabled - or else the kernel wouldn't be here.
231 258
232(13) On return from the interrupt handler, things would be back to state (1). 259(13) On return from the interrupt handler, things would be back to state (1).
233 260
234This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL. 261This trap (#2) is only available in kernel mode. In user mode it will
262result in SIGILL.
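
The virtual interrupt handling walkthrough above (steps 1-13) can be condensed into a small C model. The sketch below is only a conceptual illustration of the ICC2.Z/ICC2.C scheme as described - the real implementation is FR-V assembly in entry.S - and the helpers hw_disable_interrupts(), hw_enable_interrupts() and trap2_handler() are invented names standing in for PSR.PIL manipulation and the trap #2 vector.

    /* Conceptual C model of the ICC2-based virtual interrupt disabling
     * described above.  Not the real FR-V assembly; the hardware helpers
     * are hypothetical stand-ins.
     *
     *   icc2_z == 1  ->  interrupts virtually disabled
     *   icc2_c == 0  ->  interrupts really (physically) disabled
     */
    static int icc2_z;                  /* Zero flag, initially 0  */
    static int icc2_c = 1;              /* Carry flag, initially 1 */

    static void hw_disable_interrupts(void) { /* set PSR.PIL to 14 */ }
    static void hw_enable_interrupts(void)  { /* set PSR.PIL to 0  */ }

    /* Trap #2 handler, step (9): interrupts are virtually enabled but
     * really disabled, so re-enable them physically. */
    static void trap2_handler(void)
    {
            hw_enable_interrupts();
            icc2_c = 1;
    }

    static void local_irq_disable(void)
    {
            icc2_z = 1;                 /* step (3): note virtual disablement */
    }

    static void local_irq_enable(void)
    {
            icc2_z = 0;                 /* step (4): virtually enable */
            if (icc2_z == 0 && icc2_c == 0)
                    trap2_handler();    /* TIHI #2 condition: Z==0 && C==0 */
    }

    /* Exception prologue for an external interrupt, steps (2), (5),
     * (11) and (12). */
    static void interrupt_entry(void)
    {
            if (icc2_z) {               /* step (5): virtually disabled */
                    hw_disable_interrupts();
                    icc2_c = 0;         /* remember "really disabled" */
                    return;             /* resume at the interrupted point */
            }
            /* step (11): actually process the interrupt ... */
            icc2_c = 1;                 /* step (12): hw interrupts are on */
    }

local_irq_save() and local_irq_restore() then only shuffle icc2_z in and out of the saved flags word, as steps (6) and (7) describe.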
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f8cb55c30b0..b3a6187e530 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1,4 +1,4 @@
1February 2003 Kernel Parameters v2.5.59 1 Kernel Parameters
2 ~~~~~~~~~~~~~~~~~ 2 ~~~~~~~~~~~~~~~~~
3 3
4The following is a consolidated list of the kernel parameters as implemented 4The following is a consolidated list of the kernel parameters as implemented
@@ -17,9 +17,17 @@ are specified on the kernel command line with the module name plus
17 17
18 usbcore.blinkenlights=1 18 usbcore.blinkenlights=1
19 19
20The text in square brackets at the beginning of the description states the 20This document may not be entirely up to date and comprehensive. The command
21restrictions on the kernel for the said kernel parameter to be valid. The 21"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
22restrictions referred to are that the relevant option is valid if: 22module. Loadable modules, after being loaded into the running kernel, also
23reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
24parameters may be changed at runtime by the command
25"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
26
27The parameters listed below are only valid if certain kernel build options were
28enabled and if respective hardware is present. The text in square brackets at
29the beginning of each description states the restrictions within which a
30parameter is applicable:
23 31
24 ACPI ACPI support is enabled. 32 ACPI ACPI support is enabled.
25 ALSA ALSA sound support is enabled. 33 ALSA ALSA sound support is enabled.
@@ -1046,10 +1054,10 @@ running once the system is up.
1046 noltlbs [PPC] Do not use large page/tlb entries for kernel 1054 noltlbs [PPC] Do not use large page/tlb entries for kernel
1047 lowmem mapping on PPC40x. 1055 lowmem mapping on PPC40x.
1048 1056
1049 nomce [IA-32] Machine Check Exception
1050
1051 nomca [IA-64] Disable machine check abort handling 1057 nomca [IA-64] Disable machine check abort handling
1052 1058
1059 nomce [IA-32] Machine Check Exception
1060
1053 noresidual [PPC] Don't use residual data on PReP machines. 1061 noresidual [PPC] Don't use residual data on PReP machines.
1054 1062
1055 noresume [SWSUSP] Disables resume and restores original swap 1063 noresume [SWSUSP] Disables resume and restores original swap
@@ -1682,20 +1690,6 @@ running once the system is up.
1682 1690
1683 1691
1684______________________________________________________________________ 1692______________________________________________________________________
1685Changelog:
1686
16872000-06-?? Mr. Unknown
1688 The last known update (for 2.4.0) - the changelog was not kept before.
1689
16902002-11-24 Petr Baudis <pasky@ucw.cz>
1691 Randy Dunlap <randy.dunlap@verizon.net>
1692 Update for 2.5.49, description for most of the options introduced,
1693 references to other documentation (C files, READMEs, ..), added S390,
1694 PPC, SPARC, MTD, ALSA and OSS category. Minor corrections and
1695 reformatting.
1696
16972005-10-19 Randy Dunlap <rdunlap@xenotime.net>
1698 Lots of typos, whitespace, some reformatting.
1699 1693
1700TODO: 1694TODO:
1701 1695
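
The paragraph added above tells readers to look in /sys/module/${modulename}/parameters/ and at "modinfo -p". As a rough sketch of where those entries come from, the hypothetical module below declares a single writable parameter with module_param(); the module and the name example_param are invented for illustration only.

    /* Hypothetical module: a parameter declared with module_param()
     * shows up in "modinfo -p" and, once the module is loaded, under
     * /sys/module/<modulename>/parameters/. */
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int example_param = 42;
    module_param(example_param, int, 0644);  /* 0644: root may change it at runtime */
    MODULE_PARM_DESC(example_param, "An illustrative integer parameter");

    static int __init example_init(void)
    {
            printk(KERN_INFO "example: example_param=%d\n", example_param);
            return 0;
    }

    static void __exit example_exit(void)
    {
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

With such a module loaded, echoing a value into /sys/module/example/parameters/example_param changes it at runtime, which is the mechanism the new text describes.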
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 4fc8e987432..aaf99d5f0da 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -254,7 +254,7 @@ and, the number of frames be
254 254
255 <block number> * <block size> / <frame size> 255 <block number> * <block size> / <frame size>
256 256
257Suposse the following parameters, which apply for 2.6 kernel and an 257Suppose the following parameters, which apply for 2.6 kernel and an
258i386 architecture: 258i386 architecture:
259 259
260 <size-max> = 131072 bytes 260 <size-max> = 131072 bytes
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index ec3d109d787..76750fb9151 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -138,7 +138,7 @@ This means that you have to read/write IP packets when you are using tun and
138ethernet frames when using tap. 138ethernet frames when using tap.
139 139
1405. What is the difference between BPF and TUN/TAP driver? 1405. What is the difference between BPF and TUN/TAP driver?
141BFP is an advanced packet filter. It can be attached to existing 141BPF is an advanced packet filter. It can be attached to existing
142network interface. It does not provide a virtual network interface. 142network interface. It does not provide a virtual network interface.
143A TUN/TAP driver does provide a virtual network interface and it is possible 143A TUN/TAP driver does provide a virtual network interface and it is possible
144to attach BPF to this interface. 144to attach BPF to this interface.
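
Since the corrected sentence contrasts BPF with the virtual interface provided by TUN/TAP, a minimal user-space sketch of creating such an interface may be useful. It assumes the usual /dev/net/tun clone device; the requested name "tap%d" just asks the kernel to pick a free number.

    /* Minimal sketch: create a TAP interface through /dev/net/tun.
     * Error handling is kept to the bare minimum. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>

    int main(void)
    {
            struct ifreq ifr;
            int fd = open("/dev/net/tun", O_RDWR);

            if (fd < 0) {
                    perror("open(/dev/net/tun)");
                    return 1;
            }

            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = IFF_TAP | IFF_NO_PI;   /* tap: ethernet frames */
            strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ);

            if (ioctl(fd, TUNSETIFF, &ifr) < 0) {  /* kernel fills in the name */
                    perror("ioctl(TUNSETIFF)");
                    close(fd);
                    return 1;
            }

            printf("created %s; read()/write() now carry ethernet frames\n",
                   ifr.ifr_name);
            close(fd);
            return 0;
    }

Requesting IFF_TUN instead would yield IP packets rather than ethernet frames, matching the distinction drawn a few lines earlier in the same file.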
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index e3c5fca0aa8..2b0cfce24a6 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -69,7 +69,7 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
69 * for the data I pass, and I need tags 69 * for the data I pass, and I need tags
70 * on the data to indicate what information I have 70 * on the data to indicate what information I have
71 * squirrelled away. ELF notes happen to provide 71 * squirrelled away. ELF notes happen to provide
72 * all of that that no need to invent something new. 72 * all of that, so there is no need to invent something new.
73 */ 73 */
74 buf = (u32*)per_cpu_ptr(crash_notes, cpu); 74 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
75 if (!buf) 75 if (!buf)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 5b26af8597f..e112d1a5dab 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1740,7 +1740,7 @@ EXPORT_SYMBOL(blk_run_queue);
1740 1740
1741/** 1741/**
1742 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed 1742 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
1743 * @q: the request queue to be released 1743 * @kobj: the kobj belonging of the request queue to be released
1744 * 1744 *
1745 * Description: 1745 * Description:
1746 * blk_cleanup_queue is the pair to blk_init_queue() or 1746 * blk_cleanup_queue is the pair to blk_init_queue() or
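
The hunk above only renames a kernel-doc parameter line. For readers unfamiliar with the format, the block below shows the general kernel-doc layout that scripts/kernel-doc expects; the function and its argument are invented purely for illustration.

    #include <linux/kobject.h>

    /**
     * example_release_queue - release an example queue (illustrative only)
     * @kobj: embedded kobject of the queue being released
     *
     * Description:
     *     Each "@name: text" line must name an actual parameter of the
     *     function so that scripts/kernel-doc can extract it into the
     *     generated DocBook output.
     */
    static void example_release_queue(struct kobject *kobj)
    {
    }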
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index aecd9e0c261..64fd8e79ea4 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -78,8 +78,7 @@ void dm_put_target_type(struct target_type *t)
78 if (--ti->use == 0) 78 if (--ti->use == 0)
79 module_put(ti->tt.module); 79 module_put(ti->tt.module);
80 80
81 if (ti->use < 0) 81 BUG_ON(ti->use < 0);
82 BUG();
83 up_read(&_lock); 82 up_read(&_lock);
84 83
85 return; 84 return;
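
This hunk, like most of the driver and filesystem hunks that follow, collapses an explicit test followed by BUG() into a single BUG_ON(). A small user-space model of the idiom is sketched below; the real macro lives in include/asm-generic/bug.h and is roughly "do { if (unlikely(condition)) BUG(); } while (0)", while the BUG() shown here is a stand-in that just aborts.

    /* User-space model of the BUG()/BUG_ON() idiom this patch applies
     * throughout.  Not the kernel's definition, just an illustration. */
    #include <stdio.h>
    #include <stdlib.h>

    #define BUG() \
            do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); \
                 abort(); } while (0)

    #define BUG_ON(condition) \
            do { if (condition) BUG(); } while (0)

    int main(void)
    {
            int use = 1;

            /* Old style, as removed by the hunk above: */
            if (use < 0)
                    BUG();

            /* New style, as added by the hunk above: */
            BUG_ON(use < 0);

            return 0;
    }

Besides being shorter, BUG_ON() keeps the condition and the crash on one line, which is presumably why the rest of this commit converts the open-coded form wherever the test has no side effects.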
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 9b374c91db6..6081941de1b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1558,8 +1558,7 @@ static int init_resync(conf_t *conf)
1558 int buffs; 1558 int buffs;
1559 1559
1560 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 1560 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1561 if (conf->r1buf_pool) 1561 BUG_ON(conf->r1buf_pool);
1562 BUG();
1563 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, 1562 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
1564 conf->poolinfo); 1563 conf->poolinfo);
1565 if (!conf->r1buf_pool) 1564 if (!conf->r1buf_pool)
@@ -1732,8 +1731,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1732 !conf->fullsync && 1731 !conf->fullsync &&
1733 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 1732 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1734 break; 1733 break;
1735 if (sync_blocks < (PAGE_SIZE>>9)) 1734 BUG_ON(sync_blocks < (PAGE_SIZE>>9));
1736 BUG();
1737 if (len > (sync_blocks<<9)) 1735 if (len > (sync_blocks<<9))
1738 len = sync_blocks<<9; 1736 len = sync_blocks<<9;
1739 } 1737 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ab90a6d1202..617012bc107 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1117,8 +1117,7 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1117 for (i=0; i<conf->copies; i++) 1117 for (i=0; i<conf->copies; i++)
1118 if (r10_bio->devs[i].bio == bio) 1118 if (r10_bio->devs[i].bio == bio)
1119 break; 1119 break;
1120 if (i == conf->copies) 1120 BUG_ON(i == conf->copies);
1121 BUG();
1122 update_head_pos(i, r10_bio); 1121 update_head_pos(i, r10_bio);
1123 d = r10_bio->devs[i].devnum; 1122 d = r10_bio->devs[i].devnum;
1124 1123
@@ -1518,8 +1517,7 @@ static int init_resync(conf_t *conf)
1518 int buffs; 1517 int buffs;
1519 1518
1520 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; 1519 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1521 if (conf->r10buf_pool) 1520 BUG_ON(conf->r10buf_pool);
1522 BUG();
1523 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); 1521 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1524 if (!conf->r10buf_pool) 1522 if (!conf->r10buf_pool)
1525 return -ENOMEM; 1523 return -ENOMEM;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dae740adaf6..31843604049 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -73,10 +73,8 @@ static void print_raid5_conf (raid5_conf_t *conf);
73static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 73static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
74{ 74{
75 if (atomic_dec_and_test(&sh->count)) { 75 if (atomic_dec_and_test(&sh->count)) {
76 if (!list_empty(&sh->lru)) 76 BUG_ON(!list_empty(&sh->lru));
77 BUG(); 77 BUG_ON(atomic_read(&conf->active_stripes)==0);
78 if (atomic_read(&conf->active_stripes)==0)
79 BUG();
80 if (test_bit(STRIPE_HANDLE, &sh->state)) { 78 if (test_bit(STRIPE_HANDLE, &sh->state)) {
81 if (test_bit(STRIPE_DELAYED, &sh->state)) 79 if (test_bit(STRIPE_DELAYED, &sh->state))
82 list_add_tail(&sh->lru, &conf->delayed_list); 80 list_add_tail(&sh->lru, &conf->delayed_list);
@@ -184,10 +182,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
184 raid5_conf_t *conf = sh->raid_conf; 182 raid5_conf_t *conf = sh->raid_conf;
185 int i; 183 int i;
186 184
187 if (atomic_read(&sh->count) != 0) 185 BUG_ON(atomic_read(&sh->count) != 0);
188 BUG(); 186 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
189 if (test_bit(STRIPE_HANDLE, &sh->state))
190 BUG();
191 187
192 CHECK_DEVLOCK(); 188 CHECK_DEVLOCK();
193 PRINTK("init_stripe called, stripe %llu\n", 189 PRINTK("init_stripe called, stripe %llu\n",
@@ -269,8 +265,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
269 init_stripe(sh, sector, pd_idx, disks); 265 init_stripe(sh, sector, pd_idx, disks);
270 } else { 266 } else {
271 if (atomic_read(&sh->count)) { 267 if (atomic_read(&sh->count)) {
272 if (!list_empty(&sh->lru)) 268 BUG_ON(!list_empty(&sh->lru));
273 BUG();
274 } else { 269 } else {
275 if (!test_bit(STRIPE_HANDLE, &sh->state)) 270 if (!test_bit(STRIPE_HANDLE, &sh->state))
276 atomic_inc(&conf->active_stripes); 271 atomic_inc(&conf->active_stripes);
@@ -465,8 +460,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
465 spin_unlock_irq(&conf->device_lock); 460 spin_unlock_irq(&conf->device_lock);
466 if (!sh) 461 if (!sh)
467 return 0; 462 return 0;
468 if (atomic_read(&sh->count)) 463 BUG_ON(atomic_read(&sh->count));
469 BUG();
470 shrink_buffers(sh, conf->pool_size); 464 shrink_buffers(sh, conf->pool_size);
471 kmem_cache_free(conf->slab_cache, sh); 465 kmem_cache_free(conf->slab_cache, sh);
472 atomic_dec(&conf->active_stripes); 466 atomic_dec(&conf->active_stripes);
@@ -882,8 +876,7 @@ static void compute_parity(struct stripe_head *sh, int method)
882 ptr[0] = page_address(sh->dev[pd_idx].page); 876 ptr[0] = page_address(sh->dev[pd_idx].page);
883 switch(method) { 877 switch(method) {
884 case READ_MODIFY_WRITE: 878 case READ_MODIFY_WRITE:
885 if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags)) 879 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
886 BUG();
887 for (i=disks ; i-- ;) { 880 for (i=disks ; i-- ;) {
888 if (i==pd_idx) 881 if (i==pd_idx)
889 continue; 882 continue;
@@ -896,7 +889,7 @@ static void compute_parity(struct stripe_head *sh, int method)
896 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 889 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
897 wake_up(&conf->wait_for_overlap); 890 wake_up(&conf->wait_for_overlap);
898 891
899 if (sh->dev[i].written) BUG(); 892 BUG_ON(sh->dev[i].written);
900 sh->dev[i].written = chosen; 893 sh->dev[i].written = chosen;
901 check_xor(); 894 check_xor();
902 } 895 }
@@ -912,7 +905,7 @@ static void compute_parity(struct stripe_head *sh, int method)
912 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 905 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
913 wake_up(&conf->wait_for_overlap); 906 wake_up(&conf->wait_for_overlap);
914 907
915 if (sh->dev[i].written) BUG(); 908 BUG_ON(sh->dev[i].written);
916 sh->dev[i].written = chosen; 909 sh->dev[i].written = chosen;
917 } 910 }
918 break; 911 break;
@@ -995,8 +988,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
995 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 988 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
996 goto overlap; 989 goto overlap;
997 990
998 if (*bip && bi->bi_next && (*bip) != bi->bi_next) 991 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
999 BUG();
1000 if (*bip) 992 if (*bip)
1001 bi->bi_next = *bip; 993 bi->bi_next = *bip;
1002 *bip = bi; 994 *bip = bi;
@@ -1430,8 +1422,7 @@ static void handle_stripe(struct stripe_head *sh)
1430 set_bit(STRIPE_HANDLE, &sh->state); 1422 set_bit(STRIPE_HANDLE, &sh->state);
1431 if (failed == 0) { 1423 if (failed == 0) {
1432 char *pagea; 1424 char *pagea;
1433 if (uptodate != disks) 1425 BUG_ON(uptodate != disks);
1434 BUG();
1435 compute_parity(sh, CHECK_PARITY); 1426 compute_parity(sh, CHECK_PARITY);
1436 uptodate--; 1427 uptodate--;
1437 pagea = page_address(sh->dev[sh->pd_idx].page); 1428 pagea = page_address(sh->dev[sh->pd_idx].page);
@@ -2096,8 +2087,7 @@ static void raid5d (mddev_t *mddev)
2096 2087
2097 list_del_init(first); 2088 list_del_init(first);
2098 atomic_inc(&sh->count); 2089 atomic_inc(&sh->count);
2099 if (atomic_read(&sh->count)!= 1) 2090 BUG_ON(atomic_read(&sh->count)!= 1);
2100 BUG();
2101 spin_unlock_irq(&conf->device_lock); 2091 spin_unlock_irq(&conf->device_lock);
2102 2092
2103 handled++; 2093 handled++;
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index ab64b37e499..bc69355e010 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -91,10 +91,8 @@ static void print_raid6_conf (raid6_conf_t *conf);
91static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh) 91static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
92{ 92{
93 if (atomic_dec_and_test(&sh->count)) { 93 if (atomic_dec_and_test(&sh->count)) {
94 if (!list_empty(&sh->lru)) 94 BUG_ON(!list_empty(&sh->lru));
95 BUG(); 95 BUG_ON(atomic_read(&conf->active_stripes)==0);
96 if (atomic_read(&conf->active_stripes)==0)
97 BUG();
98 if (test_bit(STRIPE_HANDLE, &sh->state)) { 96 if (test_bit(STRIPE_HANDLE, &sh->state)) {
99 if (test_bit(STRIPE_DELAYED, &sh->state)) 97 if (test_bit(STRIPE_DELAYED, &sh->state))
100 list_add_tail(&sh->lru, &conf->delayed_list); 98 list_add_tail(&sh->lru, &conf->delayed_list);
@@ -202,10 +200,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
202 raid6_conf_t *conf = sh->raid_conf; 200 raid6_conf_t *conf = sh->raid_conf;
203 int disks = conf->raid_disks, i; 201 int disks = conf->raid_disks, i;
204 202
205 if (atomic_read(&sh->count) != 0) 203 BUG_ON(atomic_read(&sh->count) != 0);
206 BUG(); 204 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
207 if (test_bit(STRIPE_HANDLE, &sh->state))
208 BUG();
209 205
210 CHECK_DEVLOCK(); 206 CHECK_DEVLOCK();
211 PRINTK("init_stripe called, stripe %llu\n", 207 PRINTK("init_stripe called, stripe %llu\n",
@@ -284,13 +280,11 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
284 init_stripe(sh, sector, pd_idx); 280 init_stripe(sh, sector, pd_idx);
285 } else { 281 } else {
286 if (atomic_read(&sh->count)) { 282 if (atomic_read(&sh->count)) {
287 if (!list_empty(&sh->lru)) 283 BUG_ON(!list_empty(&sh->lru));
288 BUG();
289 } else { 284 } else {
290 if (!test_bit(STRIPE_HANDLE, &sh->state)) 285 if (!test_bit(STRIPE_HANDLE, &sh->state))
291 atomic_inc(&conf->active_stripes); 286 atomic_inc(&conf->active_stripes);
292 if (list_empty(&sh->lru)) 287 BUG_ON(list_empty(&sh->lru));
293 BUG();
294 list_del_init(&sh->lru); 288 list_del_init(&sh->lru);
295 } 289 }
296 } 290 }
@@ -353,8 +347,7 @@ static int drop_one_stripe(raid6_conf_t *conf)
353 spin_unlock_irq(&conf->device_lock); 347 spin_unlock_irq(&conf->device_lock);
354 if (!sh) 348 if (!sh)
355 return 0; 349 return 0;
356 if (atomic_read(&sh->count)) 350 BUG_ON(atomic_read(&sh->count));
357 BUG();
358 shrink_buffers(sh, conf->raid_disks); 351 shrink_buffers(sh, conf->raid_disks);
359 kmem_cache_free(conf->slab_cache, sh); 352 kmem_cache_free(conf->slab_cache, sh);
360 atomic_dec(&conf->active_stripes); 353 atomic_dec(&conf->active_stripes);
@@ -780,7 +773,7 @@ static void compute_parity(struct stripe_head *sh, int method)
780 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 773 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
781 wake_up(&conf->wait_for_overlap); 774 wake_up(&conf->wait_for_overlap);
782 775
783 if (sh->dev[i].written) BUG(); 776 BUG_ON(sh->dev[i].written);
784 sh->dev[i].written = chosen; 777 sh->dev[i].written = chosen;
785 } 778 }
786 break; 779 break;
@@ -970,8 +963,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
970 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 963 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
971 goto overlap; 964 goto overlap;
972 965
973 if (*bip && bi->bi_next && (*bip) != bi->bi_next) 966 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
974 BUG();
975 if (*bip) 967 if (*bip)
976 bi->bi_next = *bip; 968 bi->bi_next = *bip;
977 *bip = bi; 969 *bip = bi;
@@ -1906,8 +1898,7 @@ static void raid6d (mddev_t *mddev)
1906 1898
1907 list_del_init(first); 1899 list_del_init(first);
1908 atomic_inc(&sh->count); 1900 atomic_inc(&sh->count);
1909 if (atomic_read(&sh->count)!= 1) 1901 BUG_ON(atomic_read(&sh->count)!= 1);
1910 BUG();
1911 spin_unlock_irq(&conf->device_lock); 1902 spin_unlock_irq(&conf->device_lock);
1912 1903
1913 handled++; 1904 handled++;
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 0f6bb2e625d..a7ec5954caf 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -200,27 +200,6 @@ config MTD_CFI_AMDSTD
200 provides support for one of those command sets, used on chips 200 provides support for one of those command sets, used on chips
201 including the AMD Am29LV320. 201 including the AMD Am29LV320.
202 202
203config MTD_CFI_AMDSTD_RETRY
204 int "Retry failed commands (erase/program)"
205 depends on MTD_CFI_AMDSTD
206 default "0"
207 help
208 Some chips, when attached to a shared bus, don't properly filter
209 bus traffic that is destined to other devices. This broken
210 behavior causes erase and program sequences to be aborted when
211 the sequences are mixed with traffic for other devices.
212
213 SST49LF040 (and related) chips are know to be broken.
214
215config MTD_CFI_AMDSTD_RETRY_MAX
216 int "Max retries of failed commands (erase/program)"
217 depends on MTD_CFI_AMDSTD_RETRY
218 default "0"
219 help
220 If you have an SST49LF040 (or related chip) then this value should
221 be set to at least 1. This can also be adjusted at driver load
222 time with the retry_cmd_max module parameter.
223
224config MTD_CFI_STAA 203config MTD_CFI_STAA
225 tristate "Support for ST (Advanced Architecture) flash chips" 204 tristate "Support for ST (Advanced Architecture) flash chips"
226 depends on MTD_GEN_PROBE 205 depends on MTD_GEN_PROBE
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ce99845d826..066e22b01a9 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -539,8 +539,7 @@ rx_status_loop:
539 unsigned buflen; 539 unsigned buflen;
540 540
541 skb = cp->rx_skb[rx_tail].skb; 541 skb = cp->rx_skb[rx_tail].skb;
542 if (!skb) 542 BUG_ON(!skb);
543 BUG();
544 543
545 desc = &cp->rx_ring[rx_tail]; 544 desc = &cp->rx_ring[rx_tail];
546 status = le32_to_cpu(desc->opts1); 545 status = le32_to_cpu(desc->opts1);
@@ -723,8 +722,7 @@ static void cp_tx (struct cp_private *cp)
723 break; 722 break;
724 723
725 skb = cp->tx_skb[tx_tail].skb; 724 skb = cp->tx_skb[tx_tail].skb;
726 if (!skb) 725 BUG_ON(!skb);
727 BUG();
728 726
729 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, 727 pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
730 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE); 728 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
@@ -1550,8 +1548,7 @@ static void cp_get_ethtool_stats (struct net_device *dev,
1550 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort); 1548 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1551 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun); 1549 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1552 tmp_stats[i++] = cp->cp_stats.rx_frags; 1550 tmp_stats[i++] = cp->cp_stats.rx_frags;
1553 if (i != CP_NUM_STATS) 1551 BUG_ON(i != CP_NUM_STATS);
1554 BUG();
1555 1552
1556 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma); 1553 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
1557} 1554}
@@ -1856,8 +1853,7 @@ static void cp_remove_one (struct pci_dev *pdev)
1856 struct net_device *dev = pci_get_drvdata(pdev); 1853 struct net_device *dev = pci_get_drvdata(pdev);
1857 struct cp_private *cp = netdev_priv(dev); 1854 struct cp_private *cp = netdev_priv(dev);
1858 1855
1859 if (!dev) 1856 BUG_ON(!dev);
1860 BUG();
1861 unregister_netdev(dev); 1857 unregister_netdev(dev);
1862 iounmap(cp->regs); 1858 iounmap(cp->regs);
1863 if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0); 1859 if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 64e2caf3083..fabc0607b0f 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -765,8 +765,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
765 BUGMSG(D_DURING, "in arcnet_interrupt\n"); 765 BUGMSG(D_DURING, "in arcnet_interrupt\n");
766 766
767 lp = dev->priv; 767 lp = dev->priv;
768 if (!lp) 768 BUG_ON(!lp);
769 BUG();
770 769
771 spin_lock(&lp->lock); 770 spin_lock(&lp->lock);
772 771
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 15032f2c781..c4e12b5cbb9 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -608,8 +608,7 @@ static void b44_tx(struct b44 *bp)
608 struct ring_info *rp = &bp->tx_buffers[cons]; 608 struct ring_info *rp = &bp->tx_buffers[cons];
609 struct sk_buff *skb = rp->skb; 609 struct sk_buff *skb = rp->skb;
610 610
611 if (unlikely(skb == NULL)) 611 BUG_ON(skb == NULL);
612 BUG();
613 612
614 pci_unmap_single(bp->pdev, 613 pci_unmap_single(bp->pdev,
615 pci_unmap_addr(rp, mapping), 614 pci_unmap_addr(rp, mapping),
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 30ff8ea1a40..4391bf4bf57 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1093,8 +1093,7 @@ static int process_responses(struct adapter *adapter, int budget)
1093 if (likely(e->DataValid)) { 1093 if (likely(e->DataValid)) {
1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1094 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1095 1095
1096 if (unlikely(!e->Sop || !e->Eop)) 1096 BUG_ON(!e->Sop || !e->Eop);
1097 BUG();
1098 if (unlikely(e->Offload)) 1097 if (unlikely(e->Offload))
1099 unexpected_offload(adapter, fl); 1098 unexpected_offload(adapter, fl);
1100 else 1099 else
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 49cd096a3c3..add8dc4aa7b 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3308,8 +3308,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3308 3308
3309 while (poll_dev != &adapter->polling_netdev[i]) { 3309 while (poll_dev != &adapter->polling_netdev[i]) {
3310 i++; 3310 i++;
3311 if (unlikely(i == adapter->num_rx_queues)) 3311 BUG_ON(i == adapter->num_rx_queues);
3312 BUG();
3313 } 3312 }
3314 3313
3315 if (likely(adapter->num_tx_queues == 1)) { 3314 if (likely(adapter->num_tx_queues == 1)) {
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index aa1569182fd..815436c6170 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -203,8 +203,7 @@ static int eql_open(struct net_device *dev)
203 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " 203 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
204 "your slave devices.\n", dev->name); 204 "your slave devices.\n", dev->name);
205 205
206 if (!list_empty(&eql->queue.all_slaves)) 206 BUG_ON(!list_empty(&eql->queue.all_slaves));
207 BUG();
208 207
209 eql->min_slaves = 1; 208 eql->min_slaves = 1;
210 eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */ 209 eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 63d38fbbd04..f530686bd09 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -695,8 +695,7 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
695 /* 695 /*
696 * We must not be transmitting... 696 * We must not be transmitting...
697 */ 697 */
698 if (si->txskb) 698 BUG_ON(si->txskb);
699 BUG();
700 699
701 netif_stop_queue(dev); 700 netif_stop_queue(dev);
702 701
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index d11821dd86e..ced9fdb8335 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -645,9 +645,7 @@ static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
645{ 645{
646 struct net_device *dev = pci_get_drvdata(pdev); 646 struct net_device *dev = pci_get_drvdata(pdev);
647 647
648 if (!dev) 648 BUG_ON(!dev);
649 BUG();
650
651 unregister_netdev(dev); 649 unregister_netdev(dev);
652 release_region(dev->base_addr, NE_IO_EXTENT); 650 release_region(dev->base_addr, NE_IO_EXTENT);
653 free_netdev(dev); 651 free_netdev(dev);
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 8e9b1a537de..706aed7d717 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -568,8 +568,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
568#endif 568#endif
569 569
570 sg = dev->rx_info.descs + (next_empty * DESC_SIZE); 570 sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
571 if (unlikely(NULL != dev->rx_info.skbs[next_empty])) 571 BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
572 BUG();
573 dev->rx_info.skbs[next_empty] = skb; 572 dev->rx_info.skbs[next_empty] = skb;
574 573
575 dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC; 574 dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 35b18057fbd..45ad036733e 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -2122,8 +2122,7 @@ static void __devexit starfire_remove_one (struct pci_dev *pdev)
2122 struct net_device *dev = pci_get_drvdata(pdev); 2122 struct net_device *dev = pci_get_drvdata(pdev);
2123 struct netdev_private *np = netdev_priv(dev); 2123 struct netdev_private *np = netdev_priv(dev);
2124 2124
2125 if (!dev) 2125 BUG_ON(!dev);
2126 BUG();
2127 2126
2128 unregister_netdev(dev); 2127 unregister_netdev(dev);
2129 2128
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 770e6b6cec6..0b535807217 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -2959,9 +2959,7 @@ static void tg3_tx(struct tg3 *tp)
2959 struct sk_buff *skb = ri->skb; 2959 struct sk_buff *skb = ri->skb;
2960 int i; 2960 int i;
2961 2961
2962 if (unlikely(skb == NULL)) 2962 BUG_ON(skb == NULL);
2963 BUG();
2964
2965 pci_unmap_single(tp->pdev, 2963 pci_unmap_single(tp->pdev,
2966 pci_unmap_addr(ri, mapping), 2964 pci_unmap_addr(ri, mapping),
2967 skb_headlen(skb), 2965 skb_headlen(skb),
@@ -2972,12 +2970,10 @@ static void tg3_tx(struct tg3 *tp)
2972 sw_idx = NEXT_TX(sw_idx); 2970 sw_idx = NEXT_TX(sw_idx);
2973 2971
2974 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2972 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2975 if (unlikely(sw_idx == hw_idx)) 2973 BUG_ON(sw_idx == hw_idx);
2976 BUG();
2977 2974
2978 ri = &tp->tx_buffers[sw_idx]; 2975 ri = &tp->tx_buffers[sw_idx];
2979 if (unlikely(ri->skb != NULL)) 2976 BUG_ON(ri->skb != NULL);
2980 BUG();
2981 2977
2982 pci_unmap_page(tp->pdev, 2978 pci_unmap_page(tp->pdev,
2983 pci_unmap_addr(ri, mapping), 2979 pci_unmap_addr(ri, mapping),
@@ -4928,9 +4924,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4928{ 4924{
4929 int i; 4925 int i;
4930 4926
4931 if (offset == TX_CPU_BASE && 4927 BUG_ON(offset == TX_CPU_BASE &&
4932 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 4928 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4933 BUG();
4934 4929
4935 if (offset == RX_CPU_BASE) { 4930 if (offset == RX_CPU_BASE) {
4936 for (i = 0; i < 10000; i++) { 4931 for (i = 0; i < 10000; i++) {
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 9345e68c451..649d8ea354f 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -438,8 +438,7 @@ static void __devexit abyss_detach (struct pci_dev *pdev)
438{ 438{
439 struct net_device *dev = pci_get_drvdata(pdev); 439 struct net_device *dev = pci_get_drvdata(pdev);
440 440
441 if (!dev) 441 BUG_ON(!dev);
442 BUG();
443 unregister_netdev(dev); 442 unregister_netdev(dev);
444 release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT); 443 release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
445 free_irq(dev->irq, dev); 444 free_irq(dev->irq, dev);
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 3a25d191ea4..19e6f4dfd69 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -735,8 +735,7 @@ static int __devexit madgemc_remove(struct device *device)
735 struct net_local *tp; 735 struct net_local *tp;
736 struct card_info *card; 736 struct card_info *card;
737 737
738 if (!dev) 738 BUG_ON(!dev);
739 BUG();
740 739
741 tp = dev->priv; 740 tp = dev->priv;
742 card = tp->tmspriv; 741 card = tp->tmspriv;
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 9dce522526c..bca89cff85a 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -5573,8 +5573,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
5573 case IEEE80211_52GHZ_BAND: 5573 case IEEE80211_52GHZ_BAND:
5574 network->mode = IEEE_A; 5574 network->mode = IEEE_A;
5575 i = ieee80211_channel_to_index(priv->ieee, priv->channel); 5575 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5576 if (i == -1) 5576 BUG_ON(i == -1);
5577 BUG();
5578 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) { 5577 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5579 IPW_WARNING("Overriding invalid channel\n"); 5578 IPW_WARNING("Overriding invalid channel\n");
5580 priv->channel = geo->a[0].channel; 5579 priv->channel = geo->a[0].channel;
@@ -5587,8 +5586,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
5587 else 5586 else
5588 network->mode = IEEE_B; 5587 network->mode = IEEE_B;
5589 i = ieee80211_channel_to_index(priv->ieee, priv->channel); 5588 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5590 if (i == -1) 5589 BUG_ON(i == -1);
5591 BUG();
5592 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) { 5590 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5593 IPW_WARNING("Overriding invalid channel\n"); 5591 IPW_WARNING("Overriding invalid channel\n");
5594 priv->channel = geo->bg[0].channel; 5592 priv->channel = geo->bg[0].channel;
@@ -6715,8 +6713,7 @@ static int ipw_qos_association(struct ipw_priv *priv,
6715 6713
6716 switch (priv->ieee->iw_mode) { 6714 switch (priv->ieee->iw_mode) {
6717 case IW_MODE_ADHOC: 6715 case IW_MODE_ADHOC:
6718 if (!(network->capability & WLAN_CAPABILITY_IBSS)) 6716 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6719 BUG();
6720 6717
6721 qos_data = &ibss_data; 6718 qos_data = &ibss_data;
6722 break; 6719 break;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 75d56bfef0e..fd0f43b7db5 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1441,8 +1441,7 @@ static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1441 struct net_device *dev = pci_get_drvdata(pdev); 1441 struct net_device *dev = pci_get_drvdata(pdev);
1442 struct yellowfin_private *np; 1442 struct yellowfin_private *np;
1443 1443
1444 if (!dev) 1444 BUG_ON(!dev);
1445 BUG();
1446 np = netdev_priv(dev); 1445 np = netdev_priv(dev);
1447 1446
1448 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, 1447 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 8fd71ab02ef..b842377cb0c 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -32,9 +32,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
32 int size; 32 int size;
33 33
34 /* Sanity checks */ 34 /* Sanity checks */
35 if ( magic == NULL || datasize > PAGE_SIZE || 35 BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
36 (cplength*sizeof(struct ccw1)) > PAGE_SIZE) 36 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
37 BUG();
38 37
39 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 38 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
40 if (cplength > 0) 39 if (cplength > 0)
@@ -125,8 +124,7 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
125 struct dasd_device *device; 124 struct dasd_device *device;
126 int success; 125 int success;
127 126
128 if (cqr->refers == NULL || cqr->function == NULL) 127 BUG_ON(cqr->refers == NULL || cqr->function == NULL);
129 BUG();
130 128
131 device = cqr->device; 129 device = cqr->device;
132 success = cqr->status == DASD_CQR_DONE; 130 success = cqr->status == DASD_CQR_DONE;
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index ac10dfb20a6..91e93c78f57 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -24,7 +24,7 @@
24 24
25/* 25/*
26 * The room for the SCCB (only for writing) is not equal to a pages size 26 * The room for the SCCB (only for writing) is not equal to a pages size
27 * (as it is specified as the maximum size in the the SCLP ducumentation) 27 * (as it is specified as the maximum size in the the SCLP documentation)
28 * because of the additional data structure described above. 28 * because of the additional data structure described above.
29 */ 29 */
30#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) 30#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 5ced2725d6c..5c65cf3e5cc 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -198,9 +198,7 @@ tapeblock_request_fn(request_queue_t *queue)
198 198
199 device = (struct tape_device *) queue->queuedata; 199 device = (struct tape_device *) queue->queuedata;
200 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device); 200 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
201 if (device == NULL) 201 BUG_ON(device == NULL);
202 BUG();
203
204 tapeblock_trigger_requeue(device); 202 tapeblock_trigger_requeue(device);
205} 203}
206 204
@@ -307,8 +305,7 @@ tapeblock_revalidate_disk(struct gendisk *disk)
307 int rc; 305 int rc;
308 306
309 device = (struct tape_device *) disk->private_data; 307 device = (struct tape_device *) disk->private_data;
310 if (!device) 308 BUG_ON(!device);
311 BUG();
312 309
313 if (!device->blk_data.medium_changed) 310 if (!device->blk_data.medium_changed)
314 return 0; 311 return 0;
@@ -440,11 +437,9 @@ tapeblock_ioctl(
440 437
441 rc = 0; 438 rc = 0;
442 disk = inode->i_bdev->bd_disk; 439 disk = inode->i_bdev->bd_disk;
443 if (!disk) 440 BUG_ON(!disk);
444 BUG();
445 device = disk->private_data; 441 device = disk->private_data;
446 if (!device) 442 BUG_ON(!device);
447 BUG();
448 minor = iminor(inode); 443 minor = iminor(inode);
449 444
450 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command); 445 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index edcf05d5d56..5d6b7a57b02 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -675,9 +675,8 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
675 int index, rc; 675 int index, rc;
676 676
677 LCS_DBF_TEXT(5, trace, "rdybuff"); 677 LCS_DBF_TEXT(5, trace, "rdybuff");
678 if (buffer->state != BUF_STATE_LOCKED && 678 BUG_ON(buffer->state != BUF_STATE_LOCKED &&
679 buffer->state != BUF_STATE_PROCESSED) 679 buffer->state != BUF_STATE_PROCESSED);
680 BUG();
681 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 680 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
682 buffer->state = BUF_STATE_READY; 681 buffer->state = BUF_STATE_READY;
683 index = buffer - channel->iob; 682 index = buffer - channel->iob;
@@ -701,8 +700,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
701 int index, prev, next; 700 int index, prev, next;
702 701
703 LCS_DBF_TEXT(5, trace, "prcsbuff"); 702 LCS_DBF_TEXT(5, trace, "prcsbuff");
704 if (buffer->state != BUF_STATE_READY) 703 BUG_ON(buffer->state != BUF_STATE_READY);
705 BUG();
706 buffer->state = BUF_STATE_PROCESSED; 704 buffer->state = BUF_STATE_PROCESSED;
707 index = buffer - channel->iob; 705 index = buffer - channel->iob;
708 prev = (index - 1) & (LCS_NUM_BUFFS - 1); 706 prev = (index - 1) & (LCS_NUM_BUFFS - 1);
@@ -734,9 +732,8 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
734 unsigned long flags; 732 unsigned long flags;
735 733
736 LCS_DBF_TEXT(5, trace, "relbuff"); 734 LCS_DBF_TEXT(5, trace, "relbuff");
737 if (buffer->state != BUF_STATE_LOCKED && 735 BUG_ON(buffer->state != BUF_STATE_LOCKED &&
738 buffer->state != BUF_STATE_PROCESSED) 736 buffer->state != BUF_STATE_PROCESSED);
739 BUG();
740 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 737 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
741 buffer->state = BUF_STATE_EMPTY; 738 buffer->state = BUF_STATE_EMPTY;
742 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 739 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
index 6c2c395554f..5517da5855f 100644
--- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx
+++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx
@@ -86,7 +86,7 @@ config AIC7XXX_DEBUG_MASK
86 default "0" 86 default "0"
87 help 87 help
88 Bit mask of debug options that is only valid if the 88 Bit mask of debug options that is only valid if the
89 CONFIG_AIC7XXX_DEBUG_ENBLE option is enabled. The bits in this mask 89 CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask
90 are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the 90 are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the
91 variable ahc_debug in that file to find them. 91 variable ahc_debug in that file to find them.
92 92
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index dfc1e86d3aa..043f50b1d10 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -20,7 +20,7 @@
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Scott H Kilau <Scott_Kilau@digi.com> 22 * Scott H Kilau <Scott_Kilau@digi.com>
23 * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> 23 * Wendy Xiong <wendyx@us.ibm.com>
24 * 24 *
25 ***********************************************************************/ 25 ***********************************************************************/
26 26
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index b1b66e71d28..b3e1f71be4d 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -20,7 +20,7 @@
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Scott H Kilau <Scott_Kilau@digi.com> 22 * Scott H Kilau <Scott_Kilau@digi.com>
23 * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> 23 * Wendy Xiong <wendyx@us.ibm.com>
24 * 24 *
25 * 25 *
26 ***********************************************************************/ 26 ***********************************************************************/
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/serial/jsm/jsm_neo.c
index 87e4e2cf8ce..a5fc589d6ef 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/serial/jsm/jsm_neo.c
@@ -20,7 +20,7 @@
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Scott H Kilau <Scott_Kilau@digi.com> 22 * Scott H Kilau <Scott_Kilau@digi.com>
23 * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> 23 * Wendy Xiong <wendyx@us.ibm.com>
24 * 24 *
25 ***********************************************************************/ 25 ***********************************************************************/
26#include <linux/delay.h> /* For udelay */ 26#include <linux/delay.h> /* For udelay */
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 910a8ed74b5..b05d1b21877 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -929,8 +929,7 @@ do_holes:
929 block_in_page += this_chunk_blocks; 929 block_in_page += this_chunk_blocks;
930 dio->blocks_available -= this_chunk_blocks; 930 dio->blocks_available -= this_chunk_blocks;
931next_block: 931next_block:
932 if (dio->block_in_file > dio->final_block_in_request) 932 BUG_ON(dio->block_in_file > dio->final_block_in_request);
933 BUG();
934 if (dio->block_in_file == dio->final_block_in_request) 933 if (dio->block_in_file == dio->final_block_in_request)
935 break; 934 break;
936 } 935 }
diff --git a/fs/dquot.c b/fs/dquot.c
index 6b388692093..81d87a413c6 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -590,8 +590,7 @@ we_slept:
590 atomic_dec(&dquot->dq_count); 590 atomic_dec(&dquot->dq_count);
591#ifdef __DQUOT_PARANOIA 591#ifdef __DQUOT_PARANOIA
592 /* sanity check */ 592 /* sanity check */
593 if (!list_empty(&dquot->dq_free)) 593 BUG_ON(!list_empty(&dquot->dq_free));
594 BUG();
595#endif 594#endif
596 put_dquot_last(dquot); 595 put_dquot_last(dquot);
597 spin_unlock(&dq_list_lock); 596 spin_unlock(&dq_list_lock);
@@ -666,8 +665,7 @@ we_slept:
666 return NODQUOT; 665 return NODQUOT;
667 } 666 }
668#ifdef __DQUOT_PARANOIA 667#ifdef __DQUOT_PARANOIA
669 if (!dquot->dq_sb) /* Has somebody invalidated entry under us? */ 668 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
670 BUG();
671#endif 669#endif
672 670
673 return dquot; 671 return dquot;
diff --git a/fs/exec.c b/fs/exec.c
index 950ebd43cdc..0291a68a362 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -561,7 +561,7 @@ static int exec_mmap(struct mm_struct *mm)
561 arch_pick_mmap_layout(mm); 561 arch_pick_mmap_layout(mm);
562 if (old_mm) { 562 if (old_mm) {
563 up_read(&old_mm->mmap_sem); 563 up_read(&old_mm->mmap_sem);
564 if (active_mm != old_mm) BUG(); 564 BUG_ON(active_mm != old_mm);
565 mmput(old_mm); 565 mmput(old_mm);
566 return 0; 566 return 0;
567 } 567 }
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 2a2479196f9..d35cbc6bc11 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -453,8 +453,7 @@ static void send_sigio_to_task(struct task_struct *p,
453 /* Make sure we are called with one of the POLL_* 453 /* Make sure we are called with one of the POLL_*
454 reasons, otherwise we could leak kernel stack into 454 reasons, otherwise we could leak kernel stack into
455 userspace. */ 455 userspace. */
456 if ((reason & __SI_MASK) != __SI_POLL) 456 BUG_ON((reason & __SI_MASK) != __SI_POLL);
457 BUG();
458 if (reason - POLL_IN >= NSIGPOLL) 457 if (reason - POLL_IN >= NSIGPOLL)
459 si.si_band = ~0L; 458 si.si_band = ~0L;
460 else 459 else
diff --git a/fs/freevxfs/vxfs_olt.c b/fs/freevxfs/vxfs_olt.c
index 76a0708ae97..04950084790 100644
--- a/fs/freevxfs/vxfs_olt.c
+++ b/fs/freevxfs/vxfs_olt.c
@@ -42,24 +42,21 @@
42static inline void 42static inline void
43vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp) 43vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp)
44{ 44{
45 if (infp->vsi_fshino) 45 BUG_ON(infp->vsi_fshino);
46 BUG();
47 infp->vsi_fshino = fshp->olt_fsino[0]; 46 infp->vsi_fshino = fshp->olt_fsino[0];
48} 47}
49 48
50static inline void 49static inline void
51vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp) 50vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp)
52{ 51{
53 if (infp->vsi_iext) 52 BUG_ON(infp->vsi_iext);
54 BUG();
55 infp->vsi_iext = ilistp->olt_iext[0]; 53 infp->vsi_iext = ilistp->olt_iext[0];
56} 54}
57 55
58static inline u_long 56static inline u_long
59vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize) 57vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize)
60{ 58{
61 if (sbp->s_blocksize % bsize) 59 BUG_ON(sbp->s_blocksize % bsize);
62 BUG();
63 return (block * (sbp->s_blocksize / bsize)); 60 return (block * (sbp->s_blocksize / bsize));
64} 61}
65 62
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 8f07e8fbd03..746abc9ecf7 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -466,8 +466,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
466 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; 466 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
467 *p && *p != node; p = &(*p)->next_hash) 467 *p && *p != node; p = &(*p)->next_hash)
468 ; 468 ;
469 if (!*p) 469 BUG_ON(!*p);
470 BUG();
471 *p = node->next_hash; 470 *p = node->next_hash;
472 node->tree->node_hash_cnt--; 471 node->tree->node_hash_cnt--;
473} 472}
@@ -622,8 +621,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
622 621
623 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", 622 dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
624 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 623 node->tree->cnid, node->this, atomic_read(&node->refcnt));
625 if (!atomic_read(&node->refcnt)) 624 BUG_ON(!atomic_read(&node->refcnt));
626 BUG();
627 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) 625 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
628 return; 626 return;
629 for (i = 0; i < tree->pages_per_bnode; i++) { 627 for (i = 0; i < tree->pages_per_bnode; i++) {
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index a67edfa34e9..effa8991999 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -269,8 +269,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
269 u8 *data, byte, m; 269 u8 *data, byte, m;
270 270
271 dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this); 271 dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
272 if (!node->this) 272 BUG_ON(!node->this);
273 BUG();
274 tree = node->tree; 273 tree = node->tree;
275 nidx = node->this; 274 nidx = node->this;
276 node = hfs_bnode_find(tree, 0); 275 node = hfs_bnode_find(tree, 0);
diff --git a/fs/inode.c b/fs/inode.c
index 32b7c337502..3a2446a27d2 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -172,8 +172,7 @@ static struct inode *alloc_inode(struct super_block *sb)
172 172
173void destroy_inode(struct inode *inode) 173void destroy_inode(struct inode *inode)
174{ 174{
175 if (inode_has_buffers(inode)) 175 BUG_ON(inode_has_buffers(inode));
176 BUG();
177 security_inode_free(inode); 176 security_inode_free(inode);
178 if (inode->i_sb->s_op->destroy_inode) 177 if (inode->i_sb->s_op->destroy_inode)
179 inode->i_sb->s_op->destroy_inode(inode); 178 inode->i_sb->s_op->destroy_inode(inode);
@@ -249,12 +248,9 @@ void clear_inode(struct inode *inode)
249 might_sleep(); 248 might_sleep();
250 invalidate_inode_buffers(inode); 249 invalidate_inode_buffers(inode);
251 250
252 if (inode->i_data.nrpages) 251 BUG_ON(inode->i_data.nrpages);
253 BUG(); 252 BUG_ON(!(inode->i_state & I_FREEING));
254 if (!(inode->i_state & I_FREEING)) 253 BUG_ON(inode->i_state & I_CLEAR);
255 BUG();
256 if (inode->i_state & I_CLEAR)
257 BUG();
258 wait_on_inode(inode); 254 wait_on_inode(inode);
259 DQUOT_DROP(inode); 255 DQUOT_DROP(inode);
260 if (inode->i_sb && inode->i_sb->s_op->clear_inode) 256 if (inode->i_sb && inode->i_sb->s_op->clear_inode)
@@ -1054,8 +1050,7 @@ void generic_delete_inode(struct inode *inode)
1054 hlist_del_init(&inode->i_hash); 1050 hlist_del_init(&inode->i_hash);
1055 spin_unlock(&inode_lock); 1051 spin_unlock(&inode_lock);
1056 wake_up_inode(inode); 1052 wake_up_inode(inode);
1057 if (inode->i_state != I_CLEAR) 1053 BUG_ON(inode->i_state != I_CLEAR);
1058 BUG();
1059 destroy_inode(inode); 1054 destroy_inode(inode);
1060} 1055}
1061 1056
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 7b77a954112..ff2a872e80e 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -35,8 +35,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
35 pid_t pid; 35 pid_t pid;
36 int ret = 0; 36 int ret = 0;
37 37
38 if (c->gc_task) 38 BUG_ON(c->gc_task);
39 BUG();
40 39
41 init_completion(&c->gc_thread_start); 40 init_completion(&c->gc_thread_start);
42 init_completion(&c->gc_thread_exit); 41 init_completion(&c->gc_thread_exit);
diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c
index c56bd99a970..ed9a24d19d7 100644
--- a/fs/smbfs/file.c
+++ b/fs/smbfs/file.c
@@ -178,11 +178,9 @@ smb_writepage(struct page *page, struct writeback_control *wbc)
178 unsigned offset = PAGE_CACHE_SIZE; 178 unsigned offset = PAGE_CACHE_SIZE;
179 int err; 179 int err;
180 180
181 if (!mapping) 181 BUG_ON(!mapping);
182 BUG();
183 inode = mapping->host; 182 inode = mapping->host;
184 if (!inode) 183 BUG_ON(!inode);
185 BUG();
186 184
187 end_index = inode->i_size >> PAGE_CACHE_SHIFT; 185 end_index = inode->i_size >> PAGE_CACHE_SHIFT;
188 186
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index f26880a4785..6cfdc9a8777 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -50,7 +50,7 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
50 return sd; 50 return sd;
51} 51}
52 52
53/** 53/*
54 * 54 *
55 * Return -EEXIST if there is already a sysfs element with the same name for 55 * Return -EEXIST if there is already a sysfs element with the same name for
56 * the same parent. 56 * the same parent.
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 4c29ac41ac3..f0b347bd12c 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -175,8 +175,7 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
175 struct bin_attribute * bin_attr; 175 struct bin_attribute * bin_attr;
176 struct sysfs_symlink * sl; 176 struct sysfs_symlink * sl;
177 177
178 if (!sd || !sd->s_element) 178 BUG_ON(!sd || !sd->s_element);
179 BUG();
180 179
181 switch (sd->s_type) { 180 switch (sd->s_type) {
182 case SYSFS_DIR: 181 case SYSFS_DIR:
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 8c66e9270dd..d7074341ee8 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -253,8 +253,7 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
253 253
254 lock_page(page); 254 lock_page(page);
255 err = mapping->a_ops->prepare_write(NULL, page, from, to); 255 err = mapping->a_ops->prepare_write(NULL, page, from, to);
256 if (err) 256 BUG_ON(err);
257 BUG();
258 de->inode = 0; 257 de->inode = 0;
259 err = dir_commit_chunk(page, from, to); 258 err = dir_commit_chunk(page, from, to);
260 dir_put_page(page); 259 dir_put_page(page);
@@ -353,8 +352,7 @@ void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
353 352
354 lock_page(page); 353 lock_page(page);
355 err = page->mapping->a_ops->prepare_write(NULL, page, from, to); 354 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
356 if (err) 355 BUG_ON(err);
357 BUG();
358 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); 356 de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
359 err = dir_commit_chunk(page, from, to); 357 err = dir_commit_chunk(page, from, to);
360 dir_put_page(page); 358 dir_put_page(page);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 81e0e8459af..2983afd5e7f 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -312,12 +312,10 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head
312 err = 0; 312 err = 0;
313 313
314 bh = inode_getblk(inode, block, &err, &phys, &new); 314 bh = inode_getblk(inode, block, &err, &phys, &new);
315 if (bh) 315 BUG_ON(bh);
316 BUG();
317 if (err) 316 if (err)
318 goto abort; 317 goto abort;
319 if (!phys) 318 BUG_ON(!phys);
320 BUG();
321 319
322 if (new) 320 if (new)
323 set_buffer_new(bh_result); 321 set_buffer_new(bh_result);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4ed7e602d70..1e9ebaba07b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -864,7 +864,7 @@ struct super_block {
864 */ 864 */
865 struct mutex s_vfs_rename_mutex; /* Kludge */ 865 struct mutex s_vfs_rename_mutex; /* Kludge */
866 866
867 /* Granuality of c/m/atime in ns. 867 /* Granularity of c/m/atime in ns.
868 Cannot be worse than a second */ 868 Cannot be worse than a second */
869 u32 s_time_gran; 869 u32 s_time_gran;
870}; 870};
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index b2093928761..306acf1dc6d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -80,7 +80,7 @@ struct hrtimer_sleeper {
80 * @first: pointer to the timer node which expires first 80 * @first: pointer to the timer node which expires first
81 * @resolution: the resolution of the clock, in nanoseconds 81 * @resolution: the resolution of the clock, in nanoseconds
82 * @get_time: function to retrieve the current time of the clock 82 * @get_time: function to retrieve the current time of the clock
83 * @get_sofirq_time: function to retrieve the current time from the softirq 83 * @get_softirq_time: function to retrieve the current time from the softirq
84 * @curr_timer: the timer which is executing a callback right now 84 * @curr_timer: the timer which is executing a callback right now
85 * @softirq_time: the time when running the hrtimer queue in the softirq 85 * @softirq_time: the time when running the hrtimer queue in the softirq
86 */ 86 */
diff --git a/ipc/shm.c b/ipc/shm.c
index f806a2e314e..6b0c9af5bbf 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -91,8 +91,8 @@ static inline int shm_addid(struct shmid_kernel *shp)
91static inline void shm_inc (int id) { 91static inline void shm_inc (int id) {
92 struct shmid_kernel *shp; 92 struct shmid_kernel *shp;
93 93
94 if(!(shp = shm_lock(id))) 94 shp = shm_lock(id);
95 BUG(); 95 BUG_ON(!shp);
96 shp->shm_atim = get_seconds(); 96 shp->shm_atim = get_seconds();
97 shp->shm_lprid = current->tgid; 97 shp->shm_lprid = current->tgid;
98 shp->shm_nattch++; 98 shp->shm_nattch++;
@@ -142,8 +142,8 @@ static void shm_close (struct vm_area_struct *shmd)
142 142
143 mutex_lock(&shm_ids.mutex); 143 mutex_lock(&shm_ids.mutex);
144 /* remove from the list of attaches of the shm segment */ 144 /* remove from the list of attaches of the shm segment */
145 if(!(shp = shm_lock(id))) 145 shp = shm_lock(id);
146 BUG(); 146 BUG_ON(!shp);
147 shp->shm_lprid = current->tgid; 147 shp->shm_lprid = current->tgid;
148 shp->shm_dtim = get_seconds(); 148 shp->shm_dtim = get_seconds();
149 shp->shm_nattch--; 149 shp->shm_nattch--;
@@ -283,8 +283,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
283 err = -EEXIST; 283 err = -EEXIST;
284 } else { 284 } else {
285 shp = shm_lock(id); 285 shp = shm_lock(id);
286 if(shp==NULL) 286 BUG_ON(shp==NULL);
287 BUG();
288 if (shp->shm_segsz < size) 287 if (shp->shm_segsz < size)
289 err = -EINVAL; 288 err = -EINVAL;
290 else if (ipcperms(&shp->shm_perm, shmflg)) 289 else if (ipcperms(&shp->shm_perm, shmflg))
@@ -774,8 +773,8 @@ invalid:
774 up_write(&current->mm->mmap_sem); 773 up_write(&current->mm->mmap_sem);
775 774
776 mutex_lock(&shm_ids.mutex); 775 mutex_lock(&shm_ids.mutex);
777 if(!(shp = shm_lock(shmid))) 776 shp = shm_lock(shmid);
778 BUG(); 777 BUG_ON(!shp);
779 shp->shm_nattch--; 778 shp->shm_nattch--;
780 if(shp->shm_nattch == 0 && 779 if(shp->shm_nattch == 0 &&
781 shp->shm_perm.mode & SHM_DEST) 780 shp->shm_perm.mode & SHM_DEST)
diff --git a/ipc/util.c b/ipc/util.c
index 23151ef3259..5e785a29e1e 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -266,8 +266,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
266{ 266{
267 struct kern_ipc_perm* p; 267 struct kern_ipc_perm* p;
268 int lid = id % SEQ_MULTIPLIER; 268 int lid = id % SEQ_MULTIPLIER;
269 if(lid >= ids->entries->size) 269 BUG_ON(lid >= ids->entries->size);
270 BUG();
271 270
272 /* 271 /*
273 * do not need a rcu_dereference()() here to force ordering 272 * do not need a rcu_dereference()() here to force ordering
@@ -275,8 +274,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
275 */ 274 */
276 p = ids->entries->p[lid]; 275 p = ids->entries->p[lid];
277 ids->entries->p[lid] = NULL; 276 ids->entries->p[lid] = NULL;
278 if(p==NULL) 277 BUG_ON(p==NULL);
279 BUG();
280 ids->in_use--; 278 ids->in_use--;
281 279
282 if (lid == ids->max_id) { 280 if (lid == ids->max_id) {
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9fd8d4f0359..ce0dfb8f4a4 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -41,7 +41,7 @@ config SOFTWARE_SUSPEND
41 depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP) 41 depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
42 ---help--- 42 ---help---
43 Enable the possibility of suspending the machine. 43 Enable the possibility of suspending the machine.
44 It doesn't need APM. 44 It doesn't need ACPI or APM.
45 You may suspend your machine by 'swsusp' or 'shutdown -z <time>' 45 You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
46 (patch for sysvinit needed). 46 (patch for sysvinit needed).
47 47
diff --git a/kernel/printk.c b/kernel/printk.c
index 8cc19431e74..c056f332443 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -360,8 +360,7 @@ static void call_console_drivers(unsigned long start, unsigned long end)
360 unsigned long cur_index, start_print; 360 unsigned long cur_index, start_print;
361 static int msg_level = -1; 361 static int msg_level = -1;
362 362
363 if (((long)(start - end)) > 0) 363 BUG_ON(((long)(start - end)) > 0);
364 BUG();
365 364
366 cur_index = start; 365 cur_index = start;
367 start_print = start; 366 start_print = start;
@@ -708,8 +707,7 @@ int __init add_preferred_console(char *name, int idx, char *options)
708 */ 707 */
709void acquire_console_sem(void) 708void acquire_console_sem(void)
710{ 709{
711 if (in_interrupt()) 710 BUG_ON(in_interrupt());
712 BUG();
713 down(&console_sem); 711 down(&console_sem);
714 console_locked = 1; 712 console_locked = 1;
715 console_may_schedule = 1; 713 console_may_schedule = 1;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 86a7f6c60cb..0eeb7e66722 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -30,8 +30,7 @@
30 */ 30 */
31void __ptrace_link(task_t *child, task_t *new_parent) 31void __ptrace_link(task_t *child, task_t *new_parent)
32{ 32{
33 if (!list_empty(&child->ptrace_list)) 33 BUG_ON(!list_empty(&child->ptrace_list));
34 BUG();
35 if (child->parent == new_parent) 34 if (child->parent == new_parent)
36 return; 35 return;
37 list_add(&child->ptrace_list, &child->parent->ptrace_children); 36 list_add(&child->ptrace_list, &child->parent->ptrace_children);
diff --git a/kernel/signal.c b/kernel/signal.c
index 92025b10879..5ccaac505e8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -769,8 +769,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
769{ 769{
770 int ret = 0; 770 int ret = 0;
771 771
772 if (!irqs_disabled()) 772 BUG_ON(!irqs_disabled());
773 BUG();
774 assert_spin_locked(&t->sighand->siglock); 773 assert_spin_locked(&t->sighand->siglock);
775 774
776 /* Short-circuit ignored signals. */ 775 /* Short-circuit ignored signals. */
@@ -1384,8 +1383,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1384 * the overrun count. Other uses should not try to 1383 * the overrun count. Other uses should not try to
1385 * send the signal multiple times. 1384 * send the signal multiple times.
1386 */ 1385 */
1387 if (q->info.si_code != SI_TIMER) 1386 BUG_ON(q->info.si_code != SI_TIMER);
1388 BUG();
1389 q->info.si_overrun++; 1387 q->info.si_overrun++;
1390 goto out; 1388 goto out;
1391 } 1389 }
diff --git a/kernel/time.c b/kernel/time.c
index ff8e7019c4c..b00ddc71ced 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(current_kernel_time);
410 * current_fs_time - Return FS time 410 * current_fs_time - Return FS time
411 * @sb: Superblock. 411 * @sb: Superblock.
412 * 412 *
413 * Return the current time truncated to the time granuality supported by 413 * Return the current time truncated to the time granularity supported by
414 * the fs. 414 * the fs.
415 */ 415 */
416struct timespec current_fs_time(struct super_block *sb) 416struct timespec current_fs_time(struct super_block *sb)
@@ -421,11 +421,11 @@ struct timespec current_fs_time(struct super_block *sb)
421EXPORT_SYMBOL(current_fs_time); 421EXPORT_SYMBOL(current_fs_time);
422 422
423/** 423/**
424 * timespec_trunc - Truncate timespec to a granuality 424 * timespec_trunc - Truncate timespec to a granularity
425 * @t: Timespec 425 * @t: Timespec
426 * @gran: Granuality in ns. 426 * @gran: Granularity in ns.
427 * 427 *
428 * Truncate a timespec to a granuality. gran must be smaller than a second. 428 * Truncate a timespec to a granularity. gran must be smaller than a second.
429 * Always rounds down. 429 * Always rounds down.
430 * 430 *
431 * This function should be only used for timestamps returned by 431 * This function should be only used for timestamps returned by
diff --git a/kernel/timer.c b/kernel/timer.c
index 6b812c04737..c3a874f1393 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1479,8 +1479,7 @@ register_time_interpolator(struct time_interpolator *ti)
1479 unsigned long flags; 1479 unsigned long flags;
1480 1480
1481 /* Sanity check */ 1481 /* Sanity check */
1482 if (ti->frequency == 0 || ti->mask == 0) 1482 BUG_ON(ti->frequency == 0 || ti->mask == 0);
1483 BUG();
1484 1483
1485 ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; 1484 ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
1486 spin_lock(&time_interpolator_lock); 1485 spin_lock(&time_interpolator_lock);
diff --git a/mm/highmem.c b/mm/highmem.c
index 55885f64af4..9b274fdf9d0 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
74 pkmap_count[i] = 0; 74 pkmap_count[i] = 0;
75 75
76 /* sanity check */ 76 /* sanity check */
77 if (pte_none(pkmap_page_table[i])) 77 BUG_ON(pte_none(pkmap_page_table[i]));
78 BUG();
79 78
80 /* 79 /*
81 * Don't need an atomic fetch-and-clear op here; 80 * Don't need an atomic fetch-and-clear op here;
@@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
158 if (!vaddr) 157 if (!vaddr)
159 vaddr = map_new_virtual(page); 158 vaddr = map_new_virtual(page);
160 pkmap_count[PKMAP_NR(vaddr)]++; 159 pkmap_count[PKMAP_NR(vaddr)]++;
161 if (pkmap_count[PKMAP_NR(vaddr)] < 2) 160 BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
162 BUG();
163 spin_unlock(&kmap_lock); 161 spin_unlock(&kmap_lock);
164 return (void*) vaddr; 162 return (void*) vaddr;
165} 163}
@@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
174 172
175 spin_lock(&kmap_lock); 173 spin_lock(&kmap_lock);
176 vaddr = (unsigned long)page_address(page); 174 vaddr = (unsigned long)page_address(page);
177 if (!vaddr) 175 BUG_ON(!vaddr);
178 BUG();
179 nr = PKMAP_NR(vaddr); 176 nr = PKMAP_NR(vaddr);
180 177
181 /* 178 /*
@@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
220 return 0; 217 return 0;
221 218
222 page_pool = mempool_create_page_pool(POOL_SIZE, 0); 219 page_pool = mempool_create_page_pool(POOL_SIZE, 0);
223 if (!page_pool) 220 BUG_ON(!page_pool);
224 BUG();
225 printk("highmem bounce pool size: %d pages\n", POOL_SIZE); 221 printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
226 222
227 return 0; 223 return 0;
@@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
264 260
265 isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa, 261 isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
266 mempool_free_pages, (void *) 0); 262 mempool_free_pages, (void *) 0);
267 if (!isa_page_pool) 263 BUG_ON(!isa_page_pool);
268 BUG();
269 264
270 printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); 265 printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
271 return 0; 266 return 0;
diff --git a/mm/mmap.c b/mm/mmap.c
index 4f5b5709136..e780d19aa21 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
294 i = browse_rb(&mm->mm_rb); 294 i = browse_rb(&mm->mm_rb);
295 if (i != mm->map_count) 295 if (i != mm->map_count)
296 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1; 296 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
297 if (bug) 297 BUG_ON(bug);
298 BUG();
299} 298}
300#else 299#else
301#define validate_mm(mm) do { } while (0) 300#define validate_mm(mm) do { } while (0)
@@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
432 struct rb_node ** rb_link, * rb_parent; 431 struct rb_node ** rb_link, * rb_parent;
433 432
434 __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent); 433 __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
435 if (__vma && __vma->vm_start < vma->vm_end) 434 BUG_ON(__vma && __vma->vm_start < vma->vm_end);
436 BUG();
437 __vma_link(mm, vma, prev, rb_link, rb_parent); 435 __vma_link(mm, vma, prev, rb_link, rb_parent);
438 mm->map_count++; 436 mm->map_count++;
439} 437}
@@ -813,8 +811,7 @@ try_prev:
813 * (e.g. stash info in next's anon_vma_node when assigning 811 * (e.g. stash info in next's anon_vma_node when assigning
814 * an anon_vma, or when trying vma_merge). Another time. 812 * an anon_vma, or when trying vma_merge). Another time.
815 */ 813 */
816 if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma) 814 BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
817 BUG();
818 if (!near) 815 if (!near)
819 goto none; 816 goto none;
820 817
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 893d7677579..6dcce3a4bbd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
258/** 258/**
259 * balance_dirty_pages_ratelimited_nr - balance dirty memory state 259 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
260 * @mapping: address_space which was dirtied 260 * @mapping: address_space which was dirtied
261 * @nr_pages: number of pages which the caller has just dirtied 261 * @nr_pages_dirtied: number of pages which the caller has just dirtied
262 * 262 *
263 * Processes which are dirtying memory should call in here once for each page 263 * Processes which are dirtying memory should call in here once for each page
264 * which was newly dirtied. The function will periodically check the system's 264 * which was newly dirtied. The function will periodically check the system's
diff --git a/mm/slab.c b/mm/slab.c
index 4cbf8bb1355..f055c142021 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
1297 if (cache_cache.num) 1297 if (cache_cache.num)
1298 break; 1298 break;
1299 } 1299 }
1300 if (!cache_cache.num) 1300 BUG_ON(!cache_cache.num);
1301 BUG();
1302 cache_cache.gfporder = order; 1301 cache_cache.gfporder = order;
1303 cache_cache.colour = left_over / cache_cache.colour_off; 1302 cache_cache.colour = left_over / cache_cache.colour_off;
1304 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1303 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
1974 * Always checks flags, a caller might be expecting debug support which 1973 * Always checks flags, a caller might be expecting debug support which
1975 * isn't available. 1974 * isn't available.
1976 */ 1975 */
1977 if (flags & ~CREATE_MASK) 1976 BUG_ON(flags & ~CREATE_MASK);
1978 BUG();
1979 1977
1980 /* 1978 /*
1981 * Check that size is in terms of words. This is needed to avoid 1979 * Check that size is in terms of words. This is needed to avoid
@@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
2206 2204
2207 slabp = list_entry(l3->slabs_free.prev, struct slab, list); 2205 slabp = list_entry(l3->slabs_free.prev, struct slab, list);
2208#if DEBUG 2206#if DEBUG
2209 if (slabp->inuse) 2207 BUG_ON(slabp->inuse);
2210 BUG();
2211#endif 2208#endif
2212 list_del(&slabp->list); 2209 list_del(&slabp->list);
2213 2210
@@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
2248 */ 2245 */
2249int kmem_cache_shrink(struct kmem_cache *cachep) 2246int kmem_cache_shrink(struct kmem_cache *cachep)
2250{ 2247{
2251 if (!cachep || in_interrupt()) 2248 BUG_ON(!cachep || in_interrupt());
2252 BUG();
2253 2249
2254 return __cache_shrink(cachep); 2250 return __cache_shrink(cachep);
2255} 2251}
@@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
2277 int i; 2273 int i;
2278 struct kmem_list3 *l3; 2274 struct kmem_list3 *l3;
2279 2275
2280 if (!cachep || in_interrupt()) 2276 BUG_ON(!cachep || in_interrupt());
2281 BUG();
2282 2277
2283 /* Don't let CPUs to come and go */ 2278 /* Don't let CPUs to come and go */
2284 lock_cpu_hotplug(); 2279 lock_cpu_hotplug();
@@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
2477 * Be lazy and only check for valid flags here, keeping it out of the 2472 * Be lazy and only check for valid flags here, keeping it out of the
2478 * critical path in kmem_cache_alloc(). 2473 * critical path in kmem_cache_alloc().
2479 */ 2474 */
2480 if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)) 2475 BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
2481 BUG();
2482 if (flags & SLAB_NO_GROW) 2476 if (flags & SLAB_NO_GROW)
2483 return 0; 2477 return 0;
2484 2478
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d7af296833f..e0e1583f32c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
148 swp_entry_t entry; 148 swp_entry_t entry;
149 int err; 149 int err;
150 150
151 if (!PageLocked(page)) 151 BUG_ON(!PageLocked(page));
152 BUG();
153 152
154 for (;;) { 153 for (;;) {
155 entry = get_swap_page(); 154 entry = get_swap_page();
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 729eb3eec75..c0504f1e34e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
321 int i; 321 int i;
322 322
323 for (i = 0; i < area->nr_pages; i++) { 323 for (i = 0; i < area->nr_pages; i++) {
324 if (unlikely(!area->pages[i])) 324 BUG_ON(!area->pages[i]);
325 BUG();
326 __free_page(area->pages[i]); 325 __free_page(area->pages[i]);
327 } 326 }
328 327
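
For reference, every hunk above applies the same mechanical conversion: an open-coded

	if (cond)
		BUG();

becomes the single statement BUG_ON(cond);. The sketch below is a minimal userspace stand-in for the two kernel macros, intended only to illustrate why the forms are equivalent; it is not the kernel's definition. The real macros (on most architectures of this era, in include/asm-generic/bug.h) dump registers and kill the current task rather than calling abort(), and BUG_ON() additionally wraps its condition in unlikely() so the compiler can move the failure branch off the hot path.

	/*
	 * Minimal userspace stand-in for BUG() / BUG_ON(), for illustration
	 * only.  The kernel versions oops the current task; here we abort().
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define BUG() \
		do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
	#define BUG_ON(cond) \
		do { if (cond) BUG(); } while (0)

	static void old_style(void *dev)
	{
		if (!dev)		/* open-coded check ...          */
			BUG();		/* ... followed by a bare BUG()  */
	}

	static void new_style(void *dev)
	{
		BUG_ON(!dev);		/* same check, one self-describing statement */
	}

	int main(void)
	{
		int x = 42;

		old_style(&x);		/* passes: pointer is non-NULL */
		new_style(&x);		/* passes: identical condition */
		printf("both forms accept a non-NULL pointer\n");
		return 0;
	}

Built and run as, e.g., gcc -Wall bug_on_demo.c && ./a.out (a hypothetical filename), the sketch prints the final message because both helpers are handed a valid pointer; passing NULL to either one trips the identical check, which is the point of the conversion.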