124 files changed, 1481 insertions, 1129 deletions
diff --git a/Documentation/fujitsu/frv/kernel-ABI.txt b/Documentation/fujitsu/frv/kernel-ABI.txt new file mode 100644 index 000000000000..0ed9b0a779bc --- /dev/null +++ b/Documentation/fujitsu/frv/kernel-ABI.txt | |||
@@ -0,0 +1,234 @@ | |||
1 | ================================= | ||
2 | INTERNAL KERNEL ABI FOR FR-V ARCH | ||
3 | ================================= | ||
4 | |||
5 | The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers | ||
6 | are used for special purposes, and the ABI is not consistent between modules and the core kernel, | ||
7 | or between MMU and no-MMU builds. | ||
8 | |||
9 | This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and | ||
10 | most of them do not have any scratch registers, so at least one general-purpose register has to be | ||
11 | clobbered on entry to the kernel. Also, within the kernel core, it is possible to simply | ||
12 | jump or call directly between functions using a relative offset. This cannot be extended to modules | ||
13 | because the displacement is likely to be too great. Thus in modules the address of a function to | ||
14 | call must be calculated in a register and then used, requiring two extra instructions. | ||
15 | |||
16 | This document has the following sections: | ||
17 | |||
18 | (*) System call register ABI | ||
19 | (*) CPU operating modes | ||
20 | (*) Internal kernel-mode register ABI | ||
21 | (*) Internal debug-mode register ABI | ||
22 | (*) Virtual interrupt handling | ||
23 | |||
24 | |||
25 | ======================== | ||
26 | SYSTEM CALL REGISTER ABI | ||
27 | ======================== | ||
28 | |||
29 | When a system call is made, the following registers are effective: | ||
30 | |||
31 | REGISTERS CALL RETURN | ||
32 | =============== ======================= ======================= | ||
33 | GR7 System call number Preserved | ||
34 | GR8 Syscall arg #1 Return value | ||
35 | GR9-GR13 Syscall arg #2-6 Preserved | ||
36 | |||
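To make the table above concrete, here is a rough sketch of how userspace might issue a system
call under this register convention, using GCC explicit register variables. The "tira gr0,#0"
trap instruction and the syscall number used are assumptions based on the usual FR-V convention,
not something this table specifies.

	/* Hedged sketch only: write(fd, buf, count) via the register ABI above. */
	static inline long frv_sys_write(int fd, const void *buf, unsigned long count)
	{
		register unsigned long nr asm("gr7") = 4;	/* assumed __NR_write */
		register unsigned long a1 asm("gr8") = fd;	/* arg #1, becomes return value */
		register unsigned long a2 asm("gr9") = (unsigned long) buf;
		register unsigned long a3 asm("gr10") = count;

		asm volatile("tira gr0,#0"		/* assumed FR-V syscall trap */
			     : "+r"(a1)
			     : "r"(nr), "r"(a2), "r"(a3)
			     : "memory");

		return a1;				/* GR8 carries the return value */
	}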
37 | |||
38 | =================== | ||
39 | CPU OPERATING MODES | ||
40 | =================== | ||
41 | |||
42 | The FR-V CPU has three basic operating modes. In order of increasing capability: | ||
43 | |||
44 | (1) User mode. | ||
45 | |||
46 | Basic userspace running mode. | ||
47 | |||
48 | (2) Kernel mode. | ||
49 | |||
50 | Normal kernel mode. There are many additional control registers available that may be | ||
51 | accessed in this mode, in addition to all the stuff available to user mode. This has two | ||
52 | submodes: | ||
53 | |||
54 | (a) Exceptions enabled (PSR.T == 1). | ||
55 | |||
56 | Exceptions will invoke the appropriate normal kernel mode handler. On entry to the | ||
57 | handler, the PSR.T bit will be cleared. | ||
58 | |||
59 | (b) Exceptions disabled (PSR.T == 0). | ||
60 | |||
61 | No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to | ||
62 | halt unless the CPU is told to jump into debug mode instead. | ||
63 | |||
64 | (3) Debug mode. | ||
65 | |||
66 | No exceptions may happen in this mode. Memory protection and management exceptions will be | ||
67 | flagged for later consideration, but the exception handler won't be invoked. Debugging traps | ||
68 | such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by | ||
69 | debugging events obtained from the other two modes. | ||
70 | |||
71 | All kernel mode registers may be accessed, plus a few extra debugging-specific registers. | ||
72 | |||
73 | |||
74 | ================================= | ||
75 | INTERNAL KERNEL-MODE REGISTER ABI | ||
76 | ================================= | ||
77 | |||
78 | There are a number of permanent register assignments that are set up by entry.S in the exception | ||
79 | prologue. Note that there is a complete set of exception prologues for each of the user->kernel | ||
80 | and kernel->kernel transitions. There are also user->debug and kernel->debug mode | ||
81 | transition prologues. | ||
82 | |||
83 | |||
84 | REGISTER FLAVOUR USE | ||
85 | =============== ======= ==================================================== | ||
86 | GR1 Supervisor stack pointer | ||
87 | GR15 Current thread info pointer | ||
88 | GR16 GP-Rel base register for small data | ||
89 | GR28 Current exception frame pointer (__frame) | ||
90 | GR29 Current task pointer (current) | ||
91 | GR30 Destroyed by kernel mode entry | ||
92 | GR31 NOMMU Destroyed by debug mode entry | ||
93 | GR31 MMU Destroyed by TLB miss kernel mode entry | ||
94 | CCR.ICC2 Virtual interrupt disablement tracking | ||
95 | CCCR.CC3 Cleared by exception prologue (atomic op emulation) | ||
96 | SCR0 MMU See mmu-layout.txt. | ||
97 | SCR1 MMU See mmu-layout.txt. | ||
98 | SCR2 MMU Save for EAR0 (destroyed by icache insns in debug mode) | ||
99 | SCR3 MMU Save for GR31 during debug exceptions | ||
100 | DAMR/IAMR NOMMU Fixed memory protection layout. | ||
101 | DAMR/IAMR MMU See mmu-layout.txt. | ||
102 | |||
103 | |||
104 | Certain registers are also used or modified across function calls: | ||
105 | |||
106 | REGISTER CALL RETURN | ||
107 | =============== =============================== =============================== | ||
108 | GR0 Fixed Zero - | ||
109 | GR2 Function call frame pointer | ||
110 | GR3 Special Preserved | ||
111 | GR3-GR7 - Clobbered | ||
112 | GR8 Function call arg #1 Return value (or clobbered) | ||
113 | GR9 Function call arg #2 Return value MSW (or clobbered) | ||
114 | GR10-GR13 Function call arg #3-#6 Clobbered | ||
115 | GR14 - Clobbered | ||
116 | GR15-GR16 Special Preserved | ||
117 | GR17-GR27 - Preserved | ||
118 | GR28-GR31 Special Only accessed explicitly | ||
119 | LR Return address after CALL Clobbered | ||
120 | CCR/CCCR - Mostly Clobbered | ||
121 | |||
122 | |||
123 | ================================ | ||
124 | INTERNAL DEBUG-MODE REGISTER ABI | ||
125 | ================================ | ||
126 | |||
127 | This is the same as the kernel-mode register ABI for function calls. The difference is that in | ||
128 | debug-mode there's a different stack and a different exception frame. Almost all the global | ||
129 | registers from kernel-mode (including the stack pointer) may be changed. | ||
130 | |||
131 | REGISTER FLAVOUR USE | ||
132 | =============== ======= ==================================================== | ||
133 | GR1 Debug stack pointer | ||
134 | GR16 GP-Rel base register for small data | ||
135 | GR31 Current debug exception frame pointer (__debug_frame) | ||
136 | SCR3 MMU Saved value of GR31 | ||
137 | |||
138 | |||
139 | Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be | ||
140 | exceedingly careful not to perform any such operations that would interact with the main kernel. Hence | ||
141 | the debug mode code (gdbstub) is almost completely self-contained. The only external code used is | ||
142 | the sprintf family of functions. | ||
143 | |||
144 | Furthermore, break.S is so complicated because single-step mode does not switch off on entry to an | ||
145 | exception. That means unless manually disabled, single-stepping will blithely go on stepping into | ||
146 | things like interrupts. See gdbstub.txt for more information. | ||
147 | |||
148 | |||
149 | ========================== | ||
150 | VIRTUAL INTERRUPT HANDLING | ||
151 | ========================== | ||
152 | |||
153 | Because accesses to the PSR are so slow, and to disable interrupts we have to access it twice (once | ||
154 | to read and once to write), we don't actually disable interrupts at all if we don't have to. What | ||
155 | we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we | ||
156 | then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume | ||
157 | execution at the point the interrupt happened. Setting condition flags as a side effect of an | ||
158 | arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the | ||
159 | kernel - it does not affect userspace. | ||
160 | |||
161 | The flags we use are: | ||
162 | |||
163 | (*) CCR.ICC2.Z [Zero flag] | ||
164 | |||
165 | Set to virtually disable interrupts, clear when interrupts are virtually enabled. Can be | ||
166 | modified by logical instructions without affecting the Carry flag. | ||
167 | |||
168 | (*) CCR.ICC2.C [Carry flag] | ||
169 | |||
170 | Clear to indicate hardware interrupts are really disabled, set otherwise. | ||
171 | |||
172 | |||
173 | What happens is this: | ||
174 | |||
175 | (1) Normal kernel-mode operation. | ||
176 | |||
177 | ICC2.Z is 0, ICC2.C is 1. | ||
178 | |||
179 | (2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs | ||
180 | doing. This is done simply with an unlikely BEQ instruction. | ||
181 | |||
182 | (3) The interrupts are disabled (local_irq_disable) | ||
183 | |||
184 | ICC2.Z is set to 1. | ||
185 | |||
186 | (4) If interrupts were then re-enabled (local_irq_enable): | ||
187 | |||
188 | ICC2.Z would be set to 0. | ||
189 | |||
190 | A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if | ||
191 | interrupts were now virtually enabled, but physically disabled - which they're not, so the | ||
192 | trap isn't taken. The kernel would then be back to state (1). | ||
193 | |||
194 | (5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt | ||
195 | shouldn't actually have happened. It jumps aside, and there really disables interrupts by | ||
196 | setting PSR.PIL to 14 and then clears ICC2.C. | ||
197 | |||
198 | (6) If interrupts were then saved and disabled again (local_irq_save): | ||
199 | |||
200 | ICC2.Z would be shifted into the save variable and masked off (giving a 1). | ||
201 | |||
202 | ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0). | ||
203 | |||
204 | (7) If interrupts were then restored from state (6) (local_irq_restore): | ||
205 | |||
206 | ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which | ||
207 | gives a result of 0 - thus leaving ICC2.Z set. | ||
208 | |||
209 | ICC2.C would remain unaffected (ie: 0). | ||
210 | |||
211 | A TIHI #2 instruction would be used to again assay the current state, but this would do | ||
212 | nothing as Z==1. | ||
213 | |||
214 | (8) If interrupts were then enabled (local_irq_enable): | ||
215 | |||
216 | ICC2.Z would be cleared. ICC2.C would be left unaffected. Both flags would now be 0. | ||
217 | |||
218 | A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0 | ||
219 | [interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true. | ||
220 | |||
221 | (9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to | ||
222 | 1 and return. | ||
223 | |||
224 | (10) Immediately upon returning, the pending interrupt would be taken. | ||
225 | |||
226 | (11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is | ||
227 | clear, BEQ fails as per step (2)). | ||
228 | |||
229 | (12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely | ||
230 | enabled - or else the kernel wouldn't be here. | ||
231 | |||
232 | (13) On return from the interrupt handler, things would be back to state (1). | ||
233 | |||
234 | This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL. | ||
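As a hedged illustration of the scheme described above (not necessarily the exact kernel
implementation, which lives in the FR-V headers): virtual disablement only has to touch ICC2 with
a logical instruction, and virtual re-enablement uses TIHI #2 to trap into the handler of step (9)
if a real interrupt arrived in the meantime.

	/* Sketch only: the ICC2 flag manipulation described above. */
	static inline void virt_irq_disable(void)	/* roughly local_irq_disable() */
	{
		/* 0 AND 0 == 0 sets ICC2.Z; logical ops leave ICC2.C untouched */
		asm volatile("andcc gr0,gr0,gr0,icc2" : : : "memory");
	}

	static inline void virt_irq_enable(void)	/* roughly local_irq_enable() */
	{
		/* 0 OR 1 != 0 clears ICC2.Z, then trap #2 if Z==0 && C==0,
		 * i.e. if an interrupt was deferred while virtually disabled */
		asm volatile("oricc gr0,#1,gr0,icc2	\n"
			     "	tihi icc2,gr0,#2"
			     : : : "memory");
	}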
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 0ea5a0c6e827..2c3b1eae4280 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt | |||
@@ -136,17 +136,20 @@ Kprobes, jprobes, and return probes are implemented on the following | |||
136 | architectures: | 136 | architectures: |
137 | 137 | ||
138 | - i386 | 138 | - i386 |
139 | - x86_64 (AMD-64, E64MT) | 139 | - x86_64 (AMD-64, EM64T) |
140 | - ppc64 | 140 | - ppc64 |
141 | - ia64 (Support for probes on certain instruction types is still in progress.) | 141 | - ia64 (Does not support probes on instruction slot1.) |
142 | - sparc64 (Return probes not yet implemented.) | 142 | - sparc64 (Return probes not yet implemented.) |
143 | 143 | ||
144 | 3. Configuring Kprobes | 144 | 3. Configuring Kprobes |
145 | 145 | ||
146 | When configuring the kernel using make menuconfig/xconfig/oldconfig, | 146 | When configuring the kernel using make menuconfig/xconfig/oldconfig, |
147 | ensure that CONFIG_KPROBES is set to "y". Under "Kernel hacking", | 147 | ensure that CONFIG_KPROBES is set to "y". Under "Instrumentation |
148 | look for "Kprobes". You may have to enable "Kernel debugging" | 148 | Support", look for "Kprobes". |
149 | (CONFIG_DEBUG_KERNEL) before you can enable Kprobes. | 149 | |
150 | So that you can load and unload Kprobes-based instrumentation modules, | ||
151 | make sure "Loadable module support" (CONFIG_MODULES) and "Module | ||
152 | unloading" (CONFIG_MODULE_UNLOAD) are set to "y". | ||
150 | 153 | ||
151 | You may also want to ensure that CONFIG_KALLSYMS and perhaps even | 154 | You may also want to ensure that CONFIG_KALLSYMS and perhaps even |
152 | CONFIG_KALLSYMS_ALL are set to "y", since kallsyms_lookup_name() | 155 | CONFIG_KALLSYMS_ALL are set to "y", since kallsyms_lookup_name() |
@@ -262,18 +265,18 @@ at any time after the probe has been registered. | |||
262 | 265 | ||
263 | 5. Kprobes Features and Limitations | 266 | 5. Kprobes Features and Limitations |
264 | 267 | ||
265 | As of Linux v2.6.12, Kprobes allows multiple probes at the same | 268 | Kprobes allows multiple probes at the same address. Currently, |
266 | address. Currently, however, there cannot be multiple jprobes on | 269 | however, there cannot be multiple jprobes on the same function at |
267 | the same function at the same time. | 270 | the same time. |
268 | 271 | ||
269 | In general, you can install a probe anywhere in the kernel. | 272 | In general, you can install a probe anywhere in the kernel. |
270 | In particular, you can probe interrupt handlers. Known exceptions | 273 | In particular, you can probe interrupt handlers. Known exceptions |
271 | are discussed in this section. | 274 | are discussed in this section. |
272 | 275 | ||
273 | For obvious reasons, it's a bad idea to install a probe in | 276 | The register_*probe functions will return -EINVAL if you attempt |
274 | the code that implements Kprobes (mostly kernel/kprobes.c and | 277 | to install a probe in the code that implements Kprobes (mostly |
275 | arch/*/kernel/kprobes.c). A patch in the v2.6.13 timeframe instructs | 278 | kernel/kprobes.c and arch/*/kernel/kprobes.c, but also functions such |
276 | Kprobes to reject such requests. | 279 | as do_page_fault and notifier_call_chain). |
277 | 280 | ||
278 | If you install a probe in an inline-able function, Kprobes makes | 281 | If you install a probe in an inline-able function, Kprobes makes |
279 | no attempt to chase down all inline instances of the function and | 282 | no attempt to chase down all inline instances of the function and |
@@ -290,18 +293,14 @@ from the accidental ones. Don't drink and probe. | |||
290 | 293 | ||
291 | Kprobes makes no attempt to prevent probe handlers from stepping on | 294 | Kprobes makes no attempt to prevent probe handlers from stepping on |
292 | each other -- e.g., probing printk() and then calling printk() from a | 295 | each other -- e.g., probing printk() and then calling printk() from a |
293 | probe handler. As of Linux v2.6.12, if a probe handler hits a probe, | 296 | probe handler. If a probe handler hits a probe, that second probe's |
294 | that second probe's handlers won't be run in that instance. | 297 | handlers won't be run in that instance, and the kprobe.nmissed member |
295 | 298 | of the second probe will be incremented. | |
296 | In Linux v2.6.12 and previous versions, Kprobes' data structures are | 299 | |
297 | protected by a single lock that is held during probe registration and | 300 | As of Linux v2.6.15-rc1, multiple handlers (or multiple instances of |
298 | unregistration and while handlers are run. Thus, no two handlers | 301 | the same handler) may run concurrently on different CPUs. |
299 | can run simultaneously. To improve scalability on SMP systems, | 302 | |
300 | this restriction will probably be removed soon, in which case | 303 | Kprobes does not use mutexes or allocate memory except during |
301 | multiple handlers (or multiple instances of the same handler) may | ||
302 | run concurrently on different CPUs. Code your handlers accordingly. | ||
303 | |||
304 | Kprobes does not use semaphores or allocate memory except during | ||
305 | registration and unregistration. | 304 | registration and unregistration. |
306 | 305 | ||
307 | Probe handlers are run with preemption disabled. Depending on the | 306 | Probe handlers are run with preemption disabled. Depending on the |
@@ -316,11 +315,18 @@ address instead of the real return address for kretprobed functions. | |||
316 | (As far as we can tell, __builtin_return_address() is used only | 315 | (As far as we can tell, __builtin_return_address() is used only |
317 | for instrumentation and error reporting.) | 316 | for instrumentation and error reporting.) |
318 | 317 | ||
319 | If the number of times a function is called does not match the | 318 | If the number of times a function is called does not match the number |
320 | number of times it returns, registering a return probe on that | 319 | of times it returns, registering a return probe on that function may |
321 | function may produce undesirable results. We have the do_exit() | 320 | produce undesirable results. We have the do_exit() case covered. |
322 | and do_execve() cases covered. do_fork() is not an issue. We're | 321 | do_execve() and do_fork() are not an issue. We're unaware of other |
323 | unaware of other specific cases where this could be a problem. | 322 | specific cases where this could be a problem. |
323 | |||
324 | If, upon entry to or exit from a function, the CPU is running on | ||
325 | a stack other than that of the current task, registering a return | ||
326 | probe on that function may produce undesirable results. For this | ||
327 | reason, Kprobes doesn't support return probes (or kprobes or jprobes) | ||
328 | on the x86_64 version of __switch_to(); the registration functions | ||
329 | return -EINVAL. | ||
324 | 330 | ||
325 | 6. Probe Overhead | 331 | 6. Probe Overhead |
326 | 332 | ||
@@ -347,14 +353,12 @@ k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99 | |||
347 | 353 | ||
348 | 7. TODO | 354 | 7. TODO |
349 | 355 | ||
350 | a. SystemTap (http://sourceware.org/systemtap): Work in progress | 356 | a. SystemTap (http://sourceware.org/systemtap): Provides a simplified |
351 | to provide a simplified programming interface for probe-based | 357 | programming interface for probe-based instrumentation. Try it out. |
352 | instrumentation. | 358 | b. Kernel return probes for sparc64. |
353 | b. Improved SMP scalability: Currently, work is in progress to handle | 359 | c. Support for other architectures. |
354 | multiple kprobes in parallel. | 360 | d. User-space probes. |
355 | c. Kernel return probes for sparc64. | 361 | e. Watchpoint probes (which fire on data references). |
356 | d. Support for other architectures. | ||
357 | e. User-space probes. | ||
358 | 362 | ||
359 | 8. Kprobes Example | 363 | 8. Kprobes Example |
360 | 364 | ||
@@ -411,8 +415,7 @@ int init_module(void) | |||
411 | printk("Couldn't find %s to plant kprobe\n", "do_fork"); | 415 | printk("Couldn't find %s to plant kprobe\n", "do_fork"); |
412 | return -1; | 416 | return -1; |
413 | } | 417 | } |
414 | ret = register_kprobe(&kp); | 418 | if ((ret = register_kprobe(&kp) < 0)) { |
415 | if (ret < 0) { | ||
416 | printk("register_kprobe failed, returned %d\n", ret); | 419 | printk("register_kprobe failed, returned %d\n", ret); |
417 | return -1; | 420 | return -1; |
418 | } | 421 | } |
diff --git a/Documentation/mips/AU1xxx_IDE.README b/Documentation/mips/AU1xxx_IDE.README index a7e4c4ea3560..afb31c141d9d 100644 --- a/Documentation/mips/AU1xxx_IDE.README +++ b/Documentation/mips/AU1xxx_IDE.README | |||
@@ -95,11 +95,13 @@ CONFIG_BLK_DEV_IDEDMA_PCI=y | |||
95 | CONFIG_IDEDMA_PCI_AUTO=y | 95 | CONFIG_IDEDMA_PCI_AUTO=y |
96 | CONFIG_BLK_DEV_IDE_AU1XXX=y | 96 | CONFIG_BLK_DEV_IDE_AU1XXX=y |
97 | CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA=y | 97 | CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA=y |
98 | CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON=y | ||
99 | CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128 | 98 | CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128 |
100 | CONFIG_BLK_DEV_IDEDMA=y | 99 | CONFIG_BLK_DEV_IDEDMA=y |
101 | CONFIG_IDEDMA_AUTO=y | 100 | CONFIG_IDEDMA_AUTO=y |
102 | 101 | ||
102 | Also define 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to enable | ||
103 | burst support on the DBDMA controller. | ||
104 | |||
103 | If the system in use needs USB support, enable the following kernel configs for | 105 | If the system in use needs USB support, enable the following kernel configs for |
104 | high IDE to USB throughput. | 106 | high IDE to USB throughput. |
105 | 107 | ||
@@ -115,6 +117,8 @@ CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128 | |||
115 | CONFIG_BLK_DEV_IDEDMA=y | 117 | CONFIG_BLK_DEV_IDEDMA=y |
116 | CONFIG_IDEDMA_AUTO=y | 118 | CONFIG_IDEDMA_AUTO=y |
117 | 119 | ||
120 | Also undefine 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to | ||
121 | disable burst support on the DBDMA controller. | ||
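For reference, the toggle mentioned in the two notes above is just a preprocessor define near the
top of the driver source; a hedged sketch (the exact spelling in au1xxx-ide.c may differ):

	/* in drivers/ide/mips/au1xxx-ide.c -- comment out to disable DBDMA burst support */
	#define IDE_AU1XXX_BURSTMODE	1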
118 | 122 | ||
119 | ADD NEW HARD DISC TO WHITE OR BLACK LIST | 123 | ADD NEW HARD DISC TO WHITE OR BLACK LIST |
120 | ---------------------------------------- | 124 | ---------------------------------------- |
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 60a617aff8ba..e08383712370 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig | |||
@@ -25,6 +25,10 @@ config GENERIC_HARDIRQS | |||
25 | bool | 25 | bool |
26 | default n | 26 | default n |
27 | 27 | ||
28 | config TIME_LOW_RES | ||
29 | bool | ||
30 | default y | ||
31 | |||
28 | mainmenu "Fujitsu FR-V Kernel Configuration" | 32 | mainmenu "Fujitsu FR-V Kernel Configuration" |
29 | 33 | ||
30 | source "init/Kconfig" | 34 | source "init/Kconfig" |
diff --git a/arch/frv/Makefile b/arch/frv/Makefile index 90c0fb8d9dc3..d163747d17c0 100644 --- a/arch/frv/Makefile +++ b/arch/frv/Makefile | |||
@@ -81,7 +81,7 @@ endif | |||
81 | # - reserve CC3 for use with atomic ops | 81 | # - reserve CC3 for use with atomic ops |
82 | # - all the extra registers are dealt with only at context switch time | 82 | # - all the extra registers are dealt with only at context switch time |
83 | CFLAGS += -mno-fdpic -mgpr-32 -msoft-float -mno-media | 83 | CFLAGS += -mno-fdpic -mgpr-32 -msoft-float -mno-media |
84 | CFLAGS += -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 | 84 | CFLAGS += -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 -ffixed-icc2 |
85 | AFLAGS += -mno-fdpic | 85 | AFLAGS += -mno-fdpic |
86 | ASFLAGS += -mno-fdpic | 86 | ASFLAGS += -mno-fdpic |
87 | 87 | ||
diff --git a/arch/frv/kernel/break.S b/arch/frv/kernel/break.S index 33233dc23e29..687c48d62dde 100644 --- a/arch/frv/kernel/break.S +++ b/arch/frv/kernel/break.S | |||
@@ -200,12 +200,20 @@ __break_step: | |||
200 | movsg bpcsr,gr2 | 200 | movsg bpcsr,gr2 |
201 | sethi.p %hi(__entry_kernel_external_interrupt),gr3 | 201 | sethi.p %hi(__entry_kernel_external_interrupt),gr3 |
202 | setlo %lo(__entry_kernel_external_interrupt),gr3 | 202 | setlo %lo(__entry_kernel_external_interrupt),gr3 |
203 | subcc gr2,gr3,gr0,icc0 | 203 | subcc.p gr2,gr3,gr0,icc0 |
204 | sethi %hi(__entry_uspace_external_interrupt),gr3 | ||
205 | setlo.p %lo(__entry_uspace_external_interrupt),gr3 | ||
204 | beq icc0,#2,__break_step_kernel_external_interrupt | 206 | beq icc0,#2,__break_step_kernel_external_interrupt |
205 | sethi.p %hi(__entry_uspace_external_interrupt),gr3 | 207 | subcc.p gr2,gr3,gr0,icc0 |
206 | setlo %lo(__entry_uspace_external_interrupt),gr3 | 208 | sethi %hi(__entry_kernel_external_interrupt_virtually_disabled),gr3 |
207 | subcc gr2,gr3,gr0,icc0 | 209 | setlo.p %lo(__entry_kernel_external_interrupt_virtually_disabled),gr3 |
208 | beq icc0,#2,__break_step_uspace_external_interrupt | 210 | beq icc0,#2,__break_step_uspace_external_interrupt |
211 | subcc.p gr2,gr3,gr0,icc0 | ||
212 | sethi %hi(__entry_kernel_external_interrupt_virtual_reenable),gr3 | ||
213 | setlo.p %lo(__entry_kernel_external_interrupt_virtual_reenable),gr3 | ||
214 | beq icc0,#2,__break_step_kernel_external_interrupt_virtually_disabled | ||
215 | subcc gr2,gr3,gr0,icc0 | ||
216 | beq icc0,#2,__break_step_kernel_external_interrupt_virtual_reenable | ||
209 | 217 | ||
210 | LEDS 0x2007,gr2 | 218 | LEDS 0x2007,gr2 |
211 | 219 | ||
@@ -254,6 +262,9 @@ __break_step_kernel_softprog_interrupt: | |||
254 | # step through an external interrupt from kernel mode | 262 | # step through an external interrupt from kernel mode |
255 | .globl __break_step_kernel_external_interrupt | 263 | .globl __break_step_kernel_external_interrupt |
256 | __break_step_kernel_external_interrupt: | 264 | __break_step_kernel_external_interrupt: |
265 | # deal with virtual interrupt disablement | ||
266 | beq icc2,#0,__break_step_kernel_external_interrupt_virtually_disabled | ||
267 | |||
257 | sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3 | 268 | sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3 |
258 | setlo %lo(__entry_kernel_external_interrupt_reentry),gr3 | 269 | setlo %lo(__entry_kernel_external_interrupt_reentry),gr3 |
259 | 270 | ||
@@ -294,6 +305,64 @@ __break_return_as_kernel_prologue: | |||
294 | #endif | 305 | #endif |
295 | rett #1 | 306 | rett #1 |
296 | 307 | ||
308 | # we single-stepped into an interrupt handler whilst interrupts were merely virtually disabled | ||
309 | # need to really disable interrupts, set flag, fix up and return | ||
310 | __break_step_kernel_external_interrupt_virtually_disabled: | ||
311 | movsg psr,gr2 | ||
312 | andi gr2,#~PSR_PIL,gr2 | ||
313 | ori gr2,#PSR_PIL_14,gr2 /* debugging interrupts only */ | ||
314 | movgs gr2,psr | ||
315 | |||
316 | ldi @(gr31,#REG_CCR),gr3 | ||
317 | movgs gr3,ccr | ||
318 | subcc.p gr0,gr0,gr0,icc2 /* leave Z set, clear C */ | ||
319 | |||
320 | # exceptions must've been enabled and we must've been in supervisor mode | ||
321 | setlos BPSR_BET|BPSR_BS,gr3 | ||
322 | movgs gr3,bpsr | ||
323 | |||
324 | # return to where the interrupt happened | ||
325 | movsg pcsr,gr2 | ||
326 | movgs gr2,bpcsr | ||
327 | |||
328 | lddi.p @(gr31,#REG_GR(2)),gr2 | ||
329 | |||
330 | xor gr31,gr31,gr31 | ||
331 | movgs gr0,brr | ||
332 | #ifdef CONFIG_MMU | ||
333 | movsg scr3,gr31 | ||
334 | #endif | ||
335 | rett #1 | ||
336 | |||
337 | # we stepped through into the virtual interrupt reenablement trap | ||
338 | # | ||
339 | # we also want to single step anyway, but after fixing up so that we get an event on the | ||
340 | # instruction after the broken-into exception returns | ||
341 | .globl __break_step_kernel_external_interrupt_virtual_reenable | ||
342 | __break_step_kernel_external_interrupt_virtual_reenable: | ||
343 | movsg psr,gr2 | ||
344 | andi gr2,#~PSR_PIL,gr2 | ||
345 | movgs gr2,psr | ||
346 | |||
347 | ldi @(gr31,#REG_CCR),gr3 | ||
348 | movgs gr3,ccr | ||
349 | subicc gr0,#1,gr0,icc2 /* clear Z, set C */ | ||
350 | |||
351 | # save the adjusted ICC2 | ||
352 | movsg ccr,gr3 | ||
353 | sti gr3,@(gr31,#REG_CCR) | ||
354 | |||
355 | # exceptions must've been enabled and we must've been in supervisor mode | ||
356 | setlos BPSR_BET|BPSR_BS,gr3 | ||
357 | movgs gr3,bpsr | ||
358 | |||
359 | # return to where the trap happened | ||
360 | movsg pcsr,gr2 | ||
361 | movgs gr2,bpcsr | ||
362 | |||
363 | # and then process the single step | ||
364 | bra __break_continue | ||
365 | |||
297 | # step through an internal exception from uspace mode | 366 | # step through an internal exception from uspace mode |
298 | .globl __break_step_uspace_softprog_interrupt | 367 | .globl __break_step_uspace_softprog_interrupt |
299 | __break_step_uspace_softprog_interrupt: | 368 | __break_step_uspace_softprog_interrupt: |
diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S index 9b9243e2103c..81568acea9cd 100644 --- a/arch/frv/kernel/entry-table.S +++ b/arch/frv/kernel/entry-table.S | |||
@@ -116,6 +116,8 @@ __break_kerneltrap_fixup_table: | |||
116 | .long __break_step_uspace_external_interrupt | 116 | .long __break_step_uspace_external_interrupt |
117 | .section .trap.kernel | 117 | .section .trap.kernel |
118 | .org \tbr_tt | 118 | .org \tbr_tt |
119 | # deal with virtual interrupt disablement | ||
120 | beq icc2,#0,__entry_kernel_external_interrupt_virtually_disabled | ||
119 | bra __entry_kernel_external_interrupt | 121 | bra __entry_kernel_external_interrupt |
120 | .section .trap.fixup.kernel | 122 | .section .trap.fixup.kernel |
121 | .org \tbr_tt >> 2 | 123 | .org \tbr_tt >> 2 |
@@ -259,25 +261,52 @@ __trap_fixup_kernel_data_tlb_miss: | |||
259 | .org TBR_TT_TRAP0 | 261 | .org TBR_TT_TRAP0 |
260 | .rept 127 | 262 | .rept 127 |
261 | bra __entry_uspace_softprog_interrupt | 263 | bra __entry_uspace_softprog_interrupt |
262 | bra __break_step_uspace_softprog_interrupt | 264 | .long 0,0,0 |
263 | .long 0,0 | ||
264 | .endr | 265 | .endr |
265 | .org TBR_TT_BREAK | 266 | .org TBR_TT_BREAK |
266 | bra __entry_break | 267 | bra __entry_break |
267 | .long 0,0,0 | 268 | .long 0,0,0 |
268 | 269 | ||
270 | .section .trap.fixup.user | ||
271 | .org TBR_TT_TRAP0 >> 2 | ||
272 | .rept 127 | ||
273 | .long __break_step_uspace_softprog_interrupt | ||
274 | .endr | ||
275 | .org TBR_TT_BREAK >> 2 | ||
276 | .long 0 | ||
277 | |||
269 | # miscellaneous kernel mode entry points | 278 | # miscellaneous kernel mode entry points |
270 | .section .trap.kernel | 279 | .section .trap.kernel |
271 | .org TBR_TT_TRAP0 | 280 | .org TBR_TT_TRAP0 |
272 | .rept 127 | ||
273 | bra __entry_kernel_softprog_interrupt | 281 | bra __entry_kernel_softprog_interrupt |
274 | bra __break_step_kernel_softprog_interrupt | 282 | .org TBR_TT_TRAP1 |
275 | .long 0,0 | 283 | bra __entry_kernel_softprog_interrupt |
284 | |||
285 | # trap #2 in kernel - reenable interrupts | ||
286 | .org TBR_TT_TRAP2 | ||
287 | bra __entry_kernel_external_interrupt_virtual_reenable | ||
288 | |||
289 | # miscellaneous kernel traps | ||
290 | .org TBR_TT_TRAP3 | ||
291 | .rept 124 | ||
292 | bra __entry_kernel_softprog_interrupt | ||
293 | .long 0,0,0 | ||
276 | .endr | 294 | .endr |
277 | .org TBR_TT_BREAK | 295 | .org TBR_TT_BREAK |
278 | bra __entry_break | 296 | bra __entry_break |
279 | .long 0,0,0 | 297 | .long 0,0,0 |
280 | 298 | ||
299 | .section .trap.fixup.kernel | ||
300 | .org TBR_TT_TRAP0 >> 2 | ||
301 | .long __break_step_kernel_softprog_interrupt | ||
302 | .long __break_step_kernel_softprog_interrupt | ||
303 | .long __break_step_kernel_external_interrupt_virtual_reenable | ||
304 | .rept 124 | ||
305 | .long __break_step_kernel_softprog_interrupt | ||
306 | .endr | ||
307 | .org TBR_TT_BREAK >> 2 | ||
308 | .long 0 | ||
309 | |||
281 | # miscellaneous debug mode entry points | 310 | # miscellaneous debug mode entry points |
282 | .section .trap.break | 311 | .section .trap.break |
283 | .org TBR_TT_BREAK | 312 | .org TBR_TT_BREAK |
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index 5f6548388b74..1d21c8d34d8a 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S | |||
@@ -141,7 +141,10 @@ __entry_uspace_external_interrupt_reentry: | |||
141 | 141 | ||
142 | movsg gner0,gr4 | 142 | movsg gner0,gr4 |
143 | movsg gner1,gr5 | 143 | movsg gner1,gr5 |
144 | stdi gr4,@(gr28,#REG_GNER0) | 144 | stdi.p gr4,@(gr28,#REG_GNER0) |
145 | |||
146 | # interrupts start off fully disabled in the interrupt handler | ||
147 | subcc gr0,gr0,gr0,icc2 /* set Z and clear C */ | ||
145 | 148 | ||
146 | # set up kernel global registers | 149 | # set up kernel global registers |
147 | sethi.p %hi(__kernel_current_task),gr5 | 150 | sethi.p %hi(__kernel_current_task),gr5 |
@@ -193,9 +196,8 @@ __entry_uspace_external_interrupt_reentry: | |||
193 | .type __entry_kernel_external_interrupt,@function | 196 | .type __entry_kernel_external_interrupt,@function |
194 | __entry_kernel_external_interrupt: | 197 | __entry_kernel_external_interrupt: |
195 | LEDS 0x6210 | 198 | LEDS 0x6210 |
196 | 199 | // sub sp,gr15,gr31 | |
197 | sub sp,gr15,gr31 | 200 | // LEDS32 |
198 | LEDS32 | ||
199 | 201 | ||
200 | # set up the stack pointer | 202 | # set up the stack pointer |
201 | or.p sp,gr0,gr30 | 203 | or.p sp,gr0,gr30 |
@@ -231,7 +233,10 @@ __entry_kernel_external_interrupt_reentry: | |||
231 | stdi gr24,@(gr28,#REG_GR(24)) | 233 | stdi gr24,@(gr28,#REG_GR(24)) |
232 | stdi gr26,@(gr28,#REG_GR(26)) | 234 | stdi gr26,@(gr28,#REG_GR(26)) |
233 | sti gr29,@(gr28,#REG_GR(29)) | 235 | sti gr29,@(gr28,#REG_GR(29)) |
234 | stdi gr30,@(gr28,#REG_GR(30)) | 236 | stdi.p gr30,@(gr28,#REG_GR(30)) |
237 | |||
238 | # note virtual interrupts will be fully enabled upon return | ||
239 | subicc gr0,#1,gr0,icc2 /* clear Z, set C */ | ||
235 | 240 | ||
236 | movsg tbr ,gr20 | 241 | movsg tbr ,gr20 |
237 | movsg psr ,gr22 | 242 | movsg psr ,gr22 |
@@ -267,7 +272,10 @@ __entry_kernel_external_interrupt_reentry: | |||
267 | 272 | ||
268 | movsg gner0,gr4 | 273 | movsg gner0,gr4 |
269 | movsg gner1,gr5 | 274 | movsg gner1,gr5 |
270 | stdi gr4,@(gr28,#REG_GNER0) | 275 | stdi.p gr4,@(gr28,#REG_GNER0) |
276 | |||
277 | # interrupts start off fully disabled in the interrupt handler | ||
278 | subcc gr0,gr0,gr0,icc2 /* set Z and clear C */ | ||
271 | 279 | ||
272 | # set the return address | 280 | # set the return address |
273 | sethi.p %hi(__entry_return_from_kernel_interrupt),gr4 | 281 | sethi.p %hi(__entry_return_from_kernel_interrupt),gr4 |
@@ -291,6 +299,45 @@ __entry_kernel_external_interrupt_reentry: | |||
291 | 299 | ||
292 | .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt | 300 | .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt |
293 | 301 | ||
302 | ############################################################################### | ||
303 | # | ||
304 | # deal with interrupts that were actually virtually disabled | ||
305 | # - we need to really disable them, flag the fact and return immediately | ||
306 | # - if you change this, you must alter break.S also | ||
307 | # | ||
308 | ############################################################################### | ||
309 | .balign L1_CACHE_BYTES | ||
310 | .globl __entry_kernel_external_interrupt_virtually_disabled | ||
311 | .type __entry_kernel_external_interrupt_virtually_disabled,@function | ||
312 | __entry_kernel_external_interrupt_virtually_disabled: | ||
313 | movsg psr,gr30 | ||
314 | andi gr30,#~PSR_PIL,gr30 | ||
315 | ori gr30,#PSR_PIL_14,gr30 ; debugging interrupts only | ||
316 | movgs gr30,psr | ||
317 | subcc gr0,gr0,gr0,icc2 ; leave Z set, clear C | ||
318 | rett #0 | ||
319 | |||
320 | .size __entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled | ||
321 | |||
322 | ############################################################################### | ||
323 | # | ||
324 | # deal with re-enablement of interrupts that were pending when virtually re-enabled | ||
325 | # - set ICC2.C, re-enable the real interrupts and return | ||
326 | # - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI] | ||
327 | # - if you change this, you must alter break.S also | ||
328 | # | ||
329 | ############################################################################### | ||
330 | .balign L1_CACHE_BYTES | ||
331 | .globl __entry_kernel_external_interrupt_virtual_reenable | ||
332 | .type __entry_kernel_external_interrupt_virtual_reenable,@function | ||
333 | __entry_kernel_external_interrupt_virtual_reenable: | ||
334 | movsg psr,gr30 | ||
335 | andi gr30,#~PSR_PIL,gr30 ; re-enable interrupts | ||
336 | movgs gr30,psr | ||
337 | subicc gr0,#1,gr0,icc2 ; clear Z, set C | ||
338 | rett #0 | ||
339 | |||
340 | .size __entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable | ||
294 | 341 | ||
295 | ############################################################################### | 342 | ############################################################################### |
296 | # | 343 | # |
@@ -335,6 +382,7 @@ __entry_uspace_softprog_interrupt_reentry: | |||
335 | 382 | ||
336 | sethi.p %hi(__entry_return_from_user_exception),gr23 | 383 | sethi.p %hi(__entry_return_from_user_exception),gr23 |
337 | setlo %lo(__entry_return_from_user_exception),gr23 | 384 | setlo %lo(__entry_return_from_user_exception),gr23 |
385 | |||
338 | bra __entry_common | 386 | bra __entry_common |
339 | 387 | ||
340 | .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt | 388 | .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt |
@@ -495,7 +543,10 @@ __entry_common: | |||
495 | 543 | ||
496 | movsg gner0,gr4 | 544 | movsg gner0,gr4 |
497 | movsg gner1,gr5 | 545 | movsg gner1,gr5 |
498 | stdi gr4,@(gr28,#REG_GNER0) | 546 | stdi.p gr4,@(gr28,#REG_GNER0) |
547 | |||
548 | # set up virtual interrupt disablement | ||
549 | subicc gr0,#1,gr0,icc2 /* clear Z flag, set C flag */ | ||
499 | 550 | ||
500 | # set up kernel global registers | 551 | # set up kernel global registers |
501 | sethi.p %hi(__kernel_current_task),gr5 | 552 | sethi.p %hi(__kernel_current_task),gr5 |
@@ -1418,11 +1469,27 @@ sys_call_table: | |||
1418 | .long sys_add_key | 1469 | .long sys_add_key |
1419 | .long sys_request_key | 1470 | .long sys_request_key |
1420 | .long sys_keyctl | 1471 | .long sys_keyctl |
1421 | .long sys_ni_syscall // sys_vperfctr_open | 1472 | .long sys_ioprio_set |
1422 | .long sys_ni_syscall // sys_vperfctr_control /* 290 */ | 1473 | .long sys_ioprio_get /* 290 */ |
1423 | .long sys_ni_syscall // sys_vperfctr_unlink | 1474 | .long sys_inotify_init |
1424 | .long sys_ni_syscall // sys_vperfctr_iresume | 1475 | .long sys_inotify_add_watch |
1425 | .long sys_ni_syscall // sys_vperfctr_read | 1476 | .long sys_inotify_rm_watch |
1477 | .long sys_migrate_pages | ||
1478 | .long sys_openat /* 295 */ | ||
1479 | .long sys_mkdirat | ||
1480 | .long sys_mknodat | ||
1481 | .long sys_fchownat | ||
1482 | .long sys_futimesat | ||
1483 | .long sys_newfstatat /* 300 */ | ||
1484 | .long sys_unlinkat | ||
1485 | .long sys_renameat | ||
1486 | .long sys_linkat | ||
1487 | .long sys_symlinkat | ||
1488 | .long sys_readlinkat /* 305 */ | ||
1489 | .long sys_fchmodat | ||
1490 | .long sys_faccessat | ||
1491 | .long sys_pselect6 | ||
1492 | .long sys_ppoll | ||
1426 | 1493 | ||
1427 | 1494 | ||
1428 | syscall_table_size = (. - sys_call_table) | 1495 | syscall_table_size = (. - sys_call_table) |
diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S index c73b4fe9f6ca..29a5265489b7 100644 --- a/arch/frv/kernel/head.S +++ b/arch/frv/kernel/head.S | |||
@@ -513,6 +513,9 @@ __head_mmu_enabled: | |||
513 | movgs gr0,ccr | 513 | movgs gr0,ccr |
514 | movgs gr0,cccr | 514 | movgs gr0,cccr |
515 | 515 | ||
516 | # initialise the virtual interrupt handling | ||
517 | subcc gr0,gr0,gr0,icc2 /* set Z, clear C */ | ||
518 | |||
516 | #ifdef CONFIG_MMU | 519 | #ifdef CONFIG_MMU |
517 | movgs gr3,scr2 | 520 | movgs gr3,scr2 |
518 | movgs gr3,scr3 | 521 | movgs gr3,scr3 |
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index 59580c59c62c..27ab4c30aac6 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c | |||
@@ -287,18 +287,11 @@ asmlinkage void do_IRQ(void) | |||
287 | struct irq_source *source; | 287 | struct irq_source *source; |
288 | int level, cpu; | 288 | int level, cpu; |
289 | 289 | ||
290 | irq_enter(); | ||
291 | |||
290 | level = (__frame->tbr >> 4) & 0xf; | 292 | level = (__frame->tbr >> 4) & 0xf; |
291 | cpu = smp_processor_id(); | 293 | cpu = smp_processor_id(); |
292 | 294 | ||
293 | #if 0 | ||
294 | { | ||
295 | static u32 irqcount; | ||
296 | *(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level); | ||
297 | *(volatile u16 *) 0xffc00100 = (u16) ~0x9999; | ||
298 | mb(); | ||
299 | } | ||
300 | #endif | ||
301 | |||
302 | if ((unsigned long) __frame - (unsigned long) (current + 1) < 512) | 295 | if ((unsigned long) __frame - (unsigned long) (current + 1) < 512) |
303 | BUG(); | 296 | BUG(); |
304 | 297 | ||
@@ -308,40 +301,12 @@ asmlinkage void do_IRQ(void) | |||
308 | 301 | ||
309 | kstat_this_cpu.irqs[level]++; | 302 | kstat_this_cpu.irqs[level]++; |
310 | 303 | ||
311 | irq_enter(); | ||
312 | |||
313 | for (source = frv_irq_levels[level].sources; source; source = source->next) | 304 | for (source = frv_irq_levels[level].sources; source; source = source->next) |
314 | source->doirq(source); | 305 | source->doirq(source); |
315 | 306 | ||
316 | irq_exit(); | ||
317 | |||
318 | __clr_MASK(level); | 307 | __clr_MASK(level); |
319 | 308 | ||
320 | /* only process softirqs if we didn't interrupt another interrupt handler */ | 309 | irq_exit(); |
321 | if ((__frame->psr & PSR_PIL) == PSR_PIL_0) | ||
322 | if (local_softirq_pending()) | ||
323 | do_softirq(); | ||
324 | |||
325 | #ifdef CONFIG_PREEMPT | ||
326 | local_irq_disable(); | ||
327 | while (--current->preempt_count == 0) { | ||
328 | if (!(__frame->psr & PSR_S) || | ||
329 | current->need_resched == 0 || | ||
330 | in_interrupt()) | ||
331 | break; | ||
332 | current->preempt_count++; | ||
333 | local_irq_enable(); | ||
334 | preempt_schedule(); | ||
335 | local_irq_disable(); | ||
336 | } | ||
337 | #endif | ||
338 | |||
339 | #if 0 | ||
340 | { | ||
341 | *(volatile u16 *) 0xffc00100 = (u16) ~0x6666; | ||
342 | mb(); | ||
343 | } | ||
344 | #endif | ||
345 | 310 | ||
346 | } /* end do_IRQ() */ | 311 | } /* end do_IRQ() */ |
347 | 312 | ||
diff --git a/arch/frv/mm/kmap.c b/arch/frv/mm/kmap.c index 539f45e6d15e..c54f18e65ea6 100644 --- a/arch/frv/mm/kmap.c +++ b/arch/frv/mm/kmap.c | |||
@@ -44,15 +44,6 @@ void iounmap(void *addr) | |||
44 | } | 44 | } |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * __iounmap unmaps nearly everything, so be careful | ||
48 | * it doesn't free currently pointer/page tables anymore but it | ||
49 | * wans't used anyway and might be added later. | ||
50 | */ | ||
51 | void __iounmap(void *addr, unsigned long size) | ||
52 | { | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Set new cache mode for some kernel address space. | 47 | * Set new cache mode for some kernel address space. |
57 | * The caller must push data for that range itself, if such data may already | 48 | * The caller must push data for that range itself, if such data may already |
58 | * be in the cache. | 49 | * be in the cache. |
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 80940d712acf..98308b018a35 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig | |||
@@ -33,6 +33,10 @@ config GENERIC_CALIBRATE_DELAY | |||
33 | bool | 33 | bool |
34 | default y | 34 | default y |
35 | 35 | ||
36 | config TIME_LOW_RES | ||
37 | bool | ||
38 | default y | ||
39 | |||
36 | config ISA | 40 | config ISA |
37 | bool | 41 | bool |
38 | default y | 42 | default y |
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu index a380167a13cf..582797db9603 100644 --- a/arch/h8300/Kconfig.cpu +++ b/arch/h8300/Kconfig.cpu | |||
@@ -169,7 +169,7 @@ endif | |||
169 | 169 | ||
170 | config CPU_H8300H | 170 | config CPU_H8300H |
171 | bool | 171 | bool |
172 | depends on (H8002 || H83007 || H83048 || H83068) | 172 | depends on (H83002 || H83007 || H83048 || H83068) |
173 | default y | 173 | default y |
174 | 174 | ||
175 | config CPU_H8S | 175 | config CPU_H8S |
diff --git a/arch/i386/boot/.gitignore b/arch/i386/boot/.gitignore new file mode 100644 index 000000000000..495f20c085de --- /dev/null +++ b/arch/i386/boot/.gitignore | |||
@@ -0,0 +1,3 @@ | |||
1 | bootsect | ||
2 | bzImage | ||
3 | setup | ||
diff --git a/arch/i386/boot/tools/.gitignore b/arch/i386/boot/tools/.gitignore new file mode 100644 index 000000000000..378eac25d311 --- /dev/null +++ b/arch/i386/boot/tools/.gitignore | |||
@@ -0,0 +1 @@ | |||
build | |||
diff --git a/arch/i386/kernel/.gitignore b/arch/i386/kernel/.gitignore new file mode 100644 index 000000000000..40836ad9079c --- /dev/null +++ b/arch/i386/kernel/.gitignore | |||
@@ -0,0 +1 @@ | |||
vsyscall.lds | |||
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S index 4daefb2ec1b2..76b728159403 100644 --- a/arch/i386/kernel/vsyscall-sysenter.S +++ b/arch/i386/kernel/vsyscall-sysenter.S | |||
@@ -7,6 +7,21 @@ | |||
7 | * for details. | 7 | * for details. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* | ||
11 | * The caller puts arg2 in %ecx, which gets pushed. The kernel will use | ||
12 | * %ecx itself for arg2. The pushing is because the sysexit instruction | ||
13 | * (found in entry.S) requires that we clobber %ecx with the desired %esp. | ||
14 | * User code might expect that %ecx is unclobbered though, as it would be | ||
15 | * for returning via the iret instruction, so we must push and pop. | ||
16 | * | ||
17 | * The caller puts arg3 in %edx, which the sysexit instruction requires | ||
18 | * for %eip. Thus, exactly as for arg2, we must push and pop. | ||
19 | * | ||
20 | * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter | ||
21 | * instruction clobbers %esp, the user's %esp won't even survive entry | ||
22 | * into the kernel. We store %esp in %ebp. Code in entry.S must fetch | ||
23 | * arg6 from the stack. | ||
24 | */ | ||
10 | .text | 25 | .text |
11 | .globl __kernel_vsyscall | 26 | .globl __kernel_vsyscall |
12 | .type __kernel_vsyscall,@function | 27 | .type __kernel_vsyscall,@function |
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index e72de580ebbf..bbcfd08378a6 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c | |||
@@ -10,23 +10,8 @@ | |||
10 | 10 | ||
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | EXPORT_SYMBOL(memset); | 12 | EXPORT_SYMBOL(memset); |
13 | EXPORT_SYMBOL(memchr); | ||
14 | EXPORT_SYMBOL(memcmp); | ||
15 | EXPORT_SYMBOL(memcpy); | 13 | EXPORT_SYMBOL(memcpy); |
16 | EXPORT_SYMBOL(memmove); | ||
17 | EXPORT_SYMBOL(memscan); | ||
18 | EXPORT_SYMBOL(strcat); | ||
19 | EXPORT_SYMBOL(strchr); | ||
20 | EXPORT_SYMBOL(strcmp); | ||
21 | EXPORT_SYMBOL(strcpy); | ||
22 | EXPORT_SYMBOL(strlen); | 14 | EXPORT_SYMBOL(strlen); |
23 | EXPORT_SYMBOL(strncat); | ||
24 | EXPORT_SYMBOL(strncmp); | ||
25 | EXPORT_SYMBOL(strncpy); | ||
26 | EXPORT_SYMBOL(strnlen); | ||
27 | EXPORT_SYMBOL(strrchr); | ||
28 | EXPORT_SYMBOL(strstr); | ||
29 | EXPORT_SYMBOL(strpbrk); | ||
30 | 15 | ||
31 | #include <asm/checksum.h> | 16 | #include <asm/checksum.h> |
32 | EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ | 17 | EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index a094ec49ccfa..307d01e15b2e 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -250,32 +250,27 @@ time_init (void) | |||
250 | set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); | 250 | set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); |
251 | } | 251 | } |
252 | 252 | ||
253 | #define SMALLUSECS 100 | 253 | /* |
254 | 254 | * Generic udelay assumes that if preemption is allowed and the thread | |
255 | void | 255 | * migrates to another CPU, that the ITC values are synchronized across |
256 | udelay (unsigned long usecs) | 256 | * all CPUs. |
257 | */ | ||
258 | static void | ||
259 | ia64_itc_udelay (unsigned long usecs) | ||
257 | { | 260 | { |
258 | unsigned long start; | 261 | unsigned long start = ia64_get_itc(); |
259 | unsigned long cycles; | 262 | unsigned long end = start + usecs*local_cpu_data->cyc_per_usec; |
260 | unsigned long smallusecs; | ||
261 | 263 | ||
262 | /* | 264 | while (time_before(ia64_get_itc(), end)) |
263 | * Execute the non-preemptible delay loop (because the ITC might | 265 | cpu_relax(); |
264 | * not be synchronized between CPUS) in relatively short time | 266 | } |
265 | * chunks, allowing preemption between the chunks. | ||
266 | */ | ||
267 | while (usecs > 0) { | ||
268 | smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs; | ||
269 | preempt_disable(); | ||
270 | cycles = smallusecs*local_cpu_data->cyc_per_usec; | ||
271 | start = ia64_get_itc(); | ||
272 | 267 | ||
273 | while (ia64_get_itc() - start < cycles) | 268 | void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay; |
274 | cpu_relax(); | ||
275 | 269 | ||
276 | preempt_enable(); | 270 | void |
277 | usecs -= smallusecs; | 271 | udelay (unsigned long usecs) |
278 | } | 272 | { |
273 | (*ia64_udelay)(usecs); | ||
279 | } | 274 | } |
280 | EXPORT_SYMBOL(udelay); | 275 | EXPORT_SYMBOL(udelay); |
281 | 276 | ||
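One consequence of the rewrite above is that udelay() is now routed through the ia64_udelay
function pointer, so a platform whose cycle counters are not suitable for this purpose can
substitute its own delay routine. A hedged sketch of such an override (the sn2_rtc_udelay name
and the availability of an extern declaration for the hook are assumptions, not part of this
patch):

	/* assumed to be declared in a header, matching the definition above */
	extern void (*ia64_udelay)(unsigned long usecs);

	/* hypothetical platform delay based on a globally synchronized clock */
	static void sn2_rtc_udelay(unsigned long usecs)
	{
		/* ... spin reading the platform clock until 'usecs' have elapsed ... */
	}

	static void sn2_setup_udelay(void)
	{
		ia64_udelay = &sn2_rtc_udelay;	/* override the default ITC-based loop */
	}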
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 55391901b013..dabd6c32641e 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/module.h> /* for EXPORT_SYMBOL */ | 16 | #include <linux/module.h> /* for EXPORT_SYMBOL */ |
17 | #include <linux/hardirq.h> | 17 | #include <linux/hardirq.h> |
18 | #include <linux/kprobes.h> | 18 | #include <linux/kprobes.h> |
19 | #include <linux/delay.h> /* for ssleep() */ | ||
19 | 20 | ||
20 | #include <asm/fpswa.h> | 21 | #include <asm/fpswa.h> |
21 | #include <asm/ia32.h> | 22 | #include <asm/ia32.h> |
@@ -116,6 +117,13 @@ die (const char *str, struct pt_regs *regs, long err) | |||
116 | bust_spinlocks(0); | 117 | bust_spinlocks(0); |
117 | die.lock_owner = -1; | 118 | die.lock_owner = -1; |
118 | spin_unlock_irq(&die.lock); | 119 | spin_unlock_irq(&die.lock); |
120 | |||
121 | if (panic_on_oops) { | ||
122 | printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); | ||
123 | ssleep(5); | ||
124 | panic("Fatal exception"); | ||
125 | } | ||
126 | |||
119 | do_exit(SIGSEGV); | 127 | do_exit(SIGSEGV); |
120 | } | 128 | } |
121 | 129 | ||
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 3437c2390429..3edef0d32f86 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c | |||
@@ -23,6 +23,10 @@ | |||
23 | #include "xtalk/hubdev.h" | 23 | #include "xtalk/hubdev.h" |
24 | #include "xtalk/xwidgetdev.h" | 24 | #include "xtalk/xwidgetdev.h" |
25 | 25 | ||
26 | |||
27 | extern void sn_init_cpei_timer(void); | ||
28 | extern void register_sn_procfs(void); | ||
29 | |||
26 | static struct list_head sn_sysdata_list; | 30 | static struct list_head sn_sysdata_list; |
27 | 31 | ||
28 | /* sysdata list struct */ | 32 | /* sysdata list struct */ |
@@ -40,12 +44,12 @@ struct brick { | |||
40 | struct slab_info slab_info[MAX_SLABS + 1]; | 44 | struct slab_info slab_info[MAX_SLABS + 1]; |
41 | }; | 45 | }; |
42 | 46 | ||
43 | int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */ | 47 | int sn_ioif_inited; /* SN I/O infrastructure initialized? */ |
44 | 48 | ||
45 | struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ | 49 | struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ |
46 | 50 | ||
47 | static int max_segment_number = 0; /* Default highest segment number */ | 51 | static int max_segment_number; /* Default highest segment number */ |
48 | static int max_pcibus_number = 255; /* Default highest pci bus number */ | 52 | static int max_pcibus_number = 255; /* Default highest pci bus number */ |
49 | 53 | ||
50 | /* | 54 | /* |
51 | * Hooks and struct for unsupported pci providers | 55 | * Hooks and struct for unsupported pci providers |
@@ -84,7 +88,6 @@ static inline u64 | |||
84 | sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, | 88 | sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, |
85 | u64 address) | 89 | u64 address) |
86 | { | 90 | { |
87 | |||
88 | struct ia64_sal_retval ret_stuff; | 91 | struct ia64_sal_retval ret_stuff; |
89 | ret_stuff.status = 0; | 92 | ret_stuff.status = 0; |
90 | ret_stuff.v0 = 0; | 93 | ret_stuff.v0 = 0; |
@@ -94,7 +97,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, | |||
94 | (u64) nasid, (u64) widget_num, | 97 | (u64) nasid, (u64) widget_num, |
95 | (u64) device_num, (u64) address, 0, 0, 0); | 98 | (u64) device_num, (u64) address, 0, 0, 0); |
96 | return ret_stuff.status; | 99 | return ret_stuff.status; |
97 | |||
98 | } | 100 | } |
99 | 101 | ||
100 | /* | 102 | /* |
@@ -102,7 +104,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, | |||
102 | */ | 104 | */ |
103 | static inline u64 sal_get_hubdev_info(u64 handle, u64 address) | 105 | static inline u64 sal_get_hubdev_info(u64 handle, u64 address) |
104 | { | 106 | { |
105 | |||
106 | struct ia64_sal_retval ret_stuff; | 107 | struct ia64_sal_retval ret_stuff; |
107 | ret_stuff.status = 0; | 108 | ret_stuff.status = 0; |
108 | ret_stuff.v0 = 0; | 109 | ret_stuff.v0 = 0; |
@@ -118,7 +119,6 @@ static inline u64 sal_get_hubdev_info(u64 handle, u64 address) | |||
118 | */ | 119 | */ |
119 | static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) | 120 | static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) |
120 | { | 121 | { |
121 | |||
122 | struct ia64_sal_retval ret_stuff; | 122 | struct ia64_sal_retval ret_stuff; |
123 | ret_stuff.status = 0; | 123 | ret_stuff.status = 0; |
124 | ret_stuff.v0 = 0; | 124 | ret_stuff.v0 = 0; |
@@ -215,7 +215,7 @@ static void __init sn_fixup_ionodes(void) | |||
215 | struct hubdev_info *hubdev; | 215 | struct hubdev_info *hubdev; |
216 | u64 status; | 216 | u64 status; |
217 | u64 nasid; | 217 | u64 nasid; |
218 | int i, widget, device; | 218 | int i, widget, device, size; |
219 | 219 | ||
220 | /* | 220 | /* |
221 | * Get SGI Specific HUB chipset information. | 221 | * Get SGI Specific HUB chipset information. |
@@ -251,48 +251,37 @@ static void __init sn_fixup_ionodes(void) | |||
251 | if (!hubdev->hdi_flush_nasid_list.widget_p) | 251 | if (!hubdev->hdi_flush_nasid_list.widget_p) |
252 | continue; | 252 | continue; |
253 | 253 | ||
254 | size = (HUB_WIDGET_ID_MAX + 1) * | ||
255 | sizeof(struct sn_flush_device_kernel *); | ||
254 | hubdev->hdi_flush_nasid_list.widget_p = | 256 | hubdev->hdi_flush_nasid_list.widget_p = |
255 | kmalloc((HUB_WIDGET_ID_MAX + 1) * | 257 | kzalloc(size, GFP_KERNEL); |
256 | sizeof(struct sn_flush_device_kernel *), | 258 | if (!hubdev->hdi_flush_nasid_list.widget_p) |
257 | GFP_KERNEL); | 259 | BUG(); |
258 | memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0, | ||
259 | (HUB_WIDGET_ID_MAX + 1) * | ||
260 | sizeof(struct sn_flush_device_kernel *)); | ||
261 | 260 | ||
262 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { | 261 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { |
263 | sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET * | 262 | size = DEV_PER_WIDGET * |
264 | sizeof(struct | 263 | sizeof(struct sn_flush_device_kernel); |
265 | sn_flush_device_kernel), | 264 | sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); |
266 | GFP_KERNEL); | ||
267 | if (!sn_flush_device_kernel) | 265 | if (!sn_flush_device_kernel) |
268 | BUG(); | 266 | BUG(); |
269 | memset(sn_flush_device_kernel, 0x0, | ||
270 | DEV_PER_WIDGET * | ||
271 | sizeof(struct sn_flush_device_kernel)); | ||
272 | 267 | ||
273 | dev_entry = sn_flush_device_kernel; | 268 | dev_entry = sn_flush_device_kernel; |
274 | for (device = 0; device < DEV_PER_WIDGET; | 269 | for (device = 0; device < DEV_PER_WIDGET; |
275 | device++,dev_entry++) { | 270 | device++,dev_entry++) { |
276 | dev_entry->common = kmalloc(sizeof(struct | 271 | size = sizeof(struct sn_flush_device_common); |
277 | sn_flush_device_common), | 272 | dev_entry->common = kzalloc(size, GFP_KERNEL); |
278 | GFP_KERNEL); | ||
279 | if (!dev_entry->common) | 273 | if (!dev_entry->common) |
280 | BUG(); | 274 | BUG(); |
281 | memset(dev_entry->common, 0x0, sizeof(struct | ||
282 | sn_flush_device_common)); | ||
283 | 275 | ||
284 | if (sn_prom_feature_available( | 276 | if (sn_prom_feature_available( |
285 | PRF_DEVICE_FLUSH_LIST)) | 277 | PRF_DEVICE_FLUSH_LIST)) |
286 | status = sal_get_device_dmaflush_list( | 278 | status = sal_get_device_dmaflush_list( |
287 | nasid, | 279 | nasid, widget, device, |
288 | widget, | 280 | (u64)(dev_entry->common)); |
289 | device, | ||
290 | (u64)(dev_entry->common)); | ||
291 | else | 281 | else |
292 | status = sn_device_fixup_war(nasid, | 282 | status = sn_device_fixup_war(nasid, |
293 | widget, | 283 | widget, device, |
294 | device, | 284 | dev_entry->common); |
295 | dev_entry->common); | ||
296 | if (status != SALRET_OK) | 285 | if (status != SALRET_OK) |
297 | panic("SAL call failed: %s\n", | 286 | panic("SAL call failed: %s\n", |
298 | ia64_sal_strerror(status)); | 287 | ia64_sal_strerror(status)); |
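[Note on the hunk above] Each kmalloc()-plus-memset() pair in sn_fixup_ionodes() becomes a single kzalloc() call, which returns already-zeroed memory and removes the risk of the memset size drifting out of sync with the allocation size. A minimal sketch of the pattern, assuming kernel context; struct foo is a hypothetical payload:

    #include <linux/slab.h>
    #include <linux/string.h>

    struct foo {                    /* hypothetical payload */
            int a;
            void *b;
    };

    /* Old shape: allocate, then zero by hand (two sizes to keep in sync). */
    static struct foo *alloc_foo_old(void)
    {
            struct foo *p = kmalloc(sizeof(struct foo), GFP_KERNEL);

            if (p)
                    memset(p, 0, sizeof(struct foo));
            return p;
    }

    /* New shape: kzalloc() allocates and zeroes in one call. */
    static struct foo *alloc_foo_new(void)
    {
            return kzalloc(sizeof(struct foo), GFP_KERNEL);
    }
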
@@ -383,13 +372,12 @@ void sn_pci_fixup_slot(struct pci_dev *dev) | |||
383 | 372 | ||
384 | pci_dev_get(dev); /* for the sysdata pointer */ | 373 | pci_dev_get(dev); /* for the sysdata pointer */ |
385 | pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); | 374 | pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); |
386 | if (pcidev_info <= 0) | 375 | if (!pcidev_info) |
387 | BUG(); /* Cannot afford to run out of memory */ | 376 | BUG(); /* Cannot afford to run out of memory */ |
388 | 377 | ||
389 | sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL); | 378 | sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); |
390 | if (sn_irq_info <= 0) | 379 | if (!sn_irq_info) |
391 | BUG(); /* Cannot afford to run out of memory */ | 380 | BUG(); /* Cannot afford to run out of memory */ |
392 | memset(sn_irq_info, 0, sizeof(struct sn_irq_info)); | ||
393 | 381 | ||
394 | /* Call to retrieve pci device information needed by kernel. */ | 382 | /* Call to retrieve pci device information needed by kernel. */ |
395 | status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, | 383 | status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, |
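[Note on the hunk above] The `if (pcidev_info <= 0)` tests are replaced with `if (!pcidev_info)`: an ordered comparison of a pointer against 0 is not a meaningful NULL test (and gcc may warn about it), while `!ptr` is the idiomatic check. A standalone userspace sketch of the same point:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            void *p = malloc(16);

            /* Idiomatic NULL check: the allocation either succeeded or it
             * did not; only ==/!= against NULL is meaningful for pointers. */
            if (!p) {
                    fprintf(stderr, "out of memory\n");
                    return 1;
            }

            /* "if (p <= 0)" may compile with a warning, but an ordered
             * comparison says nothing useful about a pointer value. */
            free(p);
            return 0;
    }
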
@@ -482,13 +470,13 @@ void sn_pci_fixup_slot(struct pci_dev *dev) | |||
482 | */ | 470 | */ |
483 | void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) | 471 | void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) |
484 | { | 472 | { |
485 | int status = 0; | 473 | int status; |
486 | int nasid, cnode; | 474 | int nasid, cnode; |
487 | struct pci_controller *controller; | 475 | struct pci_controller *controller; |
488 | struct sn_pci_controller *sn_controller; | 476 | struct sn_pci_controller *sn_controller; |
489 | struct pcibus_bussoft *prom_bussoft_ptr; | 477 | struct pcibus_bussoft *prom_bussoft_ptr; |
490 | struct hubdev_info *hubdev_info; | 478 | struct hubdev_info *hubdev_info; |
491 | void *provider_soft = NULL; | 479 | void *provider_soft; |
492 | struct sn_pcibus_provider *provider; | 480 | struct sn_pcibus_provider *provider; |
493 | 481 | ||
494 | status = sal_get_pcibus_info((u64) segment, (u64) busnum, | 482 | status = sal_get_pcibus_info((u64) segment, (u64) busnum, |
@@ -535,6 +523,8 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) | |||
535 | bus->sysdata = controller; | 523 | bus->sysdata = controller; |
536 | if (provider->bus_fixup) | 524 | if (provider->bus_fixup) |
537 | provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller); | 525 | provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller); |
526 | else | ||
527 | provider_soft = NULL; | ||
538 | 528 | ||
539 | if (provider_soft == NULL) { | 529 | if (provider_soft == NULL) { |
540 | /* fixup failed or not applicable */ | 530 | /* fixup failed or not applicable */ |
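[Note on the hunk above] The `= NULL` initializer is dropped from provider_soft and the variable is instead assigned on both branches of the `if`. With every path assigning it explicitly, a path that forgets to do so shows up as a "may be used uninitialized" compiler warning rather than being hidden by a blanket initializer. A small sketch of the idea; the helper name is hypothetical:

    #include <stddef.h>

    static void *run_bus_fixup(void)
    {
            return (void *)0x1;             /* stand-in for a real fixup result */
    }

    /* Assigning on every branch (instead of "= NULL" at the declaration)
     * keeps uninitialized-variable warnings meaningful. */
    static void *pick_provider_soft(int has_bus_fixup)
    {
            void *provider_soft;            /* deliberately not pre-initialized */

            if (has_bus_fixup)
                    provider_soft = run_bus_fixup();
            else
                    provider_soft = NULL;   /* fixup not applicable */

            return provider_soft;
    }
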
@@ -638,13 +628,8 @@ void sn_bus_free_sysdata(void) | |||
638 | 628 | ||
639 | static int __init sn_pci_init(void) | 629 | static int __init sn_pci_init(void) |
640 | { | 630 | { |
641 | int i = 0; | 631 | int i, j; |
642 | int j = 0; | ||
643 | struct pci_dev *pci_dev = NULL; | 632 | struct pci_dev *pci_dev = NULL; |
644 | extern void sn_init_cpei_timer(void); | ||
645 | #ifdef CONFIG_PROC_FS | ||
646 | extern void register_sn_procfs(void); | ||
647 | #endif | ||
648 | 633 | ||
649 | if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) | 634 | if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) |
650 | return 0; | 635 | return 0; |
@@ -700,32 +685,29 @@ static int __init sn_pci_init(void) | |||
700 | */ | 685 | */ |
701 | void hubdev_init_node(nodepda_t * npda, cnodeid_t node) | 686 | void hubdev_init_node(nodepda_t * npda, cnodeid_t node) |
702 | { | 687 | { |
703 | |||
704 | struct hubdev_info *hubdev_info; | 688 | struct hubdev_info *hubdev_info; |
689 | int size; | ||
690 | pg_data_t *pg; | ||
691 | |||
692 | size = sizeof(struct hubdev_info); | ||
705 | 693 | ||
706 | if (node >= num_online_nodes()) /* Headless/memless IO nodes */ | 694 | if (node >= num_online_nodes()) /* Headless/memless IO nodes */ |
707 | hubdev_info = | 695 | pg = NODE_DATA(0); |
708 | (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(0), | ||
709 | sizeof(struct | ||
710 | hubdev_info)); | ||
711 | else | 696 | else |
712 | hubdev_info = | 697 | pg = NODE_DATA(node); |
713 | (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(node), | ||
714 | sizeof(struct | ||
715 | hubdev_info)); | ||
716 | npda->pdinfo = (void *)hubdev_info; | ||
717 | 698 | ||
699 | hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size); | ||
700 | |||
701 | npda->pdinfo = (void *)hubdev_info; | ||
718 | } | 702 | } |
719 | 703 | ||
720 | geoid_t | 704 | geoid_t |
721 | cnodeid_get_geoid(cnodeid_t cnode) | 705 | cnodeid_get_geoid(cnodeid_t cnode) |
722 | { | 706 | { |
723 | |||
724 | struct hubdev_info *hubdev; | 707 | struct hubdev_info *hubdev; |
725 | 708 | ||
726 | hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); | 709 | hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); |
727 | return hubdev->hdi_geoid; | 710 | return hubdev->hdi_geoid; |
728 | |||
729 | } | 711 | } |
730 | 712 | ||
731 | subsys_initcall(sn_pci_init); | 713 | subsys_initcall(sn_pci_init); |
@@ -734,3 +716,4 @@ EXPORT_SYMBOL(sn_pci_unfixup_slot); | |||
734 | EXPORT_SYMBOL(sn_pci_controller_fixup); | 716 | EXPORT_SYMBOL(sn_pci_controller_fixup); |
735 | EXPORT_SYMBOL(sn_bus_store_sysdata); | 717 | EXPORT_SYMBOL(sn_bus_store_sysdata); |
736 | EXPORT_SYMBOL(sn_bus_free_sysdata); | 718 | EXPORT_SYMBOL(sn_bus_free_sysdata); |
719 | EXPORT_SYMBOL(sn_pcidev_info_get); | ||
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 48645ac120fc..5b84836c2171 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); | |||
75 | DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); | 75 | DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); |
76 | EXPORT_PER_CPU_SYMBOL(__sn_hub_info); | 76 | EXPORT_PER_CPU_SYMBOL(__sn_hub_info); |
77 | 77 | ||
78 | DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); | 78 | DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); |
79 | EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); | 79 | EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); |
80 | 80 | ||
81 | DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); | 81 | DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); |
@@ -317,6 +317,7 @@ struct pcdp_vga_device { | |||
317 | #define PCDP_PCI_TRANS_IOPORT 0x02 | 317 | #define PCDP_PCI_TRANS_IOPORT 0x02 |
318 | #define PCDP_PCI_TRANS_MMIO 0x01 | 318 | #define PCDP_PCI_TRANS_MMIO 0x01 |
319 | 319 | ||
320 | #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) | ||
320 | static void | 321 | static void |
321 | sn_scan_pcdp(void) | 322 | sn_scan_pcdp(void) |
322 | { | 323 | { |
@@ -358,6 +359,7 @@ sn_scan_pcdp(void) | |||
358 | break; /* once we find the primary, we're done */ | 359 | break; /* once we find the primary, we're done */ |
359 | } | 360 | } |
360 | } | 361 | } |
362 | #endif | ||
361 | 363 | ||
362 | static unsigned long sn2_rtc_initial; | 364 | static unsigned long sn2_rtc_initial; |
363 | 365 | ||
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c index 81c63b2f8ae9..6ae276d5d50c 100644 --- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c +++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc. All Rights Reserved. |
7 | * | 7 | * |
8 | * Module to export the system's Firmware Interface Tables, including | 8 | * Module to export the system's Firmware Interface Tables, including |
9 | * PROM revision numbers and banners, in /proc | 9 | * PROM revision numbers and banners, in /proc |
@@ -190,7 +190,7 @@ static int | |||
190 | read_version_entry(char *page, char **start, off_t off, int count, int *eof, | 190 | read_version_entry(char *page, char **start, off_t off, int count, int *eof, |
191 | void *data) | 191 | void *data) |
192 | { | 192 | { |
193 | int len = 0; | 193 | int len; |
194 | 194 | ||
195 | /* data holds the NASID of the node */ | 195 | /* data holds the NASID of the node */ |
196 | len = dump_version(page, (unsigned long)data); | 196 | len = dump_version(page, (unsigned long)data); |
@@ -202,7 +202,7 @@ static int | |||
202 | read_fit_entry(char *page, char **start, off_t off, int count, int *eof, | 202 | read_fit_entry(char *page, char **start, off_t off, int count, int *eof, |
203 | void *data) | 203 | void *data) |
204 | { | 204 | { |
205 | int len = 0; | 205 | int len; |
206 | 206 | ||
207 | /* data holds the NASID of the node */ | 207 | /* data holds the NASID of the node */ |
208 | len = dump_fit(page, (unsigned long)data); | 208 | len = dump_fit(page, (unsigned long)data); |
@@ -229,13 +229,16 @@ int __init prominfo_init(void) | |||
229 | struct proc_dir_entry *p; | 229 | struct proc_dir_entry *p; |
230 | cnodeid_t cnodeid; | 230 | cnodeid_t cnodeid; |
231 | unsigned long nasid; | 231 | unsigned long nasid; |
232 | int size; | ||
232 | char name[NODE_NAME_LEN]; | 233 | char name[NODE_NAME_LEN]; |
233 | 234 | ||
234 | if (!ia64_platform_is("sn2")) | 235 | if (!ia64_platform_is("sn2")) |
235 | return 0; | 236 | return 0; |
236 | 237 | ||
237 | proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *), | 238 | size = num_online_nodes() * sizeof(struct proc_dir_entry *); |
238 | GFP_KERNEL); | 239 | proc_entries = kzalloc(size, GFP_KERNEL); |
240 | if (!proc_entries) | ||
241 | return -ENOMEM; | ||
239 | 242 | ||
240 | sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL); | 243 | sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL); |
241 | 244 | ||
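[Note on the hunk above] Besides the kzalloc() switch, prominfo_init() gains the allocation-failure check that was missing: if the per-node proc_entries table cannot be allocated, the initcall now returns -ENOMEM instead of continuing with a NULL pointer. A hedged sketch of that shape, assuming kernel context; the table size of 16 is an arbitrary stand-in:

    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    static void **entries;                  /* hypothetical per-node table */

    static int __init example_init(void)
    {
            entries = kzalloc(16 * sizeof(*entries), GFP_KERNEL);
            if (!entries)
                    return -ENOMEM;         /* fail the initcall cleanly */

            /* ... populate the table ... */
            return 0;
    }
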
@@ -244,14 +247,12 @@ int __init prominfo_init(void) | |||
244 | sprintf(name, "node%d", cnodeid); | 247 | sprintf(name, "node%d", cnodeid); |
245 | *entp = proc_mkdir(name, sgi_prominfo_entry); | 248 | *entp = proc_mkdir(name, sgi_prominfo_entry); |
246 | nasid = cnodeid_to_nasid(cnodeid); | 249 | nasid = cnodeid_to_nasid(cnodeid); |
247 | p = create_proc_read_entry( | 250 | p = create_proc_read_entry("fit", 0, *entp, read_fit_entry, |
248 | "fit", 0, *entp, read_fit_entry, | 251 | (void *)nasid); |
249 | (void *)nasid); | ||
250 | if (p) | 252 | if (p) |
251 | p->owner = THIS_MODULE; | 253 | p->owner = THIS_MODULE; |
252 | p = create_proc_read_entry( | 254 | p = create_proc_read_entry("version", 0, *entp, |
253 | "version", 0, *entp, read_version_entry, | 255 | read_version_entry, (void *)nasid); |
254 | (void *)nasid); | ||
255 | if (p) | 256 | if (p) |
256 | p->owner = THIS_MODULE; | 257 | p->owner = THIS_MODULE; |
257 | entp++; | 258 | entp++; |
@@ -263,7 +264,7 @@ int __init prominfo_init(void) | |||
263 | void __exit prominfo_exit(void) | 264 | void __exit prominfo_exit(void) |
264 | { | 265 | { |
265 | struct proc_dir_entry **entp; | 266 | struct proc_dir_entry **entp; |
266 | unsigned cnodeid; | 267 | unsigned int cnodeid; |
267 | char name[NODE_NAME_LEN]; | 268 | char name[NODE_NAME_LEN]; |
268 | 269 | ||
269 | entp = proc_entries; | 270 | entp = proc_entries; |
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index f153a4c35c70..24eefb2fc55f 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
@@ -46,8 +46,14 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats); | |||
46 | 46 | ||
47 | static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); | 47 | static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); |
48 | 48 | ||
49 | void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned long *, unsigned long, | 49 | extern unsigned long |
50 | volatile unsigned long *, unsigned long); | 50 | sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, |
51 | volatile unsigned long *, unsigned long, | ||
52 | volatile unsigned long *, unsigned long); | ||
53 | void | ||
54 | sn2_ptc_deadlock_recovery(short *, short, short, int, | ||
55 | volatile unsigned long *, unsigned long, | ||
56 | volatile unsigned long *, unsigned long); | ||
51 | 57 | ||
52 | /* | 58 | /* |
53 | * Note: some is the following is captured here to make degugging easier | 59 | * Note: some is the following is captured here to make degugging easier |
@@ -59,16 +65,6 @@ void sn2_ptc_deadlock_recovery(short *, short, short, int, volatile unsigned lon | |||
59 | #define reset_max_active_on_deadlock() 1 | 65 | #define reset_max_active_on_deadlock() 1 |
60 | #define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock) | 66 | #define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock) |
61 | 67 | ||
62 | static inline void ptc_lock(int sh1, unsigned long *flagp) | ||
63 | { | ||
64 | spin_lock_irqsave(PTC_LOCK(sh1), *flagp); | ||
65 | } | ||
66 | |||
67 | static inline void ptc_unlock(int sh1, unsigned long flags) | ||
68 | { | ||
69 | spin_unlock_irqrestore(PTC_LOCK(sh1), flags); | ||
70 | } | ||
71 | |||
72 | struct ptc_stats { | 68 | struct ptc_stats { |
73 | unsigned long ptc_l; | 69 | unsigned long ptc_l; |
74 | unsigned long change_rid; | 70 | unsigned long change_rid; |
@@ -82,6 +78,8 @@ struct ptc_stats { | |||
82 | unsigned long shub_ptc_flushes_not_my_mm; | 78 | unsigned long shub_ptc_flushes_not_my_mm; |
83 | }; | 79 | }; |
84 | 80 | ||
81 | #define sn2_ptctest 0 | ||
82 | |||
85 | static inline unsigned long wait_piowc(void) | 83 | static inline unsigned long wait_piowc(void) |
86 | { | 84 | { |
87 | volatile unsigned long *piows; | 85 | volatile unsigned long *piows; |
@@ -200,7 +198,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
200 | max_active = max_active_pio(shub1); | 198 | max_active = max_active_pio(shub1); |
201 | 199 | ||
202 | itc = ia64_get_itc(); | 200 | itc = ia64_get_itc(); |
203 | ptc_lock(shub1, &flags); | 201 | spin_lock_irqsave(PTC_LOCK(shub1), flags); |
204 | itc2 = ia64_get_itc(); | 202 | itc2 = ia64_get_itc(); |
205 | 203 | ||
206 | __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; | 204 | __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; |
@@ -258,7 +256,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
258 | ia64_srlz_d(); | 256 | ia64_srlz_d(); |
259 | } | 257 | } |
260 | 258 | ||
261 | ptc_unlock(shub1, flags); | 259 | spin_unlock_irqrestore(PTC_LOCK(shub1), flags); |
262 | 260 | ||
263 | preempt_enable(); | 261 | preempt_enable(); |
264 | } | 262 | } |
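[Note on the hunks above] The one-line ptc_lock()/ptc_unlock() wrappers removed earlier in this file simply covered spin_lock_irqsave()/spin_unlock_irqrestore(); the flush path now calls the primitives directly on whichever lock PTC_LOCK() selects. The primitive in use is the interrupt-safe variant, which saves and restores the local interrupt state around the critical section. A minimal sketch, assuming kernel context:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void example_critical_section(void)
    {
            unsigned long flags;

            /* Disables local interrupts, remembering their previous state
             * in 'flags', then takes the lock. */
            spin_lock_irqsave(&example_lock, flags);

            /* ... work protected by example_lock ... */

            spin_unlock_irqrestore(&example_lock, flags);
    }
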
@@ -270,11 +268,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
270 | * TLB flush transaction. The recovery sequence is somewhat tricky & is | 268 | * TLB flush transaction. The recovery sequence is somewhat tricky & is |
271 | * coded in assembly language. | 269 | * coded in assembly language. |
272 | */ | 270 | */ |
273 | void sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, volatile unsigned long *ptc0, unsigned long data0, | 271 | |
274 | volatile unsigned long *ptc1, unsigned long data1) | 272 | void |
273 | sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, | ||
274 | volatile unsigned long *ptc0, unsigned long data0, | ||
275 | volatile unsigned long *ptc1, unsigned long data1) | ||
275 | { | 276 | { |
276 | extern unsigned long sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, | ||
277 | volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long); | ||
278 | short nasid, i; | 277 | short nasid, i; |
279 | unsigned long *piows, zeroval, n; | 278 | unsigned long *piows, zeroval, n; |
280 | 279 | ||
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c index a06719d752a0..c686d9c12f7b 100644 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c | |||
@@ -6,11 +6,11 @@ | |||
6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | #include <linux/config.h> | 8 | #include <linux/config.h> |
9 | #include <asm/uaccess.h> | ||
10 | 9 | ||
11 | #ifdef CONFIG_PROC_FS | 10 | #ifdef CONFIG_PROC_FS |
12 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
13 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <asm/uaccess.h> | ||
14 | #include <asm/sn/sn_sal.h> | 14 | #include <asm/sn/sn_sal.h> |
15 | 15 | ||
16 | static int partition_id_show(struct seq_file *s, void *p) | 16 | static int partition_id_show(struct seq_file *s, void *p) |
@@ -90,10 +90,10 @@ static int coherence_id_open(struct inode *inode, struct file *file) | |||
90 | return single_open(file, coherence_id_show, NULL); | 90 | return single_open(file, coherence_id_show, NULL); |
91 | } | 91 | } |
92 | 92 | ||
93 | static struct proc_dir_entry *sn_procfs_create_entry( | 93 | static struct proc_dir_entry |
94 | const char *name, struct proc_dir_entry *parent, | 94 | *sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent, |
95 | int (*openfunc)(struct inode *, struct file *), | 95 | int (*openfunc)(struct inode *, struct file *), |
96 | int (*releasefunc)(struct inode *, struct file *)) | 96 | int (*releasefunc)(struct inode *, struct file *)) |
97 | { | 97 | { |
98 | struct proc_dir_entry *e = create_proc_entry(name, 0444, parent); | 98 | struct proc_dir_entry *e = create_proc_entry(name, 0444, parent); |
99 | 99 | ||
@@ -126,24 +126,24 @@ void register_sn_procfs(void) | |||
126 | return; | 126 | return; |
127 | 127 | ||
128 | sn_procfs_create_entry("partition_id", sgi_proc_dir, | 128 | sn_procfs_create_entry("partition_id", sgi_proc_dir, |
129 | partition_id_open, single_release); | 129 | partition_id_open, single_release); |
130 | 130 | ||
131 | sn_procfs_create_entry("system_serial_number", sgi_proc_dir, | 131 | sn_procfs_create_entry("system_serial_number", sgi_proc_dir, |
132 | system_serial_number_open, single_release); | 132 | system_serial_number_open, single_release); |
133 | 133 | ||
134 | sn_procfs_create_entry("licenseID", sgi_proc_dir, | 134 | sn_procfs_create_entry("licenseID", sgi_proc_dir, |
135 | licenseID_open, single_release); | 135 | licenseID_open, single_release); |
136 | 136 | ||
137 | e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, | 137 | e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, |
138 | sn_force_interrupt_open, single_release); | 138 | sn_force_interrupt_open, single_release); |
139 | if (e) | 139 | if (e) |
140 | e->proc_fops->write = sn_force_interrupt_write_proc; | 140 | e->proc_fops->write = sn_force_interrupt_write_proc; |
141 | 141 | ||
142 | sn_procfs_create_entry("coherence_id", sgi_proc_dir, | 142 | sn_procfs_create_entry("coherence_id", sgi_proc_dir, |
143 | coherence_id_open, single_release); | 143 | coherence_id_open, single_release); |
144 | 144 | ||
145 | sn_procfs_create_entry("sn_topology", sgi_proc_dir, | 145 | sn_procfs_create_entry("sn_topology", sgi_proc_dir, |
146 | sn_topology_open, sn_topology_release); | 146 | sn_topology_open, sn_topology_release); |
147 | } | 147 | } |
148 | 148 | ||
149 | #endif /* CONFIG_PROC_FS */ | 149 | #endif /* CONFIG_PROC_FS */ |
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c index deb9baf4d473..56a88b6df4b4 100644 --- a/arch/ia64/sn/kernel/sn2/timer.c +++ b/arch/ia64/sn/kernel/sn2/timer.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <asm/hw_irq.h> | 15 | #include <asm/hw_irq.h> |
16 | #include <asm/system.h> | 16 | #include <asm/system.h> |
17 | #include <asm/timex.h> | ||
17 | 18 | ||
18 | #include <asm/sn/leds.h> | 19 | #include <asm/sn/leds.h> |
19 | #include <asm/sn/shub_mmr.h> | 20 | #include <asm/sn/shub_mmr.h> |
@@ -28,9 +29,27 @@ static struct time_interpolator sn2_interpolator = { | |||
28 | .source = TIME_SOURCE_MMIO64 | 29 | .source = TIME_SOURCE_MMIO64 |
29 | }; | 30 | }; |
30 | 31 | ||
32 | /* | ||
33 | * sn udelay uses the RTC instead of the ITC because the ITC is not | ||
34 | * synchronized across all CPUs, and the thread may migrate to another CPU | ||
35 | * if preemption is enabled. | ||
36 | */ | ||
37 | static void | ||
38 | ia64_sn_udelay (unsigned long usecs) | ||
39 | { | ||
40 | unsigned long start = rtc_time(); | ||
41 | unsigned long end = start + | ||
42 | usecs * sn_rtc_cycles_per_second / 1000000; | ||
43 | |||
44 | while (time_before((unsigned long)rtc_time(), end)) | ||
45 | cpu_relax(); | ||
46 | } | ||
47 | |||
31 | void __init sn_timer_init(void) | 48 | void __init sn_timer_init(void) |
32 | { | 49 | { |
33 | sn2_interpolator.frequency = sn_rtc_cycles_per_second; | 50 | sn2_interpolator.frequency = sn_rtc_cycles_per_second; |
34 | sn2_interpolator.addr = RTC_COUNTER_ADDR; | 51 | sn2_interpolator.addr = RTC_COUNTER_ADDR; |
35 | register_time_interpolator(&sn2_interpolator); | 52 | register_time_interpolator(&sn2_interpolator); |
53 | |||
54 | ia64_udelay = &ia64_sn_udelay; | ||
36 | } | 55 | } |
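[Note on the hunk above] The new ia64_sn_udelay() spins on the chipset RTC rather than the per-CPU ITC, since the RTC is synchronized system-wide and keeps working if the thread migrates between CPUs; the loop sits in cpu_relax() until the deadline passes. Its exit test, time_before(), relies on a wraparound-safe comparison of free-running counters, and the gist of that comparison can be shown standalone:

    #include <stdio.h>
    #include <stdint.h>

    /* Wraparound-safe "a comes before b" for a free-running 32-bit counter:
     * take the unsigned difference and interpret it as signed.  (A sketch of
     * the idea behind the kernel's time_before(), not the macro itself.) */
    static int counter_before(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
            uint32_t now = 0xfffffff0u;
            uint32_t deadline = now + 0x20;         /* wraps around to 0x10 */

            printf("%d\n", counter_before(now, deadline));  /* prints 1 */
            return 0;
    }
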
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c index adf5db2e2afe..fa7f69945917 100644 --- a/arch/ia64/sn/kernel/sn2/timer_interrupt.c +++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * | 3 | * |
4 | * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. | 4 | * Copyright (c) 2005, 2006 Silicon Graphics, Inc. All Rights Reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of version 2 of the GNU General Public License | 7 | * under the terms of version 2 of the GNU General Public License |
@@ -22,11 +22,6 @@ | |||
22 | * License along with this program; if not, write the Free Software | 22 | * License along with this program; if not, write the Free Software |
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | 23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
24 | * | 24 | * |
25 | * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, | ||
26 | * Mountain View, CA 94043, or: | ||
27 | * | ||
28 | * http://www.sgi.com | ||
29 | * | ||
30 | * For further information regarding this notice, see: | 25 | * For further information regarding this notice, see: |
31 | * | 26 | * |
32 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan | 27 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan |
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index d263d3e8fbb9..8a56f8b5ffa2 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c | |||
@@ -284,12 +284,10 @@ struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq, | |||
284 | if ((nasid & 1) == 0) | 284 | if ((nasid & 1) == 0) |
285 | return NULL; | 285 | return NULL; |
286 | 286 | ||
287 | sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL); | 287 | sn_irq_info = kzalloc(sn_irq_size, GFP_KERNEL); |
288 | if (sn_irq_info == NULL) | 288 | if (sn_irq_info == NULL) |
289 | return NULL; | 289 | return NULL; |
290 | 290 | ||
291 | memset(sn_irq_info, 0x0, sn_irq_size); | ||
292 | |||
293 | status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq, | 291 | status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq, |
294 | req_nasid, slice); | 292 | req_nasid, slice); |
295 | if (status) { | 293 | if (status) { |
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c index 36e5437a0fb6..cdf6856ce089 100644 --- a/arch/ia64/sn/kernel/xpc_channel.c +++ b/arch/ia64/sn/kernel/xpc_channel.c | |||
@@ -738,7 +738,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
738 | 738 | ||
739 | /* make sure all activity has settled down first */ | 739 | /* make sure all activity has settled down first */ |
740 | 740 | ||
741 | if (atomic_read(&ch->references) > 0) { | 741 | if (atomic_read(&ch->references) > 0 || |
742 | ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && | ||
743 | !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) { | ||
742 | return; | 744 | return; |
743 | } | 745 | } |
744 | DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); | 746 | DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); |
@@ -775,7 +777,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
775 | 777 | ||
776 | /* both sides are disconnected now */ | 778 | /* both sides are disconnected now */ |
777 | 779 | ||
778 | if (ch->flags & XPC_C_CONNECTCALLOUT) { | 780 | if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { |
779 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | 781 | spin_unlock_irqrestore(&ch->lock, *irq_flags); |
780 | xpc_disconnect_callout(ch, xpcDisconnected); | 782 | xpc_disconnect_callout(ch, xpcDisconnected); |
781 | spin_lock_irqsave(&ch->lock, *irq_flags); | 783 | spin_lock_irqsave(&ch->lock, *irq_flags); |
@@ -1300,7 +1302,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) | |||
1300 | "delivered=%d, partid=%d, channel=%d\n", | 1302 | "delivered=%d, partid=%d, channel=%d\n", |
1301 | nmsgs_sent, ch->partid, ch->number); | 1303 | nmsgs_sent, ch->partid, ch->number); |
1302 | 1304 | ||
1303 | if (ch->flags & XPC_C_CONNECTCALLOUT) { | 1305 | if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { |
1304 | xpc_activate_kthreads(ch, nmsgs_sent); | 1306 | xpc_activate_kthreads(ch, nmsgs_sent); |
1305 | } | 1307 | } |
1306 | } | 1308 | } |
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index 9cd460dfe27e..8cbf16432570 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c | |||
@@ -750,12 +750,16 @@ xpc_daemonize_kthread(void *args) | |||
750 | /* let registerer know that connection has been established */ | 750 | /* let registerer know that connection has been established */ |
751 | 751 | ||
752 | spin_lock_irqsave(&ch->lock, irq_flags); | 752 | spin_lock_irqsave(&ch->lock, irq_flags); |
753 | if (!(ch->flags & XPC_C_CONNECTCALLOUT)) { | 753 | if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) { |
754 | ch->flags |= XPC_C_CONNECTCALLOUT; | 754 | ch->flags |= XPC_C_CONNECTEDCALLOUT; |
755 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 755 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
756 | 756 | ||
757 | xpc_connected_callout(ch); | 757 | xpc_connected_callout(ch); |
758 | 758 | ||
759 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
760 | ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE; | ||
761 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
762 | |||
759 | /* | 763 | /* |
760 | * It is possible that while the callout was being | 764 | * It is possible that while the callout was being |
761 | * made that the remote partition sent some messages. | 765 | * made that the remote partition sent some messages. |
@@ -777,15 +781,17 @@ xpc_daemonize_kthread(void *args) | |||
777 | 781 | ||
778 | if (atomic_dec_return(&ch->kthreads_assigned) == 0) { | 782 | if (atomic_dec_return(&ch->kthreads_assigned) == 0) { |
779 | spin_lock_irqsave(&ch->lock, irq_flags); | 783 | spin_lock_irqsave(&ch->lock, irq_flags); |
780 | if ((ch->flags & XPC_C_CONNECTCALLOUT) && | 784 | if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && |
781 | !(ch->flags & XPC_C_DISCONNECTCALLOUT)) { | 785 | !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { |
782 | ch->flags |= XPC_C_DISCONNECTCALLOUT; | 786 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT; |
783 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 787 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
784 | 788 | ||
785 | xpc_disconnect_callout(ch, xpcDisconnecting); | 789 | xpc_disconnect_callout(ch, xpcDisconnecting); |
786 | } else { | 790 | |
787 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 791 | spin_lock_irqsave(&ch->lock, irq_flags); |
792 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; | ||
788 | } | 793 | } |
794 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
789 | if (atomic_dec_return(&part->nchannels_engaged) == 0) { | 795 | if (atomic_dec_return(&part->nchannels_engaged) == 0) { |
790 | xpc_mark_partition_disengaged(part); | 796 | xpc_mark_partition_disengaged(part); |
791 | xpc_IPI_send_disengage(part); | 797 | xpc_IPI_send_disengage(part); |
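[Note on the XPC hunks above] Each old callout flag is split into a pair, e.g. XPC_C_CONNECTEDCALLOUT ("callout has been started") and XPC_C_CONNECTEDCALLOUT_MADE ("callout has completed"), because the channel lock has to be dropped while the callout itself runs; xpc_process_disconnect() can then refuse to tear the channel down until the _MADE bit is set. A hedged sketch of that two-flag pattern with hypothetical names and kernel locking primitives:

    #include <linux/spinlock.h>

    #define CALLOUT_STARTED  0x1
    #define CALLOUT_MADE     0x2

    struct chan {
            spinlock_t lock;        /* assumed initialized with spin_lock_init() */
            unsigned int flags;
    };

    static void do_connected_callout(struct chan *ch)
    {
            unsigned long irq_flags;

            spin_lock_irqsave(&ch->lock, irq_flags);
            if (ch->flags & CALLOUT_STARTED) {
                    spin_unlock_irqrestore(&ch->lock, irq_flags);
                    return;                         /* already in progress */
            }
            ch->flags |= CALLOUT_STARTED;
            spin_unlock_irqrestore(&ch->lock, irq_flags);

            /* ... run the callout with the lock dropped ... */

            spin_lock_irqsave(&ch->lock, irq_flags);
            ch->flags |= CALLOUT_MADE;              /* teardown may proceed now */
            spin_unlock_irqrestore(&ch->lock, irq_flags);
    }
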
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 5a36292388eb..b4b84c269210 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -335,10 +335,10 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size) | |||
335 | */ | 335 | */ |
336 | 336 | ||
337 | SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, | 337 | SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, |
338 | pci_domain_nr(bus), bus->number, | 338 | pci_domain_nr(bus), bus->number, |
339 | 0, /* io */ | 339 | 0, /* io */ |
340 | 0, /* read */ | 340 | 0, /* read */ |
341 | port, size, __pa(val)); | 341 | port, size, __pa(val)); |
342 | 342 | ||
343 | if (isrv.status == 0) | 343 | if (isrv.status == 0) |
344 | return size; | 344 | return size; |
@@ -381,10 +381,10 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) | |||
381 | */ | 381 | */ |
382 | 382 | ||
383 | SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, | 383 | SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE, |
384 | pci_domain_nr(bus), bus->number, | 384 | pci_domain_nr(bus), bus->number, |
385 | 0, /* io */ | 385 | 0, /* io */ |
386 | 1, /* write */ | 386 | 1, /* write */ |
387 | port, size, __pa(&val)); | 387 | port, size, __pa(&val)); |
388 | 388 | ||
389 | if (isrv.status == 0) | 389 | if (isrv.status == 0) |
390 | return size; | 390 | return size; |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index aa3fa5152a32..1f0253bfe0a0 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
@@ -12,7 +12,7 @@ | |||
12 | #include <asm/sn/pcibus_provider_defs.h> | 12 | #include <asm/sn/pcibus_provider_defs.h> |
13 | #include <asm/sn/pcidev.h> | 13 | #include <asm/sn/pcidev.h> |
14 | 14 | ||
15 | int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ | 15 | int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */ |
16 | 16 | ||
17 | /* | 17 | /* |
18 | * mark_ate: Mark the ate as either free or inuse. | 18 | * mark_ate: Mark the ate as either free or inuse. |
@@ -20,14 +20,12 @@ int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ | |||
20 | static void mark_ate(struct ate_resource *ate_resource, int start, int number, | 20 | static void mark_ate(struct ate_resource *ate_resource, int start, int number, |
21 | u64 value) | 21 | u64 value) |
22 | { | 22 | { |
23 | |||
24 | u64 *ate = ate_resource->ate; | 23 | u64 *ate = ate_resource->ate; |
25 | int index; | 24 | int index; |
26 | int length = 0; | 25 | int length = 0; |
27 | 26 | ||
28 | for (index = start; length < number; index++, length++) | 27 | for (index = start; length < number; index++, length++) |
29 | ate[index] = value; | 28 | ate[index] = value; |
30 | |||
31 | } | 29 | } |
32 | 30 | ||
33 | /* | 31 | /* |
@@ -37,7 +35,6 @@ static void mark_ate(struct ate_resource *ate_resource, int start, int number, | |||
37 | static int find_free_ate(struct ate_resource *ate_resource, int start, | 35 | static int find_free_ate(struct ate_resource *ate_resource, int start, |
38 | int count) | 36 | int count) |
39 | { | 37 | { |
40 | |||
41 | u64 *ate = ate_resource->ate; | 38 | u64 *ate = ate_resource->ate; |
42 | int index; | 39 | int index; |
43 | int start_free; | 40 | int start_free; |
@@ -70,12 +67,10 @@ static int find_free_ate(struct ate_resource *ate_resource, int start, | |||
70 | static inline void free_ate_resource(struct ate_resource *ate_resource, | 67 | static inline void free_ate_resource(struct ate_resource *ate_resource, |
71 | int start) | 68 | int start) |
72 | { | 69 | { |
73 | |||
74 | mark_ate(ate_resource, start, ate_resource->ate[start], 0); | 70 | mark_ate(ate_resource, start, ate_resource->ate[start], 0); |
75 | if ((ate_resource->lowest_free_index > start) || | 71 | if ((ate_resource->lowest_free_index > start) || |
76 | (ate_resource->lowest_free_index < 0)) | 72 | (ate_resource->lowest_free_index < 0)) |
77 | ate_resource->lowest_free_index = start; | 73 | ate_resource->lowest_free_index = start; |
78 | |||
79 | } | 74 | } |
80 | 75 | ||
81 | /* | 76 | /* |
@@ -84,7 +79,6 @@ static inline void free_ate_resource(struct ate_resource *ate_resource, | |||
84 | static inline int alloc_ate_resource(struct ate_resource *ate_resource, | 79 | static inline int alloc_ate_resource(struct ate_resource *ate_resource, |
85 | int ate_needed) | 80 | int ate_needed) |
86 | { | 81 | { |
87 | |||
88 | int start_index; | 82 | int start_index; |
89 | 83 | ||
90 | /* | 84 | /* |
@@ -118,19 +112,12 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource, | |||
118 | */ | 112 | */ |
119 | int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count) | 113 | int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count) |
120 | { | 114 | { |
121 | int status = 0; | 115 | int status; |
122 | u64 flag; | 116 | unsigned long flags; |
123 | 117 | ||
124 | flag = pcibr_lock(pcibus_info); | 118 | spin_lock_irqsave(&pcibus_info->pbi_lock, flags); |
125 | status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count); | 119 | status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count); |
126 | 120 | spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags); | |
127 | if (status < 0) { | ||
128 | /* Failed to allocate */ | ||
129 | pcibr_unlock(pcibus_info, flag); | ||
130 | return -1; | ||
131 | } | ||
132 | |||
133 | pcibr_unlock(pcibus_info, flag); | ||
134 | 121 | ||
135 | return status; | 122 | return status; |
136 | } | 123 | } |
@@ -182,7 +169,7 @@ void pcibr_ate_free(struct pcibus_info *pcibus_info, int index) | |||
182 | ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V)); | 169 | ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V)); |
183 | } | 170 | } |
184 | 171 | ||
185 | flags = pcibr_lock(pcibus_info); | 172 | spin_lock_irqsave(&pcibus_info->pbi_lock, flags); |
186 | free_ate_resource(&pcibus_info->pbi_int_ate_resource, index); | 173 | free_ate_resource(&pcibus_info->pbi_int_ate_resource, index); |
187 | pcibr_unlock(pcibus_info, flags); | 174 | spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags); |
188 | } | 175 | } |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index 54ce5b7ceed2..9f86bb6519aa 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c | |||
@@ -137,14 +137,12 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, | |||
137 | pci_addr |= PCI64_ATTR_VIRTUAL; | 137 | pci_addr |= PCI64_ATTR_VIRTUAL; |
138 | 138 | ||
139 | return pci_addr; | 139 | return pci_addr; |
140 | |||
141 | } | 140 | } |
142 | 141 | ||
143 | static dma_addr_t | 142 | static dma_addr_t |
144 | pcibr_dmatrans_direct32(struct pcidev_info * info, | 143 | pcibr_dmatrans_direct32(struct pcidev_info * info, |
145 | u64 paddr, size_t req_size, u64 flags) | 144 | u64 paddr, size_t req_size, u64 flags) |
146 | { | 145 | { |
147 | |||
148 | struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; | 146 | struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; |
149 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> | 147 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> |
150 | pdi_pcibus_info; | 148 | pdi_pcibus_info; |
@@ -171,7 +169,6 @@ pcibr_dmatrans_direct32(struct pcidev_info * info, | |||
171 | } | 169 | } |
172 | 170 | ||
173 | return PCI32_DIRECT_BASE | offset; | 171 | return PCI32_DIRECT_BASE | offset; |
174 | |||
175 | } | 172 | } |
176 | 173 | ||
177 | /* | 174 | /* |
@@ -218,9 +215,8 @@ void sn_dma_flush(u64 addr) | |||
218 | u64 flags; | 215 | u64 flags; |
219 | u64 itte; | 216 | u64 itte; |
220 | struct hubdev_info *hubinfo; | 217 | struct hubdev_info *hubinfo; |
221 | volatile struct sn_flush_device_kernel *p; | 218 | struct sn_flush_device_kernel *p; |
222 | volatile struct sn_flush_device_common *common; | 219 | struct sn_flush_device_common *common; |
223 | |||
224 | struct sn_flush_nasid_entry *flush_nasid_list; | 220 | struct sn_flush_nasid_entry *flush_nasid_list; |
225 | 221 | ||
226 | if (!sn_ioif_inited) | 222 | if (!sn_ioif_inited) |
@@ -310,8 +306,7 @@ void sn_dma_flush(u64 addr) | |||
310 | (common->sfdl_slot - 1)); | 306 | (common->sfdl_slot - 1)); |
311 | } | 307 | } |
312 | } else { | 308 | } else { |
313 | spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock, | 309 | spin_lock_irqsave(&p->sfdl_flush_lock, flags); |
314 | flags); | ||
315 | *common->sfdl_flush_addr = 0; | 310 | *common->sfdl_flush_addr = 0; |
316 | 311 | ||
317 | /* force an interrupt. */ | 312 | /* force an interrupt. */ |
@@ -322,8 +317,7 @@ void sn_dma_flush(u64 addr) | |||
322 | cpu_relax(); | 317 | cpu_relax(); |
323 | 318 | ||
324 | /* okay, everything is synched up. */ | 319 | /* okay, everything is synched up. */ |
325 | spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, | 320 | spin_unlock_irqrestore(&p->sfdl_flush_lock, flags); |
326 | flags); | ||
327 | } | 321 | } |
328 | return; | 322 | return; |
329 | } | 323 | } |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 2fac27049bf6..98f716bd92f0 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c | |||
@@ -163,9 +163,12 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
163 | /* Setup the PMU ATE map */ | 163 | /* Setup the PMU ATE map */ |
164 | soft->pbi_int_ate_resource.lowest_free_index = 0; | 164 | soft->pbi_int_ate_resource.lowest_free_index = 0; |
165 | soft->pbi_int_ate_resource.ate = | 165 | soft->pbi_int_ate_resource.ate = |
166 | kmalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL); | 166 | kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL); |
167 | memset(soft->pbi_int_ate_resource.ate, 0, | 167 | |
168 | (soft->pbi_int_ate_size * sizeof(u64))); | 168 | if (!soft->pbi_int_ate_resource.ate) { |
169 | kfree(soft); | ||
170 | return NULL; | ||
171 | } | ||
169 | 172 | ||
170 | if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) { | 173 | if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) { |
171 | /* TIO PCI Bridge: find nearest node with CPUs */ | 174 | /* TIO PCI Bridge: find nearest node with CPUs */ |
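[Note on the hunk above] Beyond the kzalloc() conversion, pcibr_bus_fixup() gains the failure path that was missing: if the ATE array cannot be allocated, the partially set up "soft" structure is freed and NULL is returned to the caller. A sketch of that cleanup-on-failure shape, assuming kernel context and hypothetical names:

    #include <linux/types.h>
    #include <linux/slab.h>

    struct bus_soft {
            u64 *ate;
            int ate_size;
    };

    static struct bus_soft *bus_soft_alloc(int nr_ates)
    {
            struct bus_soft *soft = kzalloc(sizeof(*soft), GFP_KERNEL);

            if (!soft)
                    return NULL;

            soft->ate_size = nr_ates;
            soft->ate = kzalloc(nr_ates * sizeof(u64), GFP_KERNEL);
            if (!soft->ate) {
                    kfree(soft);            /* undo the partial setup */
                    return NULL;
            }
            return soft;
    }
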
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 96b919828053..8849439e88dd 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
@@ -21,6 +21,10 @@ config GENERIC_CALIBRATE_DELAY | |||
21 | bool | 21 | bool |
22 | default y | 22 | default y |
23 | 23 | ||
24 | config TIME_LOW_RES | ||
25 | bool | ||
26 | default y | ||
27 | |||
24 | config ARCH_MAY_HAVE_PC_FDC | 28 | config ARCH_MAY_HAVE_PC_FDC |
25 | bool | 29 | bool |
26 | depends on Q40 || (BROKEN && SUN3X) | 30 | depends on Q40 || (BROKEN && SUN3X) |
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index e2a6e8648960..e50858dbc237 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig | |||
@@ -29,6 +29,10 @@ config GENERIC_CALIBRATE_DELAY | |||
29 | bool | 29 | bool |
30 | default y | 30 | default y |
31 | 31 | ||
32 | config TIME_LOW_RES | ||
33 | bool | ||
34 | default y | ||
35 | |||
32 | source "init/Kconfig" | 36 | source "init/Kconfig" |
33 | 37 | ||
34 | menu "Processor type and features" | 38 | menu "Processor type and features" |
diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 6a57407df1bc..38c0f3360d51 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile | |||
@@ -94,7 +94,6 @@ endif | |||
94 | # machines may also. Since BFD is incredibly buggy with respect to | 94 | # machines may also. Since BFD is incredibly buggy with respect to |
95 | # crossformat linking we rely on the elf2ecoff tool for format conversion. | 95 | # crossformat linking we rely on the elf2ecoff tool for format conversion. |
96 | # | 96 | # |
97 | cflags-y += -I $(TOPDIR)/include/asm/gcc | ||
98 | cflags-y += -G 0 -mno-abicalls -fno-pic -pipe | 97 | cflags-y += -G 0 -mno-abicalls -fno-pic -pipe |
99 | LDFLAGS_vmlinux += -G 0 -static -n -nostdlib | 98 | LDFLAGS_vmlinux += -G 0 -static -n -nostdlib |
100 | MODFLAGS += -mlong-calls | 99 | MODFLAGS += -mlong-calls |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 5232fc752935..092679c2dca9 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/a.out.h> | 25 | #include <linux/a.out.h> |
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/completion.h> | 27 | #include <linux/completion.h> |
28 | #include <linux/kallsyms.h> | ||
28 | 29 | ||
29 | #include <asm/abi.h> | 30 | #include <asm/abi.h> |
30 | #include <asm/bootinfo.h> | 31 | #include <asm/bootinfo.h> |
@@ -272,46 +273,19 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | |||
272 | 273 | ||
273 | static struct mips_frame_info { | 274 | static struct mips_frame_info { |
274 | void *func; | 275 | void *func; |
275 | int omit_fp; /* compiled without fno-omit-frame-pointer */ | 276 | unsigned long func_size; |
276 | int frame_offset; | 277 | int frame_size; |
277 | int pc_offset; | 278 | int pc_offset; |
278 | } schedule_frame, mfinfo[] = { | 279 | } *schedule_frame, mfinfo[64]; |
279 | { schedule, 0 }, /* must be first */ | 280 | static int mfinfo_num; |
280 | /* arch/mips/kernel/semaphore.c */ | ||
281 | { __down, 1 }, | ||
282 | { __down_interruptible, 1 }, | ||
283 | /* kernel/sched.c */ | ||
284 | #ifdef CONFIG_PREEMPT | ||
285 | { preempt_schedule, 0 }, | ||
286 | #endif | ||
287 | { wait_for_completion, 0 }, | ||
288 | { interruptible_sleep_on, 0 }, | ||
289 | { interruptible_sleep_on_timeout, 0 }, | ||
290 | { sleep_on, 0 }, | ||
291 | { sleep_on_timeout, 0 }, | ||
292 | { yield, 0 }, | ||
293 | { io_schedule, 0 }, | ||
294 | { io_schedule_timeout, 0 }, | ||
295 | #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) | ||
296 | { __preempt_spin_lock, 0 }, | ||
297 | { __preempt_write_lock, 0 }, | ||
298 | #endif | ||
299 | /* kernel/timer.c */ | ||
300 | { schedule_timeout, 1 }, | ||
301 | /* { nanosleep_restart, 1 }, */ | ||
302 | /* lib/rwsem-spinlock.c */ | ||
303 | { __down_read, 1 }, | ||
304 | { __down_write, 1 }, | ||
305 | }; | ||
306 | 281 | ||
307 | static int mips_frame_info_initialized; | ||
308 | static int __init get_frame_info(struct mips_frame_info *info) | 282 | static int __init get_frame_info(struct mips_frame_info *info) |
309 | { | 283 | { |
310 | int i; | 284 | int i; |
311 | void *func = info->func; | 285 | void *func = info->func; |
312 | union mips_instruction *ip = (union mips_instruction *)func; | 286 | union mips_instruction *ip = (union mips_instruction *)func; |
313 | info->pc_offset = -1; | 287 | info->pc_offset = -1; |
314 | info->frame_offset = info->omit_fp ? 0 : -1; | 288 | info->frame_size = 0; |
315 | for (i = 0; i < 128; i++, ip++) { | 289 | for (i = 0; i < 128; i++, ip++) { |
316 | /* if jal, jalr, jr, stop. */ | 290 | /* if jal, jalr, jr, stop. */ |
317 | if (ip->j_format.opcode == jal_op || | 291 | if (ip->j_format.opcode == jal_op || |
@@ -320,6 +294,23 @@ static int __init get_frame_info(struct mips_frame_info *info) | |||
320 | ip->r_format.func == jr_op))) | 294 | ip->r_format.func == jr_op))) |
321 | break; | 295 | break; |
322 | 296 | ||
297 | if (info->func_size && i >= info->func_size / 4) | ||
298 | break; | ||
299 | if ( | ||
300 | #ifdef CONFIG_32BIT | ||
301 | ip->i_format.opcode == addiu_op && | ||
302 | #endif | ||
303 | #ifdef CONFIG_64BIT | ||
304 | ip->i_format.opcode == daddiu_op && | ||
305 | #endif | ||
306 | ip->i_format.rs == 29 && | ||
307 | ip->i_format.rt == 29) { | ||
308 | /* addiu/daddiu sp,sp,-imm */ | ||
309 | if (info->frame_size) | ||
310 | continue; | ||
311 | info->frame_size = - ip->i_format.simmediate; | ||
312 | } | ||
313 | |||
323 | if ( | 314 | if ( |
324 | #ifdef CONFIG_32BIT | 315 | #ifdef CONFIG_32BIT |
325 | ip->i_format.opcode == sw_op && | 316 | ip->i_format.opcode == sw_op && |
@@ -327,31 +318,20 @@ static int __init get_frame_info(struct mips_frame_info *info) | |||
327 | #ifdef CONFIG_64BIT | 318 | #ifdef CONFIG_64BIT |
328 | ip->i_format.opcode == sd_op && | 319 | ip->i_format.opcode == sd_op && |
329 | #endif | 320 | #endif |
330 | ip->i_format.rs == 29) | 321 | ip->i_format.rs == 29 && |
331 | { | 322 | ip->i_format.rt == 31) { |
332 | /* sw / sd $ra, offset($sp) */ | 323 | /* sw / sd $ra, offset($sp) */ |
333 | if (ip->i_format.rt == 31) { | 324 | if (info->pc_offset != -1) |
334 | if (info->pc_offset != -1) | 325 | continue; |
335 | continue; | 326 | info->pc_offset = |
336 | info->pc_offset = | 327 | ip->i_format.simmediate / sizeof(long); |
337 | ip->i_format.simmediate / sizeof(long); | ||
338 | } | ||
339 | /* sw / sd $s8, offset($sp) */ | ||
340 | if (ip->i_format.rt == 30) { | ||
341 | //#if 0 /* gcc 3.4 does aggressive optimization... */ | ||
342 | if (info->frame_offset != -1) | ||
343 | continue; | ||
344 | //#endif | ||
345 | info->frame_offset = | ||
346 | ip->i_format.simmediate / sizeof(long); | ||
347 | } | ||
348 | } | 328 | } |
349 | } | 329 | } |
350 | if (info->pc_offset == -1 || info->frame_offset == -1) { | 330 | if (info->pc_offset == -1 || info->frame_size == 0) { |
351 | printk("Can't analyze prologue code at %p\n", func); | 331 | if (func == schedule) |
332 | printk("Can't analyze prologue code at %p\n", func); | ||
352 | info->pc_offset = -1; | 333 | info->pc_offset = -1; |
353 | info->frame_offset = -1; | 334 | info->frame_size = 0; |
354 | return -1; | ||
355 | } | 335 | } |
356 | 336 | ||
357 | return 0; | 337 | return 0; |
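[Note on the hunks above] The rewritten prologue scanner derives each function's stack frame size by looking for an "addiu/daddiu sp,sp,-imm" near the top of the function (register 29 is $sp, 31 is $ra) instead of consulting the old hand-maintained table, and keeps only the first such adjustment it sees. A standalone sketch of the MIPS I-format field extraction that the match relies on:

    #include <stdio.h>
    #include <stdint.h>

    /* MIPS I-format: opcode[31:26] rs[25:21] rt[20:16] simm[15:0] */
    struct iformat {
            unsigned opcode, rs, rt;
            int simm;
    };

    static struct iformat decode(uint32_t insn)
    {
            struct iformat f;

            f.opcode = insn >> 26;
            f.rs     = (insn >> 21) & 0x1f;
            f.rt     = (insn >> 16) & 0x1f;
            f.simm   = (int16_t)(insn & 0xffff);    /* sign-extended */
            return f;
    }

    int main(void)
    {
            /* addiu sp,sp,-32: opcode 0x09, rs = 29, rt = 29, imm = -32 */
            uint32_t insn = (0x09u << 26) | (29u << 21) | (29u << 16) | 0xffe0u;
            struct iformat f = decode(insn);

            if (f.opcode == 0x09 && f.rs == 29 && f.rt == 29 && f.simm < 0)
                    printf("frame size = %d bytes\n", -f.simm);      /* 32 */
            return 0;
    }
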
@@ -359,25 +339,36 @@ static int __init get_frame_info(struct mips_frame_info *info) | |||
359 | 339 | ||
360 | static int __init frame_info_init(void) | 340 | static int __init frame_info_init(void) |
361 | { | 341 | { |
362 | int i, found; | 342 | int i; |
363 | for (i = 0; i < ARRAY_SIZE(mfinfo); i++) | 343 | #ifdef CONFIG_KALLSYMS |
364 | if (get_frame_info(&mfinfo[i])) | 344 | char *modname; |
365 | return -1; | 345 | char namebuf[KSYM_NAME_LEN + 1]; |
366 | schedule_frame = mfinfo[0]; | 346 | unsigned long start, size, ofs; |
367 | /* bubble sort */ | 347 | extern char __sched_text_start[], __sched_text_end[]; |
368 | do { | 348 | extern char __lock_text_start[], __lock_text_end[]; |
369 | struct mips_frame_info tmp; | 349 | |
370 | found = 0; | 350 | start = (unsigned long)__sched_text_start; |
371 | for (i = 1; i < ARRAY_SIZE(mfinfo); i++) { | 351 | for (i = 0; i < ARRAY_SIZE(mfinfo); i++) { |
372 | if (mfinfo[i-1].func > mfinfo[i].func) { | 352 | if (start == (unsigned long)schedule) |
373 | tmp = mfinfo[i]; | 353 | schedule_frame = &mfinfo[i]; |
374 | mfinfo[i] = mfinfo[i-1]; | 354 | if (!kallsyms_lookup(start, &size, &ofs, &modname, namebuf)) |
375 | mfinfo[i-1] = tmp; | 355 | break; |
376 | found = 1; | 356 | mfinfo[i].func = (void *)(start + ofs); |
377 | } | 357 | mfinfo[i].func_size = size; |
378 | } | 358 | start += size - ofs; |
379 | } while (found); | 359 | if (start >= (unsigned long)__lock_text_end) |
380 | mips_frame_info_initialized = 1; | 360 | break; |
361 | if (start == (unsigned long)__sched_text_end) | ||
362 | start = (unsigned long)__lock_text_start; | ||
363 | } | ||
364 | #else | ||
365 | mfinfo[0].func = schedule; | ||
366 | schedule_frame = &mfinfo[0]; | ||
367 | #endif | ||
368 | for (i = 0; i < ARRAY_SIZE(mfinfo) && mfinfo[i].func; i++) | ||
369 | get_frame_info(&mfinfo[i]); | ||
370 | |||
371 | mfinfo_num = i; | ||
381 | return 0; | 372 | return 0; |
382 | } | 373 | } |
383 | 374 | ||
@@ -394,47 +385,52 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
394 | if (t->reg31 == (unsigned long) ret_from_fork) | 385 | if (t->reg31 == (unsigned long) ret_from_fork) |
395 | return t->reg31; | 386 | return t->reg31; |
396 | 387 | ||
397 | if (schedule_frame.pc_offset < 0) | 388 | if (!schedule_frame || schedule_frame->pc_offset < 0) |
398 | return 0; | 389 | return 0; |
399 | return ((unsigned long *)t->reg29)[schedule_frame.pc_offset]; | 390 | return ((unsigned long *)t->reg29)[schedule_frame->pc_offset]; |
400 | } | 391 | } |
401 | 392 | ||
402 | /* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */ | 393 | /* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */ |
403 | unsigned long get_wchan(struct task_struct *p) | 394 | unsigned long get_wchan(struct task_struct *p) |
404 | { | 395 | { |
405 | unsigned long stack_page; | 396 | unsigned long stack_page; |
406 | unsigned long frame, pc; | 397 | unsigned long pc; |
398 | #ifdef CONFIG_KALLSYMS | ||
399 | unsigned long frame; | ||
400 | #endif | ||
407 | 401 | ||
408 | if (!p || p == current || p->state == TASK_RUNNING) | 402 | if (!p || p == current || p->state == TASK_RUNNING) |
409 | return 0; | 403 | return 0; |
410 | 404 | ||
411 | stack_page = (unsigned long)task_stack_page(p); | 405 | stack_page = (unsigned long)task_stack_page(p); |
412 | if (!stack_page || !mips_frame_info_initialized) | 406 | if (!stack_page || !mfinfo_num) |
413 | return 0; | 407 | return 0; |
414 | 408 | ||
415 | pc = thread_saved_pc(p); | 409 | pc = thread_saved_pc(p); |
410 | #ifdef CONFIG_KALLSYMS | ||
416 | if (!in_sched_functions(pc)) | 411 | if (!in_sched_functions(pc)) |
417 | return pc; | 412 | return pc; |
418 | 413 | ||
419 | frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset]; | 414 | frame = p->thread.reg29 + schedule_frame->frame_size; |
420 | do { | 415 | do { |
421 | int i; | 416 | int i; |
422 | 417 | ||
423 | if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32) | 418 | if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32) |
424 | return 0; | 419 | return 0; |
425 | 420 | ||
426 | for (i = ARRAY_SIZE(mfinfo) - 1; i >= 0; i--) { | 421 | for (i = mfinfo_num - 1; i >= 0; i--) { |
427 | if (pc >= (unsigned long) mfinfo[i].func) | 422 | if (pc >= (unsigned long) mfinfo[i].func) |
428 | break; | 423 | break; |
429 | } | 424 | } |
430 | if (i < 0) | 425 | if (i < 0) |
431 | break; | 426 | break; |
432 | 427 | ||
433 | if (mfinfo[i].omit_fp) | ||
434 | break; | ||
435 | pc = ((unsigned long *)frame)[mfinfo[i].pc_offset]; | 428 | pc = ((unsigned long *)frame)[mfinfo[i].pc_offset]; |
436 | frame = ((unsigned long *)frame)[mfinfo[i].frame_offset]; | 429 | if (!mfinfo[i].frame_size) |
430 | break; | ||
431 | frame += mfinfo[i].frame_size; | ||
437 | } while (in_sched_functions(pc)); | 432 | } while (in_sched_functions(pc)); |
433 | #endif | ||
438 | 434 | ||
439 | return pc; | 435 | return pc; |
440 | } | 436 | } |
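[Note on the hunk above] With per-function frame sizes available, get_wchan() can walk the sleeping task's stack without a saved frame pointer: read the saved return address at pc_offset, advance by the current function's frame_size, and repeat while the PC still lies in scheduler code. A simplified, self-contained model of that walk over a synthetic stack (sizes here are in longs purely for illustration; the kernel code works in bytes and stops via in_sched_functions()):

    #include <stdio.h>

    struct frame_info {
            int frame_size;         /* frame size, in longs for this model */
            int pc_offset;          /* where the saved return address lives */
    };

    static unsigned long walk(const unsigned long *stack, unsigned long sp,
                              const struct frame_info *fi, int depth)
    {
            unsigned long pc = 0;

            while (depth--) {
                    pc = stack[sp + fi->pc_offset]; /* saved "return address" */
                    sp += fi->frame_size;           /* pop this frame */
            }
            return pc;
    }

    int main(void)
    {
            unsigned long stack[16] = { 0 };
            struct frame_info fi = { .frame_size = 4, .pc_offset = 3 };

            stack[3] = 0x1111;      /* saved ra of the innermost frame */
            stack[7] = 0x2222;      /* saved ra of its caller */

            printf("%#lx\n", walk(stack, 0, &fi, 2));       /* 0x2222 */
            return 0;
    }
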
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 0fbc492d24b4..36bfc2588aa3 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h | |||
@@ -176,7 +176,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | |||
176 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) | 176 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) |
177 | sp = current->sas_ss_sp + current->sas_ss_size; | 177 | sp = current->sas_ss_sp + current->sas_ss_size; |
178 | 178 | ||
179 | return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? 32 : ALMASK)); | 179 | return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); |
180 | } | 180 | } |
181 | 181 | ||
182 | static inline int install_sigtramp(unsigned int __user *tramp, | 182 | static inline int install_sigtramp(unsigned int __user *tramp, |
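[Note on the hunk above] The get_sigframe() change swaps the literal mask 32 for ~(cpu_icache_line_size()-1) in the workaround branch: rounding an address down to a power-of-two boundary needs the complement of (boundary - 1) as the mask, whereas "& 32" keeps just a single bit of the stack pointer. A tiny self-contained illustration:

    #include <stdio.h>

    /* Round addr down to the nearest multiple of 'align' (a power of two). */
    static unsigned long align_down(unsigned long addr, unsigned long align)
    {
            return addr & ~(align - 1);
    }

    int main(void)
    {
            unsigned long sp = 0x7fff1234;

            printf("%#lx\n", align_down(sp, 32));   /* 0x7fff1220 */
            printf("%#lx\n", sp & 32);              /* 0x20: not a usable address */
            return 0;
    }
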
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index da3271e1fdac..8a8b8dd90417 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -537,7 +537,7 @@ _sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
537 | /* The ucontext contains a stack32_t, so we must convert! */ | 537 | /* The ucontext contains a stack32_t, so we must convert! */ |
538 | if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) | 538 | if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) |
539 | goto badframe; | 539 | goto badframe; |
540 | st.ss_size = (long) sp; | 540 | st.ss_sp = (void *)(long) sp; |
541 | if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size)) | 541 | if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size)) |
542 | goto badframe; | 542 | goto badframe; |
543 | if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags)) | 543 | if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags)) |
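[Note on the hunk above] The one-line fix here (mirrored for the n32 ABI in the next file) repairs a copy-and-paste slip: the 32-bit ss_sp value had been stored into st.ss_size, which the following __get_user() immediately overwrote, so st.ss_sp was never filled in. A compact standalone model of the intended conversion; the 32-bit field layout below is assumed for illustration only:

    #include <stdint.h>
    #include <stddef.h>

    struct stack32 {                /* 32-bit userspace view (assumed layout) */
            uint32_t ss_sp;
            int32_t  ss_flags;
            uint32_t ss_size;
    };

    struct stack_native {
            void   *ss_sp;
            int     ss_flags;
            size_t  ss_size;
    };

    static void convert_stack(const struct stack32 *in, struct stack_native *out)
    {
            out->ss_sp    = (void *)(unsigned long)in->ss_sp;  /* not ss_size! */
            out->ss_flags = in->ss_flags;
            out->ss_size  = in->ss_size;
    }
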
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 384fc4a639a4..5a3776096f07 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
@@ -108,7 +108,7 @@ _sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | |||
108 | /* The ucontext contains a stack32_t, so we must convert! */ | 108 | /* The ucontext contains a stack32_t, so we must convert! */ |
109 | if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) | 109 | if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp)) |
110 | goto badframe; | 110 | goto badframe; |
111 | st.ss_size = (long) sp; | 111 | st.ss_sp = (void *)(long) sp; |
112 | if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size)) | 112 | if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size)) |
113 | goto badframe; | 113 | goto badframe; |
114 | if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags)) | 114 | if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags)) |
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp_mt.c index 794a1c3de2a4..c930364830d0 100644 --- a/arch/mips/kernel/smp_mt.c +++ b/arch/mips/kernel/smp_mt.c | |||
@@ -68,6 +68,8 @@ void __init sanitize_tlb_entries(void) | |||
68 | 68 | ||
69 | set_c0_mvpcontrol(MVPCONTROL_VPC); | 69 | set_c0_mvpcontrol(MVPCONTROL_VPC); |
70 | 70 | ||
71 | back_to_back_c0_hazard(); | ||
72 | |||
71 | /* Disable TLB sharing */ | 73 | /* Disable TLB sharing */ |
72 | clear_c0_mvpcontrol(MVPCONTROL_STLB); | 74 | clear_c0_mvpcontrol(MVPCONTROL_STLB); |
73 | 75 | ||
@@ -102,35 +104,6 @@ void __init sanitize_tlb_entries(void) | |||
102 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 104 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
103 | } | 105 | } |
104 | 106 | ||
105 | #if 0 | ||
106 | /* | ||
107 | * Use c0_MVPConf0 to find out how many CPUs are available, setting up | ||
108 | * phys_cpu_present_map and the logical/physical mappings. | ||
109 | */ | ||
110 | void __init prom_build_cpu_map(void) | ||
111 | { | ||
112 | int i, num, ncpus; | ||
113 | |||
114 | cpus_clear(phys_cpu_present_map); | ||
115 | |||
116 | /* assume we boot on cpu 0.... */ | ||
117 | cpu_set(0, phys_cpu_present_map); | ||
118 | __cpu_number_map[0] = 0; | ||
119 | __cpu_logical_map[0] = 0; | ||
120 | |||
121 | if (cpu_has_mipsmt) { | ||
122 | ncpus = ((read_c0_mvpconf0() & (MVPCONF0_PVPE)) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
123 | for (i=1, num=0; i< NR_CPUS && i<ncpus; i++) { | ||
124 | cpu_set(i, phys_cpu_present_map); | ||
125 | __cpu_number_map[i] = ++num; | ||
126 | __cpu_logical_map[num] = i; | ||
127 | } | ||
128 | |||
129 | printk(KERN_INFO "%i available secondary CPU(s)\n", num); | ||
130 | } | ||
131 | } | ||
132 | #endif | ||
133 | |||
134 | static void ipi_resched_dispatch (struct pt_regs *regs) | 107 | static void ipi_resched_dispatch (struct pt_regs *regs) |
135 | { | 108 | { |
136 | do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs); | 109 | do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs); |
@@ -222,6 +195,9 @@ void prom_prepare_cpus(unsigned int max_cpus) | |||
222 | 195 | ||
223 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ | 196 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ |
224 | write_vpe_c0_config( read_c0_config()); | 197 | write_vpe_c0_config( read_c0_config()); |
198 | |||
199 | /* Propagate Config7 */ | ||
200 | write_vpe_c0_config7(read_c0_config7()); | ||
225 | } | 201 | } |
226 | 202 | ||
227 | } | 203 | } |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index e51c38cef88e..1b71d91e8268 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -471,61 +471,29 @@ struct flush_icache_range_args { | |||
471 | static inline void local_r4k_flush_icache_range(void *args) | 471 | static inline void local_r4k_flush_icache_range(void *args) |
472 | { | 472 | { |
473 | struct flush_icache_range_args *fir_args = args; | 473 | struct flush_icache_range_args *fir_args = args; |
474 | unsigned long dc_lsize = cpu_dcache_line_size(); | ||
475 | unsigned long ic_lsize = cpu_icache_line_size(); | ||
476 | unsigned long sc_lsize = cpu_scache_line_size(); | ||
477 | unsigned long start = fir_args->start; | 474 | unsigned long start = fir_args->start; |
478 | unsigned long end = fir_args->end; | 475 | unsigned long end = fir_args->end; |
479 | unsigned long addr, aend; | ||
480 | 476 | ||
481 | if (!cpu_has_ic_fills_f_dc) { | 477 | if (!cpu_has_ic_fills_f_dc) { |
482 | if (end - start > dcache_size) { | 478 | if (end - start > dcache_size) { |
483 | r4k_blast_dcache(); | 479 | r4k_blast_dcache(); |
484 | } else { | 480 | } else { |
485 | R4600_HIT_CACHEOP_WAR_IMPL; | 481 | R4600_HIT_CACHEOP_WAR_IMPL; |
486 | addr = start & ~(dc_lsize - 1); | 482 | protected_blast_dcache_range(start, end); |
487 | aend = (end - 1) & ~(dc_lsize - 1); | ||
488 | |||
489 | while (1) { | ||
490 | /* Hit_Writeback_Inv_D */ | ||
491 | protected_writeback_dcache_line(addr); | ||
492 | if (addr == aend) | ||
493 | break; | ||
494 | addr += dc_lsize; | ||
495 | } | ||
496 | } | 483 | } |
497 | 484 | ||
498 | if (!cpu_icache_snoops_remote_store) { | 485 | if (!cpu_icache_snoops_remote_store) { |
499 | if (end - start > scache_size) { | 486 | if (end - start > scache_size) |
500 | r4k_blast_scache(); | 487 | r4k_blast_scache(); |
501 | } else { | 488 | else |
502 | addr = start & ~(sc_lsize - 1); | 489 | protected_blast_scache_range(start, end); |
503 | aend = (end - 1) & ~(sc_lsize - 1); | ||
504 | |||
505 | while (1) { | ||
506 | /* Hit_Writeback_Inv_SD */ | ||
507 | protected_writeback_scache_line(addr); | ||
508 | if (addr == aend) | ||
509 | break; | ||
510 | addr += sc_lsize; | ||
511 | } | ||
512 | } | ||
513 | } | 490 | } |
514 | } | 491 | } |
515 | 492 | ||
516 | if (end - start > icache_size) | 493 | if (end - start > icache_size) |
517 | r4k_blast_icache(); | 494 | r4k_blast_icache(); |
518 | else { | 495 | else |
519 | addr = start & ~(ic_lsize - 1); | 496 | protected_blast_icache_range(start, end); |
520 | aend = (end - 1) & ~(ic_lsize - 1); | ||
521 | while (1) { | ||
522 | /* Hit_Invalidate_I */ | ||
523 | protected_flush_icache_line(addr); | ||
524 | if (addr == aend) | ||
525 | break; | ||
526 | addr += ic_lsize; | ||
527 | } | ||
528 | } | ||
529 | } | 497 | } |
530 | 498 | ||
531 | static void r4k_flush_icache_range(unsigned long start, unsigned long end) | 499 | static void r4k_flush_icache_range(unsigned long start, unsigned long end) |
@@ -619,27 +587,14 @@ static void r4k_flush_icache_page(struct vm_area_struct *vma, | |||
619 | 587 | ||
620 | static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) | 588 | static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) |
621 | { | 589 | { |
622 | unsigned long end, a; | ||
623 | |||
624 | /* Catch bad driver code */ | 590 | /* Catch bad driver code */ |
625 | BUG_ON(size == 0); | 591 | BUG_ON(size == 0); |
626 | 592 | ||
627 | if (cpu_has_subset_pcaches) { | 593 | if (cpu_has_subset_pcaches) { |
628 | unsigned long sc_lsize = cpu_scache_line_size(); | 594 | if (size >= scache_size) |
629 | |||
630 | if (size >= scache_size) { | ||
631 | r4k_blast_scache(); | 595 | r4k_blast_scache(); |
632 | return; | 596 | else |
633 | } | 597 | blast_scache_range(addr, addr + size); |
634 | |||
635 | a = addr & ~(sc_lsize - 1); | ||
636 | end = (addr + size - 1) & ~(sc_lsize - 1); | ||
637 | while (1) { | ||
638 | flush_scache_line(a); /* Hit_Writeback_Inv_SD */ | ||
639 | if (a == end) | ||
640 | break; | ||
641 | a += sc_lsize; | ||
642 | } | ||
643 | return; | 598 | return; |
644 | } | 599 | } |
645 | 600 | ||
@@ -651,17 +606,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) | |||
651 | if (size >= dcache_size) { | 606 | if (size >= dcache_size) { |
652 | r4k_blast_dcache(); | 607 | r4k_blast_dcache(); |
653 | } else { | 608 | } else { |
654 | unsigned long dc_lsize = cpu_dcache_line_size(); | ||
655 | |||
656 | R4600_HIT_CACHEOP_WAR_IMPL; | 609 | R4600_HIT_CACHEOP_WAR_IMPL; |
657 | a = addr & ~(dc_lsize - 1); | 610 | blast_dcache_range(addr, addr + size); |
658 | end = (addr + size - 1) & ~(dc_lsize - 1); | ||
659 | while (1) { | ||
660 | flush_dcache_line(a); /* Hit_Writeback_Inv_D */ | ||
661 | if (a == end) | ||
662 | break; | ||
663 | a += dc_lsize; | ||
664 | } | ||
665 | } | 611 | } |
666 | 612 | ||
667 | bc_wback_inv(addr, size); | 613 | bc_wback_inv(addr, size); |
@@ -669,44 +615,22 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) | |||
669 | 615 | ||
670 | static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) | 616 | static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) |
671 | { | 617 | { |
672 | unsigned long end, a; | ||
673 | |||
674 | /* Catch bad driver code */ | 618 | /* Catch bad driver code */ |
675 | BUG_ON(size == 0); | 619 | BUG_ON(size == 0); |
676 | 620 | ||
677 | if (cpu_has_subset_pcaches) { | 621 | if (cpu_has_subset_pcaches) { |
678 | unsigned long sc_lsize = cpu_scache_line_size(); | 622 | if (size >= scache_size) |
679 | |||
680 | if (size >= scache_size) { | ||
681 | r4k_blast_scache(); | 623 | r4k_blast_scache(); |
682 | return; | 624 | else |
683 | } | 625 | blast_scache_range(addr, addr + size); |
684 | |||
685 | a = addr & ~(sc_lsize - 1); | ||
686 | end = (addr + size - 1) & ~(sc_lsize - 1); | ||
687 | while (1) { | ||
688 | flush_scache_line(a); /* Hit_Writeback_Inv_SD */ | ||
689 | if (a == end) | ||
690 | break; | ||
691 | a += sc_lsize; | ||
692 | } | ||
693 | return; | 626 | return; |
694 | } | 627 | } |
695 | 628 | ||
696 | if (size >= dcache_size) { | 629 | if (size >= dcache_size) { |
697 | r4k_blast_dcache(); | 630 | r4k_blast_dcache(); |
698 | } else { | 631 | } else { |
699 | unsigned long dc_lsize = cpu_dcache_line_size(); | ||
700 | |||
701 | R4600_HIT_CACHEOP_WAR_IMPL; | 632 | R4600_HIT_CACHEOP_WAR_IMPL; |
702 | a = addr & ~(dc_lsize - 1); | 633 | blast_dcache_range(addr, addr + size); |
703 | end = (addr + size - 1) & ~(dc_lsize - 1); | ||
704 | while (1) { | ||
705 | flush_dcache_line(a); /* Hit_Writeback_Inv_D */ | ||
706 | if (a == end) | ||
707 | break; | ||
708 | a += dc_lsize; | ||
709 | } | ||
710 | } | 634 | } |
711 | 635 | ||
712 | bc_inv(addr, size); | 636 | bc_inv(addr, size); |
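The r4k hunks above replace hand-rolled loops over cache lines with the shared blast_*_range()/protected_blast_*_range() helpers. A hedged user-space model of what such a range walk does is sketched below, assuming a power-of-two line size; the real helpers issue MIPS cache ops (Hit_Writeback_Inv, Hit_Invalidate) rather than printing.

#include <stdio.h>

#define LINE_SIZE 32UL    /* assumed cache line size, power of two */

/* Stand-in for one cache op on the line containing 'addr'. */
static void flush_line(unsigned long addr)
{
    printf("flush line at 0x%lx\n", addr);
}

/* Model of blast_dcache_range(): visit every line touching [start, end). */
static void blast_range(unsigned long start, unsigned long end)
{
    unsigned long addr = start & ~(LINE_SIZE - 1);
    unsigned long aend = (end - 1) & ~(LINE_SIZE - 1);

    for (;;) {
        flush_line(addr);
        if (addr == aend)
            break;
        addr += LINE_SIZE;
    }
}

int main(void)
{
    blast_range(0x1010, 0x10a0);  /* partial first and last lines included */
    return 0;
}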
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c index 0a97a9434eba..7c572bea4a98 100644 --- a/arch/mips/mm/c-tx39.c +++ b/arch/mips/mm/c-tx39.c | |||
@@ -44,8 +44,6 @@ __asm__ __volatile__( \ | |||
44 | /* TX39H-style cache flush routines. */ | 44 | /* TX39H-style cache flush routines. */ |
45 | static void tx39h_flush_icache_all(void) | 45 | static void tx39h_flush_icache_all(void) |
46 | { | 46 | { |
47 | unsigned long start = KSEG0; | ||
48 | unsigned long end = (start + icache_size); | ||
49 | unsigned long flags, config; | 47 | unsigned long flags, config; |
50 | 48 | ||
51 | /* disable icache (set ICE#) */ | 49 | /* disable icache (set ICE#) */ |
@@ -53,33 +51,18 @@ static void tx39h_flush_icache_all(void) | |||
53 | config = read_c0_conf(); | 51 | config = read_c0_conf(); |
54 | write_c0_conf(config & ~TX39_CONF_ICE); | 52 | write_c0_conf(config & ~TX39_CONF_ICE); |
55 | TX39_STOP_STREAMING(); | 53 | TX39_STOP_STREAMING(); |
56 | 54 | blast_icache16(); | |
57 | /* invalidate icache */ | ||
58 | while (start < end) { | ||
59 | cache16_unroll32(start, Index_Invalidate_I); | ||
60 | start += 0x200; | ||
61 | } | ||
62 | |||
63 | write_c0_conf(config); | 55 | write_c0_conf(config); |
64 | local_irq_restore(flags); | 56 | local_irq_restore(flags); |
65 | } | 57 | } |
66 | 58 | ||
67 | static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size) | 59 | static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size) |
68 | { | 60 | { |
69 | unsigned long end, a; | ||
70 | unsigned long dc_lsize = current_cpu_data.dcache.linesz; | ||
71 | |||
72 | /* Catch bad driver code */ | 61 | /* Catch bad driver code */ |
73 | BUG_ON(size == 0); | 62 | BUG_ON(size == 0); |
74 | 63 | ||
75 | iob(); | 64 | iob(); |
76 | a = addr & ~(dc_lsize - 1); | 65 | blast_inv_dcache_range(addr, addr + size); |
77 | end = (addr + size - 1) & ~(dc_lsize - 1); | ||
78 | while (1) { | ||
79 | invalidate_dcache_line(a); /* Hit_Invalidate_D */ | ||
80 | if (a == end) break; | ||
81 | a += dc_lsize; | ||
82 | } | ||
83 | } | 66 | } |
84 | 67 | ||
85 | 68 | ||
@@ -241,42 +224,21 @@ static void tx39_flush_data_cache_page(unsigned long addr) | |||
241 | 224 | ||
242 | static void tx39_flush_icache_range(unsigned long start, unsigned long end) | 225 | static void tx39_flush_icache_range(unsigned long start, unsigned long end) |
243 | { | 226 | { |
244 | unsigned long dc_lsize = current_cpu_data.dcache.linesz; | ||
245 | unsigned long addr, aend; | ||
246 | |||
247 | if (end - start > dcache_size) | 227 | if (end - start > dcache_size) |
248 | tx39_blast_dcache(); | 228 | tx39_blast_dcache(); |
249 | else { | 229 | else |
250 | addr = start & ~(dc_lsize - 1); | 230 | protected_blast_dcache_range(start, end); |
251 | aend = (end - 1) & ~(dc_lsize - 1); | ||
252 | |||
253 | while (1) { | ||
254 | /* Hit_Writeback_Inv_D */ | ||
255 | protected_writeback_dcache_line(addr); | ||
256 | if (addr == aend) | ||
257 | break; | ||
258 | addr += dc_lsize; | ||
259 | } | ||
260 | } | ||
261 | 231 | ||
262 | if (end - start > icache_size) | 232 | if (end - start > icache_size) |
263 | tx39_blast_icache(); | 233 | tx39_blast_icache(); |
264 | else { | 234 | else { |
265 | unsigned long flags, config; | 235 | unsigned long flags, config; |
266 | addr = start & ~(dc_lsize - 1); | ||
267 | aend = (end - 1) & ~(dc_lsize - 1); | ||
268 | /* disable icache (set ICE#) */ | 236 | /* disable icache (set ICE#) */ |
269 | local_irq_save(flags); | 237 | local_irq_save(flags); |
270 | config = read_c0_conf(); | 238 | config = read_c0_conf(); |
271 | write_c0_conf(config & ~TX39_CONF_ICE); | 239 | write_c0_conf(config & ~TX39_CONF_ICE); |
272 | TX39_STOP_STREAMING(); | 240 | TX39_STOP_STREAMING(); |
273 | while (1) { | 241 | protected_blast_icache_range(start, end); |
274 | /* Hit_Invalidate_I */ | ||
275 | protected_flush_icache_line(addr); | ||
276 | if (addr == aend) | ||
277 | break; | ||
278 | addr += dc_lsize; | ||
279 | } | ||
280 | write_c0_conf(config); | 242 | write_c0_conf(config); |
281 | local_irq_restore(flags); | 243 | local_irq_restore(flags); |
282 | } | 244 | } |
@@ -311,7 +273,7 @@ static void tx39_flush_icache_page(struct vm_area_struct *vma, struct page *page | |||
311 | 273 | ||
312 | static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) | 274 | static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) |
313 | { | 275 | { |
314 | unsigned long end, a; | 276 | unsigned long end; |
315 | 277 | ||
316 | if (((size | addr) & (PAGE_SIZE - 1)) == 0) { | 278 | if (((size | addr) & (PAGE_SIZE - 1)) == 0) { |
317 | end = addr + size; | 279 | end = addr + size; |
@@ -322,20 +284,13 @@ static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) | |||
322 | } else if (size > dcache_size) { | 284 | } else if (size > dcache_size) { |
323 | tx39_blast_dcache(); | 285 | tx39_blast_dcache(); |
324 | } else { | 286 | } else { |
325 | unsigned long dc_lsize = current_cpu_data.dcache.linesz; | 287 | blast_dcache_range(addr, addr + size); |
326 | a = addr & ~(dc_lsize - 1); | ||
327 | end = (addr + size - 1) & ~(dc_lsize - 1); | ||
328 | while (1) { | ||
329 | flush_dcache_line(a); /* Hit_Writeback_Inv_D */ | ||
330 | if (a == end) break; | ||
331 | a += dc_lsize; | ||
332 | } | ||
333 | } | 288 | } |
334 | } | 289 | } |
335 | 290 | ||
336 | static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) | 291 | static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) |
337 | { | 292 | { |
338 | unsigned long end, a; | 293 | unsigned long end; |
339 | 294 | ||
340 | if (((size | addr) & (PAGE_SIZE - 1)) == 0) { | 295 | if (((size | addr) & (PAGE_SIZE - 1)) == 0) { |
341 | end = addr + size; | 296 | end = addr + size; |
@@ -346,14 +301,7 @@ static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) | |||
346 | } else if (size > dcache_size) { | 301 | } else if (size > dcache_size) { |
347 | tx39_blast_dcache(); | 302 | tx39_blast_dcache(); |
348 | } else { | 303 | } else { |
349 | unsigned long dc_lsize = current_cpu_data.dcache.linesz; | 304 | blast_inv_dcache_range(addr, addr + size); |
350 | a = addr & ~(dc_lsize - 1); | ||
351 | end = (addr + size - 1) & ~(dc_lsize - 1); | ||
352 | while (1) { | ||
353 | invalidate_dcache_line(a); /* Hit_Invalidate_D */ | ||
354 | if (a == end) break; | ||
355 | a += dc_lsize; | ||
356 | } | ||
357 | } | 305 | } |
358 | } | 306 | } |
359 | 307 | ||
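The tx39 conversion has the same shape as the r4k one above, but note the two flavours used: blast_dcache_range() issues a writeback-plus-invalidate op for the wback_inv path, while blast_inv_dcache_range() issues a pure invalidate for dma_cache_inv(). A tiny sketch of which op each DMA direction needs; the enum names are assumptions for illustration, not kernel identifiers.

#include <stdio.h>

enum cache_op { HIT_WRITEBACK_INV_D, HIT_INVALIDATE_D };

/* dma_cache_wback_inv() must write dirty lines back before a device reads
 * the buffer; dma_cache_inv() only needs to discard stale lines before the
 * CPU reads data a device has written. */
static enum cache_op dma_cache_op(int device_writes_memory)
{
    return device_writes_memory ? HIT_INVALIDATE_D : HIT_WRITEBACK_INV_D;
}

int main(void)
{
    printf("device -> memory: %s\n",
           dma_cache_op(1) == HIT_INVALIDATE_D ? "invalidate only"
                                               : "writeback + invalidate");
    printf("memory -> device: %s\n",
           dma_cache_op(0) == HIT_INVALIDATE_D ? "invalidate only"
                                               : "writeback + invalidate");
    return 0;
}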
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 7c914a4c67c3..eca33cfa8a4c 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -29,6 +29,11 @@ config GENERIC_CALIBRATE_DELAY | |||
29 | bool | 29 | bool |
30 | default y | 30 | default y |
31 | 31 | ||
32 | config TIME_LOW_RES | ||
33 | bool | ||
34 | depends on SMP | ||
35 | default y | ||
36 | |||
32 | config GENERIC_ISA_DMA | 37 | config GENERIC_ISA_DMA |
33 | bool | 38 | bool |
34 | 39 | ||
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index e96c35bddac7..71f0a2fb3078 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -30,7 +30,7 @@ void __delay(unsigned long loops) | |||
30 | */ | 30 | */ |
31 | __asm__ __volatile__( | 31 | __asm__ __volatile__( |
32 | "0: brct %0,0b" | 32 | "0: brct %0,0b" |
33 | : /* no outputs */ : "r" (loops/2) ); | 33 | : /* no outputs */ : "r" ((loops/2) + 1)); |
34 | } | 34 | } |
35 | 35 | ||
36 | /* | 36 | /* |
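brct decrements its operand and branches while the result is non-zero, so a starting value of zero (loops < 2) wraps and spins for roughly the full register range before terminating; the +1 guarantees at least one pass and keeps the delay bounded. A small C model of the decrement-and-branch semantics, for illustration only:

#include <stdio.h>

/* Model of "brct %0,0b": decrement, loop while non-zero. */
static unsigned long spin(unsigned long count)
{
    unsigned long iterations = 0;

    do {
        count--;
        iterations++;
    } while (count != 0);

    return iterations;
}

int main(void)
{
    unsigned long loops = 1;   /* loops/2 == 0 would wrap without the +1 */

    printf("loops/2     -> %s\n",
           (loops / 2) == 0 ? "would wrap and spin for the whole register range"
                            : "iterates loops/2 times");
    printf("loops/2 + 1 -> iterates %lu time(s)\n", spin(loops / 2 + 1));
    return 0;
}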
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig index 04494638b963..e7fc3e500342 100644 --- a/arch/v850/Kconfig +++ b/arch/v850/Kconfig | |||
@@ -28,6 +28,10 @@ config GENERIC_IRQ_PROBE | |||
28 | bool | 28 | bool |
29 | default y | 29 | default y |
30 | 30 | ||
31 | config TIME_LOW_RES | ||
32 | bool | ||
33 | default y | ||
34 | |||
31 | # Turn off some random 386 crap that can affect device config | 35 | # Turn off some random 386 crap that can affect device config |
32 | config ISA | 36 | config ISA |
33 | bool | 37 | bool |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 4e7dbcc425ff..93e44d0292ab 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -645,7 +645,7 @@ static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct pag | |||
645 | * b) The data can be used as cache to avoid read requests if we receive a | 645 | * b) The data can be used as cache to avoid read requests if we receive a |
646 | * new write request for the same zone. | 646 | * new write request for the same zone. |
647 | */ | 647 | */ |
648 | static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, int *offsets) | 648 | static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec) |
649 | { | 649 | { |
650 | int f, p, offs; | 650 | int f, p, offs; |
651 | 651 | ||
@@ -653,15 +653,15 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, in | |||
653 | p = 0; | 653 | p = 0; |
654 | offs = 0; | 654 | offs = 0; |
655 | for (f = 0; f < pkt->frames; f++) { | 655 | for (f = 0; f < pkt->frames; f++) { |
656 | if (pages[f] != pkt->pages[p]) { | 656 | if (bvec[f].bv_page != pkt->pages[p]) { |
657 | void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f]; | 657 | void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset; |
658 | void *vto = page_address(pkt->pages[p]) + offs; | 658 | void *vto = page_address(pkt->pages[p]) + offs; |
659 | memcpy(vto, vfrom, CD_FRAMESIZE); | 659 | memcpy(vto, vfrom, CD_FRAMESIZE); |
660 | kunmap_atomic(vfrom, KM_USER0); | 660 | kunmap_atomic(vfrom, KM_USER0); |
661 | pages[f] = pkt->pages[p]; | 661 | bvec[f].bv_page = pkt->pages[p]; |
662 | offsets[f] = offs; | 662 | bvec[f].bv_offset = offs; |
663 | } else { | 663 | } else { |
664 | BUG_ON(offsets[f] != offs); | 664 | BUG_ON(bvec[f].bv_offset != offs); |
665 | } | 665 | } |
666 | offs += CD_FRAMESIZE; | 666 | offs += CD_FRAMESIZE; |
667 | if (offs >= PAGE_SIZE) { | 667 | if (offs >= PAGE_SIZE) { |
@@ -991,18 +991,17 @@ try_next_bio: | |||
991 | static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | 991 | static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) |
992 | { | 992 | { |
993 | struct bio *bio; | 993 | struct bio *bio; |
994 | struct page *pages[PACKET_MAX_SIZE]; | ||
995 | int offsets[PACKET_MAX_SIZE]; | ||
996 | int f; | 994 | int f; |
997 | int frames_write; | 995 | int frames_write; |
996 | struct bio_vec *bvec = pkt->w_bio->bi_io_vec; | ||
998 | 997 | ||
999 | for (f = 0; f < pkt->frames; f++) { | 998 | for (f = 0; f < pkt->frames; f++) { |
1000 | pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; | 999 | bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; |
1001 | offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE; | 1000 | bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE; |
1002 | } | 1001 | } |
1003 | 1002 | ||
1004 | /* | 1003 | /* |
1005 | * Fill-in pages[] and offsets[] with data from orig_bios. | 1004 | * Fill-in bvec with data from orig_bios. |
1006 | */ | 1005 | */ |
1007 | frames_write = 0; | 1006 | frames_write = 0; |
1008 | spin_lock(&pkt->lock); | 1007 | spin_lock(&pkt->lock); |
@@ -1024,11 +1023,11 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1024 | } | 1023 | } |
1025 | 1024 | ||
1026 | if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) { | 1025 | if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) { |
1027 | pages[f] = src_bvl->bv_page; | 1026 | bvec[f].bv_page = src_bvl->bv_page; |
1028 | offsets[f] = src_bvl->bv_offset + src_offs; | 1027 | bvec[f].bv_offset = src_bvl->bv_offset + src_offs; |
1029 | } else { | 1028 | } else { |
1030 | pkt_copy_bio_data(bio, segment, src_offs, | 1029 | pkt_copy_bio_data(bio, segment, src_offs, |
1031 | pages[f], offsets[f]); | 1030 | bvec[f].bv_page, bvec[f].bv_offset); |
1032 | } | 1031 | } |
1033 | src_offs += CD_FRAMESIZE; | 1032 | src_offs += CD_FRAMESIZE; |
1034 | frames_write++; | 1033 | frames_write++; |
@@ -1042,7 +1041,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1042 | BUG_ON(frames_write != pkt->write_size); | 1041 | BUG_ON(frames_write != pkt->write_size); |
1043 | 1042 | ||
1044 | if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) { | 1043 | if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) { |
1045 | pkt_make_local_copy(pkt, pages, offsets); | 1044 | pkt_make_local_copy(pkt, bvec); |
1046 | pkt->cache_valid = 1; | 1045 | pkt->cache_valid = 1; |
1047 | } else { | 1046 | } else { |
1048 | pkt->cache_valid = 0; | 1047 | pkt->cache_valid = 0; |
@@ -1055,17 +1054,9 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
1055 | pkt->w_bio->bi_bdev = pd->bdev; | 1054 | pkt->w_bio->bi_bdev = pd->bdev; |
1056 | pkt->w_bio->bi_end_io = pkt_end_io_packet_write; | 1055 | pkt->w_bio->bi_end_io = pkt_end_io_packet_write; |
1057 | pkt->w_bio->bi_private = pkt; | 1056 | pkt->w_bio->bi_private = pkt; |
1058 | for (f = 0; f < pkt->frames; f++) { | 1057 | for (f = 0; f < pkt->frames; f++) |
1059 | if ((f + 1 < pkt->frames) && (pages[f + 1] == pages[f]) && | 1058 | if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) |
1060 | (offsets[f + 1] = offsets[f] + CD_FRAMESIZE)) { | 1059 | BUG(); |
1061 | if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE * 2, offsets[f])) | ||
1062 | BUG(); | ||
1063 | f++; | ||
1064 | } else { | ||
1065 | if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE, offsets[f])) | ||
1066 | BUG(); | ||
1067 | } | ||
1068 | } | ||
1069 | VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt); | 1060 | VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt); |
1070 | 1061 | ||
1071 | atomic_set(&pkt->io_wait, 1); | 1062 | atomic_set(&pkt->io_wait, 1); |
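The pktcdvd change above drops the parallel pages[]/offsets[] arrays (sized by PACKET_MAX_SIZE) and describes each frame directly in the write bio's own bio_vec, then adds one CD_FRAMESIZE segment per frame with bio_add_page(), removing the manual two-frame merge. A user-space model of the per-frame page/offset mapping, with struct page reduced to an index:

#include <stdio.h>

#define CD_FRAMESIZE 2048
#define PAGE_SIZE    4096
#define FRAMES       8

/* Minimal stand-in for struct bio_vec. */
struct vec {
    int page;              /* index into the packet's page array */
    unsigned int offset;   /* byte offset within that page */
};

int main(void)
{
    struct vec bvec[FRAMES];
    int f;

    for (f = 0; f < FRAMES; f++) {
        bvec[f].page = (f * CD_FRAMESIZE) / PAGE_SIZE;
        bvec[f].offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
        printf("frame %d -> page %d offset %u\n",
               f, bvec[f].page, bvec[f].offset);
    }
    return 0;
}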
@@ -1548,7 +1539,7 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di) | |||
1548 | case 0x12: /* DVD-RAM */ | 1539 | case 0x12: /* DVD-RAM */ |
1549 | return 0; | 1540 | return 0; |
1550 | default: | 1541 | default: |
1551 | printk("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile); | 1542 | VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile); |
1552 | return 1; | 1543 | return 1; |
1553 | } | 1544 | } |
1554 | 1545 | ||
@@ -1894,8 +1885,8 @@ static int pkt_open_write(struct pktcdvd_device *pd) | |||
1894 | unsigned int write_speed, media_write_speed, read_speed; | 1885 | unsigned int write_speed, media_write_speed, read_speed; |
1895 | 1886 | ||
1896 | if ((ret = pkt_probe_settings(pd))) { | 1887 | if ((ret = pkt_probe_settings(pd))) { |
1897 | DPRINTK("pktcdvd: %s failed probe\n", pd->name); | 1888 | VPRINTK("pktcdvd: %s failed probe\n", pd->name); |
1898 | return -EIO; | 1889 | return -EROFS; |
1899 | } | 1890 | } |
1900 | 1891 | ||
1901 | if ((ret = pkt_set_write_settings(pd))) { | 1892 | if ((ret = pkt_set_write_settings(pd))) { |
@@ -2053,10 +2044,9 @@ static int pkt_open(struct inode *inode, struct file *file) | |||
2053 | goto out_dec; | 2044 | goto out_dec; |
2054 | } | 2045 | } |
2055 | } else { | 2046 | } else { |
2056 | if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) { | 2047 | ret = pkt_open_dev(pd, file->f_mode & FMODE_WRITE); |
2057 | ret = -EIO; | 2048 | if (ret) |
2058 | goto out_dec; | 2049 | goto out_dec; |
2059 | } | ||
2060 | /* | 2050 | /* |
2061 | * needed here as well, since ext2 (among others) may change | 2051 | * needed here as well, since ext2 (among others) may change |
2062 | * the blocksize at mount time | 2052 | * the blocksize at mount time |
@@ -2436,11 +2426,12 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u | |||
2436 | * The door gets locked when the device is opened, so we | 2426 | * The door gets locked when the device is opened, so we |
2437 | * have to unlock it or else the eject command fails. | 2427 | * have to unlock it or else the eject command fails. |
2438 | */ | 2428 | */ |
2439 | pkt_lock_door(pd, 0); | 2429 | if (pd->refcnt == 1) |
2430 | pkt_lock_door(pd, 0); | ||
2440 | return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); | 2431 | return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); |
2441 | 2432 | ||
2442 | default: | 2433 | default: |
2443 | printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd); | 2434 | VPRINTK("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd); |
2444 | return -ENOTTY; | 2435 | return -ENOTTY; |
2445 | } | 2436 | } |
2446 | 2437 | ||
diff --git a/drivers/char/esp.c b/drivers/char/esp.c index 57539d8f9f7c..09dc4b01232c 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c | |||
@@ -150,17 +150,6 @@ static void rs_wait_until_sent(struct tty_struct *, int); | |||
150 | /* Standard COM flags (except for COM4, because of the 8514 problem) */ | 150 | /* Standard COM flags (except for COM4, because of the 8514 problem) */ |
151 | #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) | 151 | #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) |
152 | 152 | ||
153 | /* | ||
154 | * tmp_buf is used as a temporary buffer by serial_write. We need to | ||
155 | * lock it in case the memcpy_fromfs blocks while swapping in a page, | ||
156 | * and some other program tries to do a serial write at the same time. | ||
157 | * Since the lock will only come under contention when the system is | ||
158 | * swapping and available memory is low, it makes sense to share one | ||
159 | * buffer across all the serial ports, since it significantly saves | ||
160 | * memory if large numbers of serial ports are open. | ||
161 | */ | ||
162 | static unsigned char *tmp_buf; | ||
163 | |||
164 | static inline int serial_paranoia_check(struct esp_struct *info, | 153 | static inline int serial_paranoia_check(struct esp_struct *info, |
165 | char *name, const char *routine) | 154 | char *name, const char *routine) |
166 | { | 155 | { |
@@ -1267,7 +1256,7 @@ static int rs_write(struct tty_struct * tty, | |||
1267 | if (serial_paranoia_check(info, tty->name, "rs_write")) | 1256 | if (serial_paranoia_check(info, tty->name, "rs_write")) |
1268 | return 0; | 1257 | return 0; |
1269 | 1258 | ||
1270 | if (!tty || !info->xmit_buf || !tmp_buf) | 1259 | if (!tty || !info->xmit_buf) |
1271 | return 0; | 1260 | return 0; |
1272 | 1261 | ||
1273 | while (1) { | 1262 | while (1) { |
@@ -2291,11 +2280,7 @@ static int esp_open(struct tty_struct *tty, struct file * filp) | |||
2291 | tty->driver_data = info; | 2280 | tty->driver_data = info; |
2292 | info->tty = tty; | 2281 | info->tty = tty; |
2293 | 2282 | ||
2294 | if (!tmp_buf) { | 2283 | spin_unlock_irqrestore(&info->lock, flags); |
2295 | tmp_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL); | ||
2296 | if (!tmp_buf) | ||
2297 | return -ENOMEM; | ||
2298 | } | ||
2299 | 2284 | ||
2300 | /* | 2285 | /* |
2301 | * Start up serial port | 2286 | * Start up serial port |
@@ -2602,9 +2587,6 @@ static void __exit espserial_exit(void) | |||
2602 | free_pages((unsigned long)dma_buffer, | 2587 | free_pages((unsigned long)dma_buffer, |
2603 | get_order(DMA_BUFFER_SZ)); | 2588 | get_order(DMA_BUFFER_SZ)); |
2604 | 2589 | ||
2605 | if (tmp_buf) | ||
2606 | free_page((unsigned long)tmp_buf); | ||
2607 | |||
2608 | while (free_pio_buf) { | 2590 | while (free_pio_buf) { |
2609 | pio_buf = free_pio_buf->next; | 2591 | pio_buf = free_pio_buf->next; |
2610 | kfree(free_pio_buf); | 2592 | kfree(free_pio_buf); |
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 66a2fee06eb9..ef140ebde117 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -956,22 +956,18 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) | |||
956 | } | 956 | } |
957 | } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { | 957 | } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { |
958 | struct acpi_resource_extended_irq *irqp; | 958 | struct acpi_resource_extended_irq *irqp; |
959 | int i; | 959 | int i, irq; |
960 | 960 | ||
961 | irqp = &res->data.extended_irq; | 961 | irqp = &res->data.extended_irq; |
962 | 962 | ||
963 | if (irqp->interrupt_count > 0) { | 963 | for (i = 0; i < irqp->interrupt_count; i++) { |
964 | hdp->hd_nirqs = irqp->interrupt_count; | 964 | irq = acpi_register_gsi(irqp->interrupts[i], |
965 | 965 | irqp->triggering, irqp->polarity); | |
966 | for (i = 0; i < hdp->hd_nirqs; i++) { | 966 | if (irq < 0) |
967 | int rc = | 967 | return AE_ERROR; |
968 | acpi_register_gsi(irqp->interrupts[i], | 968 | |
969 | irqp->triggering, | 969 | hdp->hd_irq[hdp->hd_nirqs] = irq; |
970 | irqp->polarity); | 970 | hdp->hd_nirqs++; |
971 | if (rc < 0) | ||
972 | return AE_ERROR; | ||
973 | hdp->hd_irq[i] = rc; | ||
974 | } | ||
975 | } | 971 | } |
976 | } | 972 | } |
977 | 973 | ||
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index a23816d3e9a1..e9bba94fc898 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -1841,7 +1841,6 @@ static void release_dev(struct file * filp) | |||
1841 | tty_closing = tty->count <= 1; | 1841 | tty_closing = tty->count <= 1; |
1842 | o_tty_closing = o_tty && | 1842 | o_tty_closing = o_tty && |
1843 | (o_tty->count <= (pty_master ? 1 : 0)); | 1843 | (o_tty->count <= (pty_master ? 1 : 0)); |
1844 | up(&tty_sem); | ||
1845 | do_sleep = 0; | 1844 | do_sleep = 0; |
1846 | 1845 | ||
1847 | if (tty_closing) { | 1846 | if (tty_closing) { |
@@ -1869,6 +1868,7 @@ static void release_dev(struct file * filp) | |||
1869 | 1868 | ||
1870 | printk(KERN_WARNING "release_dev: %s: read/write wait queue " | 1869 | printk(KERN_WARNING "release_dev: %s: read/write wait queue " |
1871 | "active!\n", tty_name(tty, buf)); | 1870 | "active!\n", tty_name(tty, buf)); |
1871 | up(&tty_sem); | ||
1872 | schedule(); | 1872 | schedule(); |
1873 | } | 1873 | } |
1874 | 1874 | ||
@@ -1877,8 +1877,6 @@ static void release_dev(struct file * filp) | |||
1877 | * both sides, and we've completed the last operation that could | 1877 | * both sides, and we've completed the last operation that could |
1878 | * block, so it's safe to proceed with closing. | 1878 | * block, so it's safe to proceed with closing. |
1879 | */ | 1879 | */ |
1880 | |||
1881 | down(&tty_sem); | ||
1882 | if (pty_master) { | 1880 | if (pty_master) { |
1883 | if (--o_tty->count < 0) { | 1881 | if (--o_tty->count < 0) { |
1884 | printk(KERN_WARNING "release_dev: bad pty slave count " | 1882 | printk(KERN_WARNING "release_dev: bad pty slave count " |
@@ -1892,7 +1890,6 @@ static void release_dev(struct file * filp) | |||
1892 | tty->count, tty_name(tty, buf)); | 1890 | tty->count, tty_name(tty, buf)); |
1893 | tty->count = 0; | 1891 | tty->count = 0; |
1894 | } | 1892 | } |
1895 | up(&tty_sem); | ||
1896 | 1893 | ||
1897 | /* | 1894 | /* |
1898 | * We've decremented tty->count, so we need to remove this file | 1895 | * We've decremented tty->count, so we need to remove this file |
@@ -1937,6 +1934,8 @@ static void release_dev(struct file * filp) | |||
1937 | read_unlock(&tasklist_lock); | 1934 | read_unlock(&tasklist_lock); |
1938 | } | 1935 | } |
1939 | 1936 | ||
1937 | up(&tty_sem); | ||
1938 | |||
1940 | /* check whether both sides are closing ... */ | 1939 | /* check whether both sides are closing ... */ |
1941 | if (!tty_closing || (o_tty && !o_tty_closing)) | 1940 | if (!tty_closing || (o_tty && !o_tty_closing)) |
1942 | return; | 1941 | return; |
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index 2b286e865163..43b96e298363 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c | |||
@@ -13,11 +13,6 @@ | |||
13 | * License along with this program; if not, write the Free Software | 13 | * License along with this program; if not, write the Free Software |
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | 14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
15 | * | 15 | * |
16 | * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, | ||
17 | * Mountain View, CA 94043, or: | ||
18 | * | ||
19 | * http://www.sgi.com | ||
20 | * | ||
21 | * For further information regarding this notice, see: | 16 | * For further information regarding this notice, see: |
22 | * | 17 | * |
23 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan | 18 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index d393b504bf26..c82f47a66e48 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -665,7 +665,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
665 | struct ib_wc mad_wc; | 665 | struct ib_wc mad_wc; |
666 | struct ib_send_wr *send_wr = &mad_send_wr->send_wr; | 666 | struct ib_send_wr *send_wr = &mad_send_wr->send_wr; |
667 | 667 | ||
668 | if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) { | 668 | /* |
669 | * Directed route handling starts if the initial LID routed part of | ||
670 | * a request or the ending LID routed part of a response is empty. | ||
671 | * If we are at the start of the LID routed part, don't update the | ||
672 | * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. | ||
673 | */ | ||
674 | if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == | ||
675 | IB_LID_PERMISSIVE && | ||
676 | !smi_handle_dr_smp_send(smp, device->node_type, port_num)) { | ||
669 | ret = -EINVAL; | 677 | ret = -EINVAL; |
670 | printk(KERN_ERR PFX "Invalid directed route\n"); | 678 | printk(KERN_ERR PFX "Invalid directed route\n"); |
671 | goto out; | 679 | goto out; |
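Per the comment added above (IB spec vol 1, section 14.2.2), the directed-route send handling is entered only when the relevant DR LID field is the permissive LID, i.e. the LID-routed part is empty. A condensed model of that check; the field and constant names mirror the patch, the values are made up:

#include <stdio.h>

#define IB_LID_PERMISSIVE 0xFFFF

struct smp {
    int direction;            /* 0 = request, 1 = response */
    unsigned short dr_slid;
    unsigned short dr_dlid;
};

/* Enter directed-route hop processing only when the LID-routed part is empty. */
static int needs_dr_handling(const struct smp *smp)
{
    unsigned short lid = smp->direction ? smp->dr_dlid : smp->dr_slid;

    return lid == IB_LID_PERMISSIVE;
}

int main(void)
{
    struct smp req = { 0, IB_LID_PERMISSIVE, 5 };
    struct smp rsp = { 1, 5, 3 };

    printf("request:  %s\n", needs_dr_handling(&req) ? "DR handling" : "skip");
    printf("response: %s\n", needs_dr_handling(&rsp) ? "DR handling" : "skip");
    return 0;
}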
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index f9b9b93dc501..2825615ce81c 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c | |||
@@ -1029,25 +1029,6 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, | |||
1029 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET); | 1029 | MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET); |
1030 | dev_lim->uar_scratch_entry_sz = size; | 1030 | dev_lim->uar_scratch_entry_sz = size; |
1031 | 1031 | ||
1032 | mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", | ||
1033 | dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); | ||
1034 | mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", | ||
1035 | dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); | ||
1036 | mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", | ||
1037 | dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); | ||
1038 | mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", | ||
1039 | dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz); | ||
1040 | mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", | ||
1041 | dev_lim->reserved_mrws, dev_lim->reserved_mtts); | ||
1042 | mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", | ||
1043 | dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars); | ||
1044 | mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", | ||
1045 | dev_lim->max_pds, dev_lim->reserved_mgms); | ||
1046 | mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", | ||
1047 | dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz); | ||
1048 | |||
1049 | mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags); | ||
1050 | |||
1051 | if (mthca_is_memfree(dev)) { | 1032 | if (mthca_is_memfree(dev)) { |
1052 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); | 1033 | MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); |
1053 | dev_lim->max_srq_sz = 1 << field; | 1034 | dev_lim->max_srq_sz = 1 << field; |
@@ -1093,6 +1074,25 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, | |||
1093 | dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE; | 1074 | dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE; |
1094 | } | 1075 | } |
1095 | 1076 | ||
1077 | mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", | ||
1078 | dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); | ||
1079 | mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", | ||
1080 | dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); | ||
1081 | mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", | ||
1082 | dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); | ||
1083 | mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", | ||
1084 | dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz); | ||
1085 | mthca_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", | ||
1086 | dev_lim->reserved_mrws, dev_lim->reserved_mtts); | ||
1087 | mthca_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", | ||
1088 | dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars); | ||
1089 | mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", | ||
1090 | dev_lim->max_pds, dev_lim->reserved_mgms); | ||
1091 | mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", | ||
1092 | dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz); | ||
1093 | |||
1094 | mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags); | ||
1095 | |||
1096 | out: | 1096 | out: |
1097 | mthca_free_mailbox(dev, mailbox); | 1097 | mthca_free_mailbox(dev, mailbox); |
1098 | return err; | 1098 | return err; |
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 2a165fd06e57..e481037288d6 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h | |||
@@ -53,8 +53,8 @@ | |||
53 | 53 | ||
54 | #define DRV_NAME "ib_mthca" | 54 | #define DRV_NAME "ib_mthca" |
55 | #define PFX DRV_NAME ": " | 55 | #define PFX DRV_NAME ": " |
56 | #define DRV_VERSION "0.06" | 56 | #define DRV_VERSION "0.07" |
57 | #define DRV_RELDATE "June 23, 2005" | 57 | #define DRV_RELDATE "February 13, 2006" |
58 | 58 | ||
59 | enum { | 59 | enum { |
60 | MTHCA_FLAG_DDR_HIDDEN = 1 << 1, | 60 | MTHCA_FLAG_DDR_HIDDEN = 1 << 1, |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index e0a5412b7e68..2f85a9a831b1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -78,6 +78,7 @@ enum { | |||
78 | IPOIB_FLAG_SUBINTERFACE = 4, | 78 | IPOIB_FLAG_SUBINTERFACE = 4, |
79 | IPOIB_MCAST_RUN = 5, | 79 | IPOIB_MCAST_RUN = 5, |
80 | IPOIB_STOP_REAPER = 6, | 80 | IPOIB_STOP_REAPER = 6, |
81 | IPOIB_MCAST_STARTED = 7, | ||
81 | 82 | ||
82 | IPOIB_MAX_BACKOFF_SECONDS = 16, | 83 | IPOIB_MAX_BACKOFF_SECONDS = 16, |
83 | 84 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index ccaa0c387076..a2408d7ec598 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -533,8 +533,10 @@ void ipoib_mcast_join_task(void *dev_ptr) | |||
533 | } | 533 | } |
534 | 534 | ||
535 | if (!priv->broadcast) { | 535 | if (!priv->broadcast) { |
536 | priv->broadcast = ipoib_mcast_alloc(dev, 1); | 536 | struct ipoib_mcast *broadcast; |
537 | if (!priv->broadcast) { | 537 | |
538 | broadcast = ipoib_mcast_alloc(dev, 1); | ||
539 | if (!broadcast) { | ||
538 | ipoib_warn(priv, "failed to allocate broadcast group\n"); | 540 | ipoib_warn(priv, "failed to allocate broadcast group\n"); |
539 | mutex_lock(&mcast_mutex); | 541 | mutex_lock(&mcast_mutex); |
540 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 542 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
@@ -544,10 +546,11 @@ void ipoib_mcast_join_task(void *dev_ptr) | |||
544 | return; | 546 | return; |
545 | } | 547 | } |
546 | 548 | ||
547 | memcpy(priv->broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4, | 549 | spin_lock_irq(&priv->lock); |
550 | memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4, | ||
548 | sizeof (union ib_gid)); | 551 | sizeof (union ib_gid)); |
552 | priv->broadcast = broadcast; | ||
549 | 553 | ||
550 | spin_lock_irq(&priv->lock); | ||
551 | __ipoib_mcast_add(dev, priv->broadcast); | 554 | __ipoib_mcast_add(dev, priv->broadcast); |
552 | spin_unlock_irq(&priv->lock); | 555 | spin_unlock_irq(&priv->lock); |
553 | } | 556 | } |
@@ -601,6 +604,10 @@ int ipoib_mcast_start_thread(struct net_device *dev) | |||
601 | queue_work(ipoib_workqueue, &priv->mcast_task); | 604 | queue_work(ipoib_workqueue, &priv->mcast_task); |
602 | mutex_unlock(&mcast_mutex); | 605 | mutex_unlock(&mcast_mutex); |
603 | 606 | ||
607 | spin_lock_irq(&priv->lock); | ||
608 | set_bit(IPOIB_MCAST_STARTED, &priv->flags); | ||
609 | spin_unlock_irq(&priv->lock); | ||
610 | |||
604 | return 0; | 611 | return 0; |
605 | } | 612 | } |
606 | 613 | ||
@@ -611,6 +618,10 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush) | |||
611 | 618 | ||
612 | ipoib_dbg_mcast(priv, "stopping multicast thread\n"); | 619 | ipoib_dbg_mcast(priv, "stopping multicast thread\n"); |
613 | 620 | ||
621 | spin_lock_irq(&priv->lock); | ||
622 | clear_bit(IPOIB_MCAST_STARTED, &priv->flags); | ||
623 | spin_unlock_irq(&priv->lock); | ||
624 | |||
614 | mutex_lock(&mcast_mutex); | 625 | mutex_lock(&mcast_mutex); |
615 | clear_bit(IPOIB_MCAST_RUN, &priv->flags); | 626 | clear_bit(IPOIB_MCAST_RUN, &priv->flags); |
616 | cancel_delayed_work(&priv->mcast_task); | 627 | cancel_delayed_work(&priv->mcast_task); |
@@ -693,6 +704,14 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid, | |||
693 | */ | 704 | */ |
694 | spin_lock(&priv->lock); | 705 | spin_lock(&priv->lock); |
695 | 706 | ||
707 | if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) || | ||
708 | !priv->broadcast || | ||
709 | !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { | ||
710 | ++priv->stats.tx_dropped; | ||
711 | dev_kfree_skb_any(skb); | ||
712 | goto unlock; | ||
713 | } | ||
714 | |||
696 | mcast = __ipoib_mcast_find(dev, mgid); | 715 | mcast = __ipoib_mcast_find(dev, mgid); |
697 | if (!mcast) { | 716 | if (!mcast) { |
698 | /* Let's create a new send only group now */ | 717 | /* Let's create a new send only group now */ |
@@ -754,6 +773,7 @@ out: | |||
754 | ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); | 773 | ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); |
755 | } | 774 | } |
756 | 775 | ||
776 | unlock: | ||
757 | spin_unlock(&priv->lock); | 777 | spin_unlock(&priv->lock); |
758 | } | 778 | } |
759 | 779 | ||
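The ipoib hunks introduce an IPOIB_MCAST_STARTED flag so the send path can drop packets, under priv->lock, whenever the multicast thread is not running or the broadcast group is not yet attached. A simplified model of the gating test; the bit numbers and helper below are stand-ins for the kernel's test_bit() and flag values.

#include <stdio.h>

#define MCAST_STARTED  7
#define MCAST_ATTACHED 3

static int test_bit(int nr, unsigned long flags)
{
    return (flags >> nr) & 1;
}

/* Returns 1 if the packet would be sent, 0 if it is dropped (tx_dropped++). */
static int mcast_send_allowed(unsigned long dev_flags, int have_broadcast,
                              unsigned long bcast_flags)
{
    if (!test_bit(MCAST_STARTED, dev_flags) ||
        !have_broadcast ||
        !test_bit(MCAST_ATTACHED, bcast_flags))
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", mcast_send_allowed(1UL << MCAST_STARTED, 1,
                                      1UL << MCAST_ATTACHED));   /* 1 */
    printf("%d\n", mcast_send_allowed(0, 1,
                                      1UL << MCAST_ATTACHED));   /* 0 */
    return 0;
}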
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index f190a99604f0..393633681f49 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c | |||
@@ -2359,8 +2359,8 @@ isdn_tty_at_cout(char *msg, modem_info * info) | |||
2359 | 2359 | ||
2360 | /* use queue instead of direct, if online and */ | 2360 | /* use queue instead of direct, if online and */ |
2361 | /* data is in queue or buffer is full */ | 2361 | /* data is in queue or buffer is full */ |
2362 | if ((info->online && tty_buffer_request_room(tty, l) < l) || | 2362 | if (info->online && ((tty_buffer_request_room(tty, l) < l) || |
2363 | (!skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) { | 2363 | !skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) { |
2364 | skb = alloc_skb(l, GFP_ATOMIC); | 2364 | skb = alloc_skb(l, GFP_ATOMIC); |
2365 | if (!skb) { | 2365 | if (!skb) { |
2366 | spin_unlock_irqrestore(&info->readlock, flags); | 2366 | spin_unlock_irqrestore(&info->readlock, flags); |
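The isdn_tty fix is pure operator grouping: the old expression took the queue path whenever the receive queue was non-empty, even for an offline channel, while the new one makes info->online a precondition for both sub-tests. A truth-table sketch showing the only inputs where the two expressions differ:

#include <stdio.h>

int main(void)
{
    int online, room_short, queue_busy;

    for (online = 0; online <= 1; online++)
        for (room_short = 0; room_short <= 1; room_short++)
            for (queue_busy = 0; queue_busy <= 1; queue_busy++) {
                int old_cond = (online && room_short) || queue_busy;
                int new_cond = online && (room_short || queue_busy);

                if (old_cond != new_cond)
                    printf("online=%d room_short=%d queue_busy=%d: "
                           "old=%d new=%d\n",
                           online, room_short, queue_busy,
                           old_cond, new_cond);
            }
    return 0;
}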
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c index 747602aa5615..b85e2b180a44 100644 --- a/drivers/video/neofb.c +++ b/drivers/video/neofb.c | |||
@@ -1334,6 +1334,12 @@ static int neofb_blank(int blank_mode, struct fb_info *info) | |||
1334 | struct neofb_par *par = info->par; | 1334 | struct neofb_par *par = info->par; |
1335 | int seqflags, lcdflags, dpmsflags, reg; | 1335 | int seqflags, lcdflags, dpmsflags, reg; |
1336 | 1336 | ||
1337 | /* | ||
1338 | * Reload the value stored in the register, might have been changed via | ||
1339 | * FN keystroke | ||
1340 | */ | ||
1341 | par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03; | ||
1342 | |||
1337 | switch (blank_mode) { | 1343 | switch (blank_mode) { |
1338 | case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */ | 1344 | case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */ |
1339 | seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ | 1345 | seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ |
@@ -1366,7 +1372,7 @@ static int neofb_blank(int blank_mode, struct fb_info *info) | |||
1366 | case FB_BLANK_NORMAL: /* just blank screen (backlight stays on) */ | 1372 | case FB_BLANK_NORMAL: /* just blank screen (backlight stays on) */ |
1367 | seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ | 1373 | seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */ |
1368 | lcdflags = par->PanelDispCntlReg1 & 0x02; /* LCD normal */ | 1374 | lcdflags = par->PanelDispCntlReg1 & 0x02; /* LCD normal */ |
1369 | dpmsflags = 0; /* no hsync/vsync suppression */ | 1375 | dpmsflags = 0x00; /* no hsync/vsync suppression */ |
1370 | break; | 1376 | break; |
1371 | case FB_BLANK_UNBLANK: /* unblank */ | 1377 | case FB_BLANK_UNBLANK: /* unblank */ |
1372 | seqflags = 0; /* Enable sequencer */ | 1378 | seqflags = 0; /* Enable sequencer */ |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d17c97d07c80..675bd2568297 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -1442,13 +1442,15 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, | |||
1442 | &bytes_read, &smb_read_data, | 1442 | &bytes_read, &smb_read_data, |
1443 | &buf_type); | 1443 | &buf_type); |
1444 | pSMBr = (struct smb_com_read_rsp *)smb_read_data; | 1444 | pSMBr = (struct smb_com_read_rsp *)smb_read_data; |
1445 | if (copy_to_user(current_offset, | ||
1446 | smb_read_data + 4 /* RFC1001 hdr */ | ||
1447 | + le16_to_cpu(pSMBr->DataOffset), | ||
1448 | bytes_read)) { | ||
1449 | rc = -EFAULT; | ||
1450 | } | ||
1451 | if (smb_read_data) { | 1445 | if (smb_read_data) { |
1446 | if (copy_to_user(current_offset, | ||
1447 | smb_read_data + | ||
1448 | 4 /* RFC1001 length field */ + | ||
1449 | le16_to_cpu(pSMBr->DataOffset), | ||
1450 | bytes_read)) { | ||
1451 | rc = -EFAULT; | ||
1452 | } | ||
1453 | |||
1452 | if(buf_type == CIFS_SMALL_BUFFER) | 1454 | if(buf_type == CIFS_SMALL_BUFFER) |
1453 | cifs_small_buf_release(smb_read_data); | 1455 | cifs_small_buf_release(smb_read_data); |
1454 | else if(buf_type == CIFS_LARGE_BUFFER) | 1456 | else if(buf_type == CIFS_LARGE_BUFFER) |
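The cifs change moves the copy_to_user() inside the existing if (smb_read_data) test, so a failed read no longer dereferences a NULL response buffer when computing the data offset. A minimal model of the guarded copy, with memcpy standing in for copy_to_user and an assumed 4-byte RFC1001 header:

#include <stdio.h>
#include <string.h>

#define HDR_LEN 4    /* RFC1001 length field, as in the patch */

static int copy_out(char *dst, const char *resp, size_t data_off, size_t n)
{
    if (!resp)       /* read failed: nothing to copy, no NULL dereference */
        return -1;

    memcpy(dst, resp + HDR_LEN + data_off, n);
    return 0;
}

int main(void)
{
    char resp[] = "xxxxHDRSpayload";
    char out[16] = "";

    if (copy_out(out, resp, 4, 7) == 0)
        printf("copied: %.7s\n", out);
    if (copy_out(out, NULL, 4, 7) != 0)
        printf("skipped copy on NULL response\n");
    return 0;
}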
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -1403,7 +1403,7 @@ static void zap_threads (struct mm_struct *mm) | |||
1403 | do_each_thread(g,p) { | 1403 | do_each_thread(g,p) { |
1404 | if (mm == p->mm && p != tsk && | 1404 | if (mm == p->mm && p != tsk && |
1405 | p->ptrace && p->parent->mm == mm) { | 1405 | p->ptrace && p->parent->mm == mm) { |
1406 | __ptrace_unlink(p); | 1406 | __ptrace_detach(p, 0); |
1407 | } | 1407 | } |
1408 | } while_each_thread(g,p); | 1408 | } while_each_thread(g,p); |
1409 | write_unlock_irq(&tasklist_lock); | 1409 | write_unlock_irq(&tasklist_lock); |
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index e6265a0b56b8..543ed543d1e5 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c | |||
@@ -24,75 +24,29 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * Unlink a buffer from a transaction checkpoint list. | 27 | * Unlink a buffer from a transaction. |
28 | * | 28 | * |
29 | * Called with j_list_lock held. | 29 | * Called with j_list_lock held. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | static void __buffer_unlink_first(struct journal_head *jh) | 32 | static inline void __buffer_unlink(struct journal_head *jh) |
33 | { | 33 | { |
34 | transaction_t *transaction; | 34 | transaction_t *transaction; |
35 | 35 | ||
36 | transaction = jh->b_cp_transaction; | 36 | transaction = jh->b_cp_transaction; |
37 | jh->b_cp_transaction = NULL; | ||
37 | 38 | ||
38 | jh->b_cpnext->b_cpprev = jh->b_cpprev; | 39 | jh->b_cpnext->b_cpprev = jh->b_cpprev; |
39 | jh->b_cpprev->b_cpnext = jh->b_cpnext; | 40 | jh->b_cpprev->b_cpnext = jh->b_cpnext; |
40 | if (transaction->t_checkpoint_list == jh) { | 41 | if (transaction->t_checkpoint_list == jh) |
41 | transaction->t_checkpoint_list = jh->b_cpnext; | 42 | transaction->t_checkpoint_list = jh->b_cpnext; |
42 | if (transaction->t_checkpoint_list == jh) | 43 | if (transaction->t_checkpoint_list == jh) |
43 | transaction->t_checkpoint_list = NULL; | 44 | transaction->t_checkpoint_list = NULL; |
44 | } | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Unlink a buffer from a transaction checkpoint(io) list. | ||
49 | * | ||
50 | * Called with j_list_lock held. | ||
51 | */ | ||
52 | |||
53 | static inline void __buffer_unlink(struct journal_head *jh) | ||
54 | { | ||
55 | transaction_t *transaction; | ||
56 | |||
57 | transaction = jh->b_cp_transaction; | ||
58 | |||
59 | __buffer_unlink_first(jh); | ||
60 | if (transaction->t_checkpoint_io_list == jh) { | ||
61 | transaction->t_checkpoint_io_list = jh->b_cpnext; | ||
62 | if (transaction->t_checkpoint_io_list == jh) | ||
63 | transaction->t_checkpoint_io_list = NULL; | ||
64 | } | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * Move a buffer from the checkpoint list to the checkpoint io list | ||
69 | * | ||
70 | * Called with j_list_lock held | ||
71 | */ | ||
72 | |||
73 | static inline void __buffer_relink_io(struct journal_head *jh) | ||
74 | { | ||
75 | transaction_t *transaction; | ||
76 | |||
77 | transaction = jh->b_cp_transaction; | ||
78 | __buffer_unlink_first(jh); | ||
79 | |||
80 | if (!transaction->t_checkpoint_io_list) { | ||
81 | jh->b_cpnext = jh->b_cpprev = jh; | ||
82 | } else { | ||
83 | jh->b_cpnext = transaction->t_checkpoint_io_list; | ||
84 | jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev; | ||
85 | jh->b_cpprev->b_cpnext = jh; | ||
86 | jh->b_cpnext->b_cpprev = jh; | ||
87 | } | ||
88 | transaction->t_checkpoint_io_list = jh; | ||
89 | } | 45 | } |
90 | 46 | ||
91 | /* | 47 | /* |
92 | * Try to release a checkpointed buffer from its transaction. | 48 | * Try to release a checkpointed buffer from its transaction. |
93 | * Returns 1 if we released it and 2 if we also released the | 49 | * Returns 1 if we released it. |
94 | * whole transaction. | ||
95 | * | ||
96 | * Requires j_list_lock | 50 | * Requires j_list_lock |
97 | * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it | 51 | * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it |
98 | */ | 52 | */ |
@@ -103,11 +57,12 @@ static int __try_to_free_cp_buf(struct journal_head *jh) | |||
103 | 57 | ||
104 | if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) { | 58 | if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) { |
105 | JBUFFER_TRACE(jh, "remove from checkpoint list"); | 59 | JBUFFER_TRACE(jh, "remove from checkpoint list"); |
106 | ret = __journal_remove_checkpoint(jh) + 1; | 60 | __journal_remove_checkpoint(jh); |
107 | jbd_unlock_bh_state(bh); | 61 | jbd_unlock_bh_state(bh); |
108 | journal_remove_journal_head(bh); | 62 | journal_remove_journal_head(bh); |
109 | BUFFER_TRACE(bh, "release"); | 63 | BUFFER_TRACE(bh, "release"); |
110 | __brelse(bh); | 64 | __brelse(bh); |
65 | ret = 1; | ||
111 | } else { | 66 | } else { |
112 | jbd_unlock_bh_state(bh); | 67 | jbd_unlock_bh_state(bh); |
113 | } | 68 | } |
@@ -162,53 +117,83 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh) | |||
162 | } | 117 | } |
163 | 118 | ||
164 | /* | 119 | /* |
165 | * Clean up transaction's list of buffers submitted for io. | 120 | * Clean up a transaction's checkpoint list. |
166 | * We wait for any pending IO to complete and remove any clean | 121 | * |
167 | * buffers. Note that we take the buffers in the opposite ordering | 122 | * We wait for any pending IO to complete and make sure any clean |
168 | * from the one in which they were submitted for IO. | 123 | * buffers are removed from the transaction. |
124 | * | ||
125 | * Return 1 if we performed any actions which might have destroyed the | ||
126 | * checkpoint. (journal_remove_checkpoint() deletes the transaction when | ||
127 | * the last checkpoint buffer is cleansed) | ||
169 | * | 128 | * |
170 | * Called with j_list_lock held. | 129 | * Called with j_list_lock held. |
171 | */ | 130 | */ |
172 | 131 | static int __cleanup_transaction(journal_t *journal, transaction_t *transaction) | |
173 | static void __wait_cp_io(journal_t *journal, transaction_t *transaction) | ||
174 | { | 132 | { |
175 | struct journal_head *jh; | 133 | struct journal_head *jh, *next_jh, *last_jh; |
176 | struct buffer_head *bh; | 134 | struct buffer_head *bh; |
177 | tid_t this_tid; | 135 | int ret = 0; |
178 | int released = 0; | 136 | |
179 | 137 | assert_spin_locked(&journal->j_list_lock); | |
180 | this_tid = transaction->t_tid; | 138 | jh = transaction->t_checkpoint_list; |
181 | restart: | 139 | if (!jh) |
182 | /* Didn't somebody clean up the transaction in the meanwhile */ | 140 | return 0; |
183 | if (journal->j_checkpoint_transactions != transaction || | 141 | |
184 | transaction->t_tid != this_tid) | 142 | last_jh = jh->b_cpprev; |
185 | return; | 143 | next_jh = jh; |
186 | while (!released && transaction->t_checkpoint_io_list) { | 144 | do { |
187 | jh = transaction->t_checkpoint_io_list; | 145 | jh = next_jh; |
188 | bh = jh2bh(jh); | 146 | bh = jh2bh(jh); |
189 | if (!jbd_trylock_bh_state(bh)) { | ||
190 | jbd_sync_bh(journal, bh); | ||
191 | spin_lock(&journal->j_list_lock); | ||
192 | goto restart; | ||
193 | } | ||
194 | if (buffer_locked(bh)) { | 147 | if (buffer_locked(bh)) { |
195 | atomic_inc(&bh->b_count); | 148 | atomic_inc(&bh->b_count); |
196 | spin_unlock(&journal->j_list_lock); | 149 | spin_unlock(&journal->j_list_lock); |
197 | jbd_unlock_bh_state(bh); | ||
198 | wait_on_buffer(bh); | 150 | wait_on_buffer(bh); |
199 | /* the journal_head may have gone by now */ | 151 | /* the journal_head may have gone by now */ |
200 | BUFFER_TRACE(bh, "brelse"); | 152 | BUFFER_TRACE(bh, "brelse"); |
201 | __brelse(bh); | 153 | __brelse(bh); |
202 | spin_lock(&journal->j_list_lock); | 154 | goto out_return_1; |
203 | goto restart; | ||
204 | } | 155 | } |
156 | |||
205 | /* | 157 | /* |
206 | * Now in whatever state the buffer currently is, we know that | 158 | * This is foul |
207 | * it has been written out and so we can drop it from the list | ||
208 | */ | 159 | */ |
209 | released = __journal_remove_checkpoint(jh); | 160 | if (!jbd_trylock_bh_state(bh)) { |
210 | jbd_unlock_bh_state(bh); | 161 | jbd_sync_bh(journal, bh); |
211 | } | 162 | goto out_return_1; |
163 | } | ||
164 | |||
165 | if (jh->b_transaction != NULL) { | ||
166 | transaction_t *t = jh->b_transaction; | ||
167 | tid_t tid = t->t_tid; | ||
168 | |||
169 | spin_unlock(&journal->j_list_lock); | ||
170 | jbd_unlock_bh_state(bh); | ||
171 | log_start_commit(journal, tid); | ||
172 | log_wait_commit(journal, tid); | ||
173 | goto out_return_1; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * AKPM: I think the buffer_jbddirty test is redundant - it | ||
178 | * shouldn't have NULL b_transaction? | ||
179 | */ | ||
180 | next_jh = jh->b_cpnext; | ||
181 | if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) { | ||
182 | BUFFER_TRACE(bh, "remove from checkpoint"); | ||
183 | __journal_remove_checkpoint(jh); | ||
184 | jbd_unlock_bh_state(bh); | ||
185 | journal_remove_journal_head(bh); | ||
186 | __brelse(bh); | ||
187 | ret = 1; | ||
188 | } else { | ||
189 | jbd_unlock_bh_state(bh); | ||
190 | } | ||
191 | } while (jh != last_jh); | ||
192 | |||
193 | return ret; | ||
194 | out_return_1: | ||
195 | spin_lock(&journal->j_list_lock); | ||
196 | return 1; | ||
212 | } | 197 | } |
213 | 198 | ||
214 | #define NR_BATCH 64 | 199 | #define NR_BATCH 64 |
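The restored __cleanup_transaction() walks the transaction's checkpoint list, a circular doubly-linked list of journal heads, capturing last_jh before the scan and next_jh before a node may be freed. A user-space model of that traversal-with-removal pattern; the predicate "even value" stands in for "clean buffer".

#include <stdio.h>
#include <stdlib.h>

struct node {
    int val;
    struct node *next, *prev;
};

/* One pass over a circular doubly-linked list, unlinking matching nodes.
 * The end marker (last) and the successor (next) are captured before any
 * node is freed, mirroring the jh / last_jh / next_jh pattern above. */
static struct node *scan(struct node *head)
{
    struct node *n = head, *last = head->prev, *next;
    int was_last;

    do {
        next = n->next;
        was_last = (n == last);
        if (n->val % 2 == 0) {            /* "clean buffer": drop it */
            n->prev->next = n->next;
            n->next->prev = n->prev;
            head = (head != n) ? head : (next != n ? next : NULL);
            free(n);
        }
        n = next;
    } while (!was_last && head);

    return head;
}

int main(void)
{
    struct node *head = NULL, *n;
    int i;

    for (i = 1; i <= 5; i++) {            /* build circular list 1..5 */
        n = malloc(sizeof(*n));
        n->val = i;
        if (!head) {
            head = n->next = n->prev = n;
        } else {
            n->prev = head->prev;
            n->next = head;
            head->prev->next = n;
            head->prev = n;
        }
    }

    head = scan(head);
    n = head;
    do {
        printf("%d ", n->val);
        n = n->next;
    } while (n != head);
    printf("\n");                         /* prints: 1 3 5 */
    return 0;
}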
@@ -218,7 +203,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) | |||
218 | { | 203 | { |
219 | int i; | 204 | int i; |
220 | 205 | ||
206 | spin_unlock(&journal->j_list_lock); | ||
221 | ll_rw_block(SWRITE, *batch_count, bhs); | 207 | ll_rw_block(SWRITE, *batch_count, bhs); |
208 | spin_lock(&journal->j_list_lock); | ||
222 | for (i = 0; i < *batch_count; i++) { | 209 | for (i = 0; i < *batch_count; i++) { |
223 | struct buffer_head *bh = bhs[i]; | 210 | struct buffer_head *bh = bhs[i]; |
224 | clear_buffer_jwrite(bh); | 211 | clear_buffer_jwrite(bh); |
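With the unlock/lock pair moved into __flush_batch(), every caller now submits the batched buffers with j_list_lock dropped (ll_rw_block() may sleep) and re-acquires the lock before touching the list again. A pthread-based sketch of that drop-submit-reacquire shape; the names are illustrative, not the jbd API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for ll_rw_block(): may block, so it must run unlocked. */
static void submit_io(int *batch, int count)
{
    for (int i = 0; i < count; i++)
        printf("submit buffer %d\n", batch[i]);
}

/* Caller holds list_lock; it is dropped only around the blocking I/O. */
static void flush_batch(int *batch, int *count)
{
    pthread_mutex_unlock(&list_lock);
    submit_io(batch, *count);
    pthread_mutex_lock(&list_lock);
    *count = 0;    /* batch consumed; the list may have changed meanwhile */
}

int main(void)
{
    int batch[3] = { 10, 11, 12 };
    int count = 3;

    pthread_mutex_lock(&list_lock);
    flush_batch(batch, &count);
    pthread_mutex_unlock(&list_lock);
    return 0;
}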
@@ -234,46 +221,19 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) | |||
234 | * Return 1 if something happened which requires us to abort the current | 221 | * Return 1 if something happened which requires us to abort the current |
235 | * scan of the checkpoint list. | 222 | * scan of the checkpoint list. |
236 | * | 223 | * |
237 | * Called with j_list_lock held and drops it if 1 is returned | 224 | * Called with j_list_lock held. |
238 | * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it | 225 | * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it |
239 | */ | 226 | */ |
240 | static int __process_buffer(journal_t *journal, struct journal_head *jh, | 227 | static int __flush_buffer(journal_t *journal, struct journal_head *jh, |
241 | struct buffer_head **bhs, int *batch_count) | 228 | struct buffer_head **bhs, int *batch_count, |
229 | int *drop_count) | ||
242 | { | 230 | { |
243 | struct buffer_head *bh = jh2bh(jh); | 231 | struct buffer_head *bh = jh2bh(jh); |
244 | int ret = 0; | 232 | int ret = 0; |
245 | 233 | ||
246 | if (buffer_locked(bh)) { | 234 | if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) { |
247 | get_bh(bh); | 235 | J_ASSERT_JH(jh, jh->b_transaction == NULL); |
248 | spin_unlock(&journal->j_list_lock); | ||
249 | jbd_unlock_bh_state(bh); | ||
250 | wait_on_buffer(bh); | ||
251 | /* the journal_head may have gone by now */ | ||
252 | BUFFER_TRACE(bh, "brelse"); | ||
253 | put_bh(bh); | ||
254 | ret = 1; | ||
255 | } | ||
256 | else if (jh->b_transaction != NULL) { | ||
257 | transaction_t *t = jh->b_transaction; | ||
258 | tid_t tid = t->t_tid; | ||
259 | 236 | ||
260 | spin_unlock(&journal->j_list_lock); | ||
261 | jbd_unlock_bh_state(bh); | ||
262 | log_start_commit(journal, tid); | ||
263 | log_wait_commit(journal, tid); | ||
264 | ret = 1; | ||
265 | } | ||
266 | else if (!buffer_dirty(bh)) { | ||
267 | J_ASSERT_JH(jh, !buffer_jbddirty(bh)); | ||
268 | BUFFER_TRACE(bh, "remove from checkpoint"); | ||
269 | __journal_remove_checkpoint(jh); | ||
270 | spin_unlock(&journal->j_list_lock); | ||
271 | jbd_unlock_bh_state(bh); | ||
272 | journal_remove_journal_head(bh); | ||
273 | put_bh(bh); | ||
274 | ret = 1; | ||
275 | } | ||
276 | else { | ||
277 | /* | 237 | /* |
278 | * Important: we are about to write the buffer, and | 238 | * Important: we are about to write the buffer, and |
279 | * possibly block, while still holding the journal lock. | 239 | * possibly block, while still holding the journal lock. |
@@ -286,30 +246,45 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, | |||
286 | J_ASSERT_BH(bh, !buffer_jwrite(bh)); | 246 | J_ASSERT_BH(bh, !buffer_jwrite(bh)); |
287 | set_buffer_jwrite(bh); | 247 | set_buffer_jwrite(bh); |
288 | bhs[*batch_count] = bh; | 248 | bhs[*batch_count] = bh; |
289 | __buffer_relink_io(jh); | ||
290 | jbd_unlock_bh_state(bh); | 249 | jbd_unlock_bh_state(bh); |
291 | (*batch_count)++; | 250 | (*batch_count)++; |
292 | if (*batch_count == NR_BATCH) { | 251 | if (*batch_count == NR_BATCH) { |
293 | spin_unlock(&journal->j_list_lock); | ||
294 | __flush_batch(journal, bhs, batch_count); | 252 | __flush_batch(journal, bhs, batch_count); |
295 | ret = 1; | 253 | ret = 1; |
296 | } | 254 | } |
255 | } else { | ||
256 | int last_buffer = 0; | ||
257 | if (jh->b_cpnext == jh) { | ||
258 | /* We may be about to drop the transaction. Tell the | ||
259 | * caller that the lists have changed. | ||
260 | */ | ||
261 | last_buffer = 1; | ||
262 | } | ||
263 | if (__try_to_free_cp_buf(jh)) { | ||
264 | (*drop_count)++; | ||
265 | ret = last_buffer; | ||
266 | } | ||
297 | } | 267 | } |
298 | return ret; | 268 | return ret; |
299 | } | 269 | } |
300 | 270 | ||
301 | /* | 271 | /* |
302 | * Perform an actual checkpoint. We take the first transaction on the | 272 | * Perform an actual checkpoint. We don't write out only enough to |
303 | * list of transactions to be checkpointed and send all its buffers | 273 | * satisfy the current blocked requests: rather we submit a reasonably |
304 | * to disk. We submit larger chunks of data at once. | 274 | * sized chunk of the outstanding data to disk at once for |
275 | * efficiency. __log_wait_for_space() will retry if we didn't free enough. | ||
305 | * | 276 | * |
277 | * However, we _do_ take into account the amount requested so that once | ||
278 | * the IO has been queued, we can return as soon as enough of it has | ||
279 | * completed to disk. | ||
280 | * | ||
306 | * The journal should be locked before calling this function. | 281 | * The journal should be locked before calling this function. |
307 | */ | 282 | */ |
308 | int log_do_checkpoint(journal_t *journal) | 283 | int log_do_checkpoint(journal_t *journal) |
309 | { | 284 | { |
310 | transaction_t *transaction; | ||
311 | tid_t this_tid; | ||
312 | int result; | 285 | int result; |
286 | int batch_count = 0; | ||
287 | struct buffer_head *bhs[NR_BATCH]; | ||
313 | 288 | ||
314 | jbd_debug(1, "Start checkpoint\n"); | 289 | jbd_debug(1, "Start checkpoint\n"); |
315 | 290 | ||
@@ -324,70 +299,79 @@ int log_do_checkpoint(journal_t *journal) | |||
324 | return result; | 299 | return result; |
325 | 300 | ||
326 | /* | 301 | /* |
327 | * OK, we need to start writing disk blocks. Take one transaction | 302 | * OK, we need to start writing disk blocks. Try to free up a |
328 | * and write it. | 303 | * quarter of the log in a single checkpoint if we can. |
329 | */ | 304 | */ |
330 | spin_lock(&journal->j_list_lock); | ||
331 | if (!journal->j_checkpoint_transactions) | ||
332 | goto out; | ||
333 | transaction = journal->j_checkpoint_transactions; | ||
334 | this_tid = transaction->t_tid; | ||
335 | restart: | ||
336 | /* | 305 | /* |
337 | * If someone cleaned up this transaction while we slept, we're | 306 | * AKPM: check this code. I had a feeling a while back that it |
338 | * done (maybe it's a new transaction, but it fell at the same | 307 | * degenerates into a busy loop at unmount time. |
339 | * address). | ||
340 | */ | 308 | */ |
341 | if (journal->j_checkpoint_transactions == transaction && | 309 | spin_lock(&journal->j_list_lock); |
342 | transaction->t_tid == this_tid) { | 310 | while (journal->j_checkpoint_transactions) { |
343 | int batch_count = 0; | 311 | transaction_t *transaction; |
344 | struct buffer_head *bhs[NR_BATCH]; | 312 | struct journal_head *jh, *last_jh, *next_jh; |
345 | struct journal_head *jh; | 313 | int drop_count = 0; |
346 | int retry = 0; | 314 | int cleanup_ret, retry = 0; |
347 | 315 | tid_t this_tid; | |
348 | while (!retry && transaction->t_checkpoint_list) { | 316 | |
317 | transaction = journal->j_checkpoint_transactions; | ||
318 | this_tid = transaction->t_tid; | ||
319 | jh = transaction->t_checkpoint_list; | ||
320 | last_jh = jh->b_cpprev; | ||
321 | next_jh = jh; | ||
322 | do { | ||
349 | struct buffer_head *bh; | 323 | struct buffer_head *bh; |
350 | 324 | ||
351 | jh = transaction->t_checkpoint_list; | 325 | jh = next_jh; |
326 | next_jh = jh->b_cpnext; | ||
352 | bh = jh2bh(jh); | 327 | bh = jh2bh(jh); |
353 | if (!jbd_trylock_bh_state(bh)) { | 328 | if (!jbd_trylock_bh_state(bh)) { |
354 | jbd_sync_bh(journal, bh); | 329 | jbd_sync_bh(journal, bh); |
330 | spin_lock(&journal->j_list_lock); | ||
355 | retry = 1; | 331 | retry = 1; |
356 | break; | 332 | break; |
357 | } | 333 | } |
358 | retry = __process_buffer(journal, jh, bhs, | 334 | retry = __flush_buffer(journal, jh, bhs, &batch_count, &drop_count); |
359 | &batch_count); | 335 | if (cond_resched_lock(&journal->j_list_lock)) { |
360 | if (!retry && | ||
361 | lock_need_resched(&journal->j_list_lock)) { | ||
362 | spin_unlock(&journal->j_list_lock); | ||
363 | retry = 1; | 336 | retry = 1; |
364 | break; | 337 | break; |
365 | } | 338 | } |
366 | } | 339 | } while (jh != last_jh && !retry); |
367 | 340 | ||
368 | if (batch_count) { | 341 | if (batch_count) { |
369 | if (!retry) { | ||
370 | spin_unlock(&journal->j_list_lock); | ||
371 | retry = 1; | ||
372 | } | ||
373 | __flush_batch(journal, bhs, &batch_count); | 342 | __flush_batch(journal, bhs, &batch_count); |
343 | retry = 1; | ||
374 | } | 344 | } |
375 | 345 | ||
376 | if (retry) { | ||
377 | spin_lock(&journal->j_list_lock); | ||
378 | goto restart; | ||
379 | } | ||
380 | /* | 346 | /* |
381 | * Now we have cleaned up the first transaction's checkpoint | 347 | * If someone cleaned up this transaction while we slept, we're |
382 | * list. Let's clean up the second one. | 348 | * done |
349 | */ | ||
350 | if (journal->j_checkpoint_transactions != transaction) | ||
351 | break; | ||
352 | if (retry) | ||
353 | continue; | ||
354 | /* | ||
355 | * Maybe it's a new transaction, but it fell at the same | ||
356 | * address | ||
383 | */ | 357 | */ |
384 | __wait_cp_io(journal, transaction); | 358 | if (transaction->t_tid != this_tid) |
359 | continue; | ||
360 | /* | ||
361 | * We have walked the whole transaction list without | ||
362 | * finding anything to write to disk. We had better be | ||
363 | * able to make some progress or we are in trouble. | ||
364 | */ | ||
365 | cleanup_ret = __cleanup_transaction(journal, transaction); | ||
366 | J_ASSERT(drop_count != 0 || cleanup_ret != 0); | ||
367 | if (journal->j_checkpoint_transactions != transaction) | ||
368 | break; | ||
385 | } | 369 | } |
386 | out: | ||
387 | spin_unlock(&journal->j_list_lock); | 370 | spin_unlock(&journal->j_list_lock); |
388 | result = cleanup_journal_tail(journal); | 371 | result = cleanup_journal_tail(journal); |
389 | if (result < 0) | 372 | if (result < 0) |
390 | return result; | 373 | return result; |
374 | |||
391 | return 0; | 375 | return 0; |
392 | } | 376 | } |
393 | 377 | ||
@@ -472,91 +456,52 @@ int cleanup_journal_tail(journal_t *journal) | |||
472 | /* Checkpoint list management */ | 456 | /* Checkpoint list management */ |
473 | 457 | ||
474 | /* | 458 | /* |
475 | * journal_clean_one_cp_list | ||
476 | * | ||
477 | * Find all the written-back checkpoint buffers in the given list and release them. | ||
478 | * | ||
479 | * Called with the journal locked. | ||
480 | * Called with j_list_lock held. | ||
481 | * Returns number of buffers reaped (for debug) | ||
482 | */ | ||
483 | |||
484 | static int journal_clean_one_cp_list(struct journal_head *jh, int *released) | ||
485 | { | ||
486 | struct journal_head *last_jh; | ||
487 | struct journal_head *next_jh = jh; | ||
488 | int ret, freed = 0; | ||
489 | |||
490 | *released = 0; | ||
491 | if (!jh) | ||
492 | return 0; | ||
493 | |||
494 | last_jh = jh->b_cpprev; | ||
495 | do { | ||
496 | jh = next_jh; | ||
497 | next_jh = jh->b_cpnext; | ||
498 | /* Use trylock because of the ranking */ | ||
499 | if (jbd_trylock_bh_state(jh2bh(jh))) { | ||
500 | ret = __try_to_free_cp_buf(jh); | ||
501 | if (ret) { | ||
502 | freed++; | ||
503 | if (ret == 2) { | ||
504 | *released = 1; | ||
505 | return freed; | ||
506 | } | ||
507 | } | ||
508 | } | ||
509 | /* | ||
510 | * This function only frees up some memory if possible so we | ||
511 | * dont have an obligation to finish processing. Bail out if | ||
512 | * preemption requested: | ||
513 | */ | ||
514 | if (need_resched()) | ||
515 | return freed; | ||
516 | } while (jh != last_jh); | ||
517 | |||
518 | return freed; | ||
519 | } | ||
520 | |||
521 | /* | ||
522 | * journal_clean_checkpoint_list | 459 | * journal_clean_checkpoint_list |
523 | * | 460 | * |
524 | * Find all the written-back checkpoint buffers in the journal and release them. | 461 | * Find all the written-back checkpoint buffers in the journal and release them. |
525 | * | 462 | * |
526 | * Called with the journal locked. | 463 | * Called with the journal locked. |
527 | * Called with j_list_lock held. | 464 | * Called with j_list_lock held. |
528 | * Returns number of buffers reaped (for debug) | 465 | * Returns number of buffers reaped (for debug) |
529 | */ | 466 | */ |
530 | 467 | ||
531 | int __journal_clean_checkpoint_list(journal_t *journal) | 468 | int __journal_clean_checkpoint_list(journal_t *journal) |
532 | { | 469 | { |
533 | transaction_t *transaction, *last_transaction, *next_transaction; | 470 | transaction_t *transaction, *last_transaction, *next_transaction; |
534 | int ret = 0, released; | 471 | int ret = 0; |
535 | 472 | ||
536 | transaction = journal->j_checkpoint_transactions; | 473 | transaction = journal->j_checkpoint_transactions; |
537 | if (!transaction) | 474 | if (transaction == 0) |
538 | goto out; | 475 | goto out; |
539 | 476 | ||
540 | last_transaction = transaction->t_cpprev; | 477 | last_transaction = transaction->t_cpprev; |
541 | next_transaction = transaction; | 478 | next_transaction = transaction; |
542 | do { | 479 | do { |
480 | struct journal_head *jh; | ||
481 | |||
543 | transaction = next_transaction; | 482 | transaction = next_transaction; |
544 | next_transaction = transaction->t_cpnext; | 483 | next_transaction = transaction->t_cpnext; |
545 | ret += journal_clean_one_cp_list(transaction-> | 484 | jh = transaction->t_checkpoint_list; |
546 | t_checkpoint_list, &released); | 485 | if (jh) { |
547 | if (need_resched()) | 486 | struct journal_head *last_jh = jh->b_cpprev; |
548 | goto out; | 487 | struct journal_head *next_jh = jh; |
549 | if (released) | 488 | |
550 | continue; | 489 | do { |
551 | /* | 490 | jh = next_jh; |
552 | * It is essential that we are as careful as in the case of | 491 | next_jh = jh->b_cpnext; |
553 | * t_checkpoint_list with removing the buffer from the list as | 492 | /* Use trylock because of the ranking */ |
554 | * we can possibly see not yet submitted buffers on io_list | 493 | if (jbd_trylock_bh_state(jh2bh(jh))) |
555 | */ | 494 | ret += __try_to_free_cp_buf(jh); |
556 | ret += journal_clean_one_cp_list(transaction-> | 495 | /* |
557 | t_checkpoint_io_list, &released); | 496 | * This function only frees up some memory |
558 | if (need_resched()) | 497 | * if possible so we dont have an obligation |
559 | goto out; | 498 | * to finish processing. Bail out if preemption |
499 | * requested: | ||
500 | */ | ||
501 | if (need_resched()) | ||
502 | goto out; | ||
503 | } while (jh != last_jh); | ||
504 | } | ||
560 | } while (transaction != last_transaction); | 505 | } while (transaction != last_transaction); |
561 | out: | 506 | out: |
562 | return ret; | 507 | return ret; |
@@ -571,22 +516,18 @@ out: | |||
571 | * buffer updates committed in that transaction have safely been stored | 516 | * buffer updates committed in that transaction have safely been stored |
572 | * elsewhere on disk. To achieve this, all of the buffers in a | 517 | * elsewhere on disk. To achieve this, all of the buffers in a |
573 | * transaction need to be maintained on the transaction's checkpoint | 518 | * transaction need to be maintained on the transaction's checkpoint |
574 | * lists until they have been rewritten, at which point this function is | 519 | * list until they have been rewritten, at which point this function is |
575 | * called to remove the buffer from the existing transaction's | 520 | * called to remove the buffer from the existing transaction's |
576 | * checkpoint lists. | 521 | * checkpoint list. |
577 | * | ||
578 | * The function returns 1 if it frees the transaction, 0 otherwise. | ||
579 | * | 522 | * |
580 | * This function is called with the journal locked. | 523 | * This function is called with the journal locked. |
581 | * This function is called with j_list_lock held. | 524 | * This function is called with j_list_lock held. |
582 | * This function is called with jbd_lock_bh_state(jh2bh(jh)) | ||
583 | */ | 525 | */ |
584 | 526 | ||
585 | int __journal_remove_checkpoint(struct journal_head *jh) | 527 | void __journal_remove_checkpoint(struct journal_head *jh) |
586 | { | 528 | { |
587 | transaction_t *transaction; | 529 | transaction_t *transaction; |
588 | journal_t *journal; | 530 | journal_t *journal; |
589 | int ret = 0; | ||
590 | 531 | ||
591 | JBUFFER_TRACE(jh, "entry"); | 532 | JBUFFER_TRACE(jh, "entry"); |
592 | 533 | ||
@@ -597,10 +538,8 @@ int __journal_remove_checkpoint(struct journal_head *jh) | |||
597 | journal = transaction->t_journal; | 538 | journal = transaction->t_journal; |
598 | 539 | ||
599 | __buffer_unlink(jh); | 540 | __buffer_unlink(jh); |
600 | jh->b_cp_transaction = NULL; | ||
601 | 541 | ||
602 | if (transaction->t_checkpoint_list != NULL || | 542 | if (transaction->t_checkpoint_list != NULL) |
603 | transaction->t_checkpoint_io_list != NULL) | ||
604 | goto out; | 543 | goto out; |
605 | JBUFFER_TRACE(jh, "transaction has no more buffers"); | 544 | JBUFFER_TRACE(jh, "transaction has no more buffers"); |
606 | 545 | ||
@@ -626,10 +565,8 @@ int __journal_remove_checkpoint(struct journal_head *jh) | |||
626 | /* Just in case anybody was waiting for more transactions to be | 565 | /* Just in case anybody was waiting for more transactions to be |
627 | checkpointed... */ | 566 | checkpointed... */ |
628 | wake_up(&journal->j_wait_logspace); | 567 | wake_up(&journal->j_wait_logspace); |
629 | ret = 1; | ||
630 | out: | 568 | out: |
631 | JBUFFER_TRACE(jh, "exit"); | 569 | JBUFFER_TRACE(jh, "exit"); |
632 | return ret; | ||
633 | } | 570 | } |
634 | 571 | ||
635 | /* | 572 | /* |
@@ -691,7 +628,6 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction) | |||
691 | J_ASSERT(transaction->t_shadow_list == NULL); | 628 | J_ASSERT(transaction->t_shadow_list == NULL); |
692 | J_ASSERT(transaction->t_log_list == NULL); | 629 | J_ASSERT(transaction->t_log_list == NULL); |
693 | J_ASSERT(transaction->t_checkpoint_list == NULL); | 630 | J_ASSERT(transaction->t_checkpoint_list == NULL); |
694 | J_ASSERT(transaction->t_checkpoint_io_list == NULL); | ||
695 | J_ASSERT(transaction->t_updates == 0); | 631 | J_ASSERT(transaction->t_updates == 0); |
696 | J_ASSERT(journal->j_committing_transaction != transaction); | 632 | J_ASSERT(journal->j_committing_transaction != transaction); |
697 | J_ASSERT(journal->j_running_transaction != transaction); | 633 | J_ASSERT(journal->j_running_transaction != transaction); |
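The checkpoint rework above hinges on one pattern: buffers are collected under j_list_lock into a fixed-size array, and __flush_batch() drops the lock only around the blocking ll_rw_block() submission before retaking it. The following is a minimal userspace sketch of that pattern, assuming nothing about the kernel APIs; the pthread mutex and printf() merely stand in for j_list_lock and the block-layer call.

#include <pthread.h>
#include <stdio.h>

#define NR_BATCH 64

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int batch[NR_BATCH];

/* Called with list_lock held; drops it around the slow submission, as
 * __flush_batch() does around ll_rw_block(), then retakes it. */
static void flush_batch(int *batch_count)
{
	pthread_mutex_unlock(&list_lock);
	for (int i = 0; i < *batch_count; i++)
		printf("writing item %d\n", batch[i]);
	pthread_mutex_lock(&list_lock);
	*batch_count = 0;
}

int main(void)
{
	int batch_count = 0;

	pthread_mutex_lock(&list_lock);
	for (int item = 0; item < 200; item++) {
		batch[batch_count++] = item;
		/* once the lock has been dropped, the caller must assume the
		 * list changed and rescan, as log_do_checkpoint() does */
		if (batch_count == NR_BATCH)
			flush_batch(&batch_count);
	}
	if (batch_count)
		flush_batch(&batch_count);
	pthread_mutex_unlock(&list_lock);
	return 0;
}

Build with -lpthread; the point is only the lock placement around the blocking work, not the I/O itself.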
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 29e62d98bae6..002ad2bbc769 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c | |||
@@ -829,8 +829,7 @@ restart_loop: | |||
829 | journal->j_committing_transaction = NULL; | 829 | journal->j_committing_transaction = NULL; |
830 | spin_unlock(&journal->j_state_lock); | 830 | spin_unlock(&journal->j_state_lock); |
831 | 831 | ||
832 | if (commit_transaction->t_checkpoint_list == NULL && | 832 | if (commit_transaction->t_checkpoint_list == NULL) { |
833 | commit_transaction->t_checkpoint_io_list == NULL) { | ||
834 | __journal_drop_transaction(journal, commit_transaction); | 833 | __journal_drop_transaction(journal, commit_transaction); |
835 | } else { | 834 | } else { |
836 | if (journal->j_checkpoint_transactions == NULL) { | 835 | if (journal->j_checkpoint_transactions == NULL) { |
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 3eaf6e701087..da6354baa0b8 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c | |||
@@ -111,9 +111,10 @@ long nlmclnt_block(struct nlm_rqst *req, long timeout) | |||
111 | /* | 111 | /* |
112 | * The server lockd has called us back to tell us the lock was granted | 112 | * The server lockd has called us back to tell us the lock was granted |
113 | */ | 113 | */ |
114 | u32 | 114 | u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock) |
115 | nlmclnt_grant(struct nlm_lock *lock) | ||
116 | { | 115 | { |
116 | const struct file_lock *fl = &lock->fl; | ||
117 | const struct nfs_fh *fh = &lock->fh; | ||
117 | struct nlm_wait *block; | 118 | struct nlm_wait *block; |
118 | u32 res = nlm_lck_denied; | 119 | u32 res = nlm_lck_denied; |
119 | 120 | ||
@@ -122,14 +123,20 @@ nlmclnt_grant(struct nlm_lock *lock) | |||
122 | * Warning: must not use cookie to match it! | 123 | * Warning: must not use cookie to match it! |
123 | */ | 124 | */ |
124 | list_for_each_entry(block, &nlm_blocked, b_list) { | 125 | list_for_each_entry(block, &nlm_blocked, b_list) { |
125 | if (nlm_compare_locks(block->b_lock, &lock->fl)) { | 126 | struct file_lock *fl_blocked = block->b_lock; |
126 | /* Alright, we found a lock. Set the return status | 127 | |
127 | * and wake up the caller | 128 | if (!nlm_compare_locks(fl_blocked, fl)) |
128 | */ | 129 | continue; |
129 | block->b_status = NLM_LCK_GRANTED; | 130 | if (!nlm_cmp_addr(&block->b_host->h_addr, addr)) |
130 | wake_up(&block->b_wait); | 131 | continue; |
131 | res = nlm_granted; | 132 | if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode) ,fh) != 0) |
132 | } | 133 | continue; |
134 | /* Alright, we found a lock. Set the return status | ||
135 | * and wake up the caller | ||
136 | */ | ||
137 | block->b_status = NLM_LCK_GRANTED; | ||
138 | wake_up(&block->b_wait); | ||
139 | res = nlm_granted; | ||
133 | } | 140 | } |
134 | return res; | 141 | return res; |
135 | } | 142 | } |
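The nlmclnt_grant() change above tightens the match for a GRANTED callback: the lock range alone is no longer enough, the server address and the NFS file handle must also agree. Below is a compilable sketch of that triple test, using illustrative stand-in types rather than the lockd structures.

#include <string.h>
#include <stdio.h>
#include <netinet/in.h>

struct waiter {
	struct in_addr host_addr;	/* which server we asked */
	unsigned char fh[16];		/* which file we locked */
	long fl_start, fl_end;		/* which byte range */
};

static int grant_matches(const struct waiter *w, const struct in_addr *addr,
			 const unsigned char *fh, long fl_start, long fl_end)
{
	if (w->fl_start != fl_start || w->fl_end != fl_end)
		return 0;				/* nlm_compare_locks() */
	if (w->host_addr.s_addr != addr->s_addr)
		return 0;				/* nlm_cmp_addr() */
	if (memcmp(w->fh, fh, sizeof(w->fh)) != 0)
		return 0;				/* nfs_compare_fh() */
	return 1;
}

int main(void)
{
	struct waiter w = { .host_addr.s_addr = 0x0100007f, .fl_start = 0, .fl_end = 99 };
	struct in_addr a = { .s_addr = 0x0100007f };
	unsigned char fh[16] = { 0 };

	printf("match: %d\n", grant_matches(&w, &a, fh, 0, 99));
	return 0;
}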
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 4063095d849e..b10f913aa06a 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c | |||
@@ -228,7 +228,7 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
228 | resp->cookie = argp->cookie; | 228 | resp->cookie = argp->cookie; |
229 | 229 | ||
230 | dprintk("lockd: GRANTED called\n"); | 230 | dprintk("lockd: GRANTED called\n"); |
231 | resp->status = nlmclnt_grant(&argp->lock); | 231 | resp->status = nlmclnt_grant(&rqstp->rq_addr, &argp->lock); |
232 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); | 232 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); |
233 | return rpc_success; | 233 | return rpc_success; |
234 | } | 234 | } |
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 3bc437e0cf5b..35681d9cf1fc 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c | |||
@@ -256,7 +256,7 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, | |||
256 | resp->cookie = argp->cookie; | 256 | resp->cookie = argp->cookie; |
257 | 257 | ||
258 | dprintk("lockd: GRANTED called\n"); | 258 | dprintk("lockd: GRANTED called\n"); |
259 | resp->status = nlmclnt_grant(&argp->lock); | 259 | resp->status = nlmclnt_grant(&rqstp->rq_addr, &argp->lock); |
260 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); | 260 | dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); |
261 | return rpc_success; | 261 | return rpc_success; |
262 | } | 262 | } |
diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h index f6439532a262..a21515c16a43 100644 --- a/include/asm-alpha/mman.h +++ b/include/asm-alpha/mman.h | |||
@@ -43,6 +43,8 @@ | |||
43 | #define MADV_SPACEAVAIL 5 /* ensure resources are available */ | 43 | #define MADV_SPACEAVAIL 5 /* ensure resources are available */ |
44 | #define MADV_DONTNEED 6 /* don't need these pages */ | 44 | #define MADV_DONTNEED 6 /* don't need these pages */ |
45 | #define MADV_REMOVE 7 /* remove these pages & resources */ | 45 | #define MADV_REMOVE 7 /* remove these pages & resources */ |
46 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
47 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
46 | 48 | ||
47 | /* compatibility flags */ | 49 | /* compatibility flags */ |
48 | #define MAP_ANON MAP_ANONYMOUS | 50 | #define MAP_ANON MAP_ANONYMOUS |
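MADV_DONTFORK and MADV_DOFORK (added per-arch throughout the hunks below) let userspace mark a mapping as not to be copied into children, which matters for regions pinned for DMA. A minimal usage sketch, assuming a libc whose <sys/mman.h> already exports the flags; the numeric values are arch-specific, as these headers show.

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

#ifdef MADV_DONTFORK
	if (madvise(p, len, MADV_DONTFORK) != 0)
		perror("madvise(MADV_DONTFORK)");
	/* ... fork() here: the child would not inherit this range ... */
	if (madvise(p, len, MADV_DOFORK) != 0)	/* restore normal inheritance */
		perror("madvise(MADV_DOFORK)");
#endif
	munmap(p, len);
	return 0;
}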
diff --git a/include/asm-arm/mman.h b/include/asm-arm/mman.h index f0bebca2ac21..693ed859e632 100644 --- a/include/asm-arm/mman.h +++ b/include/asm-arm/mman.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
37 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 37 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
39 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
40 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
39 | 41 | ||
40 | /* compatibility flags */ | 42 | /* compatibility flags */ |
41 | #define MAP_ANON MAP_ANONYMOUS | 43 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-arm26/mman.h b/include/asm-arm26/mman.h index 0ed7780541fa..2096c50df888 100644 --- a/include/asm-arm26/mman.h +++ b/include/asm-arm26/mman.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
37 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 37 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
39 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
40 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
39 | 41 | ||
40 | /* compatibility flags */ | 42 | /* compatibility flags */ |
41 | #define MAP_ANON MAP_ANONYMOUS | 43 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-cris/mman.h b/include/asm-cris/mman.h index 5a382b8bf3f7..deddfb239ff5 100644 --- a/include/asm-cris/mman.h +++ b/include/asm-cris/mman.h | |||
@@ -38,6 +38,8 @@ | |||
38 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 38 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
39 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 39 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
40 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 40 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
41 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
42 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
41 | 43 | ||
42 | /* compatibility flags */ | 44 | /* compatibility flags */ |
43 | #define MAP_ANON MAP_ANONYMOUS | 45 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h index a59f684b4f33..5d9f84bfdcad 100644 --- a/include/asm-frv/atomic.h +++ b/include/asm-frv/atomic.h | |||
@@ -220,9 +220,9 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig | |||
220 | switch (sizeof(__xg_orig)) { \ | 220 | switch (sizeof(__xg_orig)) { \ |
221 | case 4: \ | 221 | case 4: \ |
222 | asm volatile( \ | 222 | asm volatile( \ |
223 | "swap%I0 %2,%M0" \ | 223 | "swap%I0 %M0,%1" \ |
224 | : "+m"(*__xg_ptr), "=&r"(__xg_orig) \ | 224 | : "+m"(*__xg_ptr), "=r"(__xg_orig) \ |
225 | : "r"(x) \ | 225 | : "1"(x) \ |
226 | : "memory" \ | 226 | : "memory" \ |
227 | ); \ | 227 | ); \ |
228 | break; \ | 228 | break; \ |
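The constraint fix above is in the 32-bit case of FR-V's xchg() macro, which performs an atomic exchange with the swap instruction. For experimenting with the same semantics outside the kernel, here is a hedged analogue built on a GCC builtin; it is not the kernel xchg() API.

#include <stdio.h>

static unsigned int xchg_u32(volatile unsigned int *p, unsigned int x)
{
	/* atomic exchange; __sync_lock_test_and_set has acquire semantics,
	 * weaker than the kernel's xchg(), but enough for a demonstration */
	return __sync_lock_test_and_set(p, x);
}

int main(void)
{
	volatile unsigned int v = 1;
	unsigned int old = xchg_u32(&v, 2);

	printf("old=%u new=%u\n", old, v);
	return 0;
}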
diff --git a/include/asm-frv/cacheflush.h b/include/asm-frv/cacheflush.h index 3007deccb490..eaa5826bc1c8 100644 --- a/include/asm-frv/cacheflush.h +++ b/include/asm-frv/cacheflush.h | |||
@@ -87,5 +87,17 @@ static inline void flush_icache_page(struct vm_area_struct *vma, struct page *pa | |||
87 | flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); | 87 | flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); |
88 | } | 88 | } |
89 | 89 | ||
90 | /* | ||
91 | * permit ptrace to access another process's address space through the icache | ||
92 | * and the dcache | ||
93 | */ | ||
94 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | ||
95 | do { \ | ||
96 | memcpy((dst), (src), (len)); \ | ||
97 | flush_icache_user_range((vma), (page), (vaddr), (len)); \ | ||
98 | } while(0) | ||
99 | |||
100 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
101 | memcpy((dst), (src), (len)) | ||
90 | 102 | ||
91 | #endif /* _ASM_CACHEFLUSH_H */ | 103 | #endif /* _ASM_CACHEFLUSH_H */ |
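The new copy_to_user_page() pairs the memcpy() with an icache flush so that instructions poked into another process (ptrace POKETEXT) are actually seen by the CPU. The same ordering applies to any code that writes instruction bytes; a userspace sketch follows, with GCC's __builtin___clear_cache() standing in for flush_icache_user_range().

#include <string.h>
#include <stdio.h>

static unsigned char code_page[64];

static void patch_code(unsigned char *dst, const unsigned char *src, size_t len)
{
	memcpy(dst, src, len);		/* the memcpy() half of copy_to_user_page() */
	__builtin___clear_cache((char *)dst, (char *)(dst + len));
					/* the flush_icache_user_range() half */
}

int main(void)
{
	static const unsigned char insn[] = { 0x90, 0x90, 0xc3 };	/* placeholder bytes */

	patch_code(code_page, insn, sizeof(insn));
	printf("patched %zu bytes\n", sizeof(insn));
	return 0;
}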
diff --git a/include/asm-frv/io.h b/include/asm-frv/io.h index 075369b1a34b..01247cb2bc39 100644 --- a/include/asm-frv/io.h +++ b/include/asm-frv/io.h | |||
@@ -251,7 +251,6 @@ static inline void writel(uint32_t datum, volatile void __iomem *addr) | |||
251 | #define IOMAP_WRITETHROUGH 3 | 251 | #define IOMAP_WRITETHROUGH 3 |
252 | 252 | ||
253 | extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag); | 253 | extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag); |
254 | extern void __iounmap(void __iomem *addr, unsigned long size); | ||
255 | 254 | ||
256 | static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size) | 255 | static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size) |
257 | { | 256 | { |
diff --git a/include/asm-frv/mman.h b/include/asm-frv/mman.h index 8af4a41c255e..d3bca306da82 100644 --- a/include/asm-frv/mman.h +++ b/include/asm-frv/mman.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
37 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 37 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
39 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
40 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
39 | 41 | ||
40 | /* compatibility flags */ | 42 | /* compatibility flags */ |
41 | #define MAP_ANON MAP_ANONYMOUS | 43 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-frv/spr-regs.h b/include/asm-frv/spr-regs.h index ef472f058d9c..c2a541ef828d 100644 --- a/include/asm-frv/spr-regs.h +++ b/include/asm-frv/spr-regs.h | |||
@@ -98,6 +98,7 @@ | |||
98 | #define TBR_TT_TRAP0 (0x80 << 4) | 98 | #define TBR_TT_TRAP0 (0x80 << 4) |
99 | #define TBR_TT_TRAP1 (0x81 << 4) | 99 | #define TBR_TT_TRAP1 (0x81 << 4) |
100 | #define TBR_TT_TRAP2 (0x82 << 4) | 100 | #define TBR_TT_TRAP2 (0x82 << 4) |
101 | #define TBR_TT_TRAP3 (0x83 << 4) | ||
101 | #define TBR_TT_TRAP126 (0xfe << 4) | 102 | #define TBR_TT_TRAP126 (0xfe << 4) |
102 | #define TBR_TT_BREAK (0xff << 4) | 103 | #define TBR_TT_BREAK (0xff << 4) |
103 | 104 | ||
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h index d2aea70a5f64..f72ff0c4dc0b 100644 --- a/include/asm-frv/system.h +++ b/include/asm-frv/system.h | |||
@@ -40,8 +40,84 @@ do { \ | |||
40 | 40 | ||
41 | /* | 41 | /* |
42 | * interrupt flag manipulation | 42 | * interrupt flag manipulation |
43 | * - use virtual interrupt management since touching the PSR is slow | ||
44 | * - ICC2.Z: T if interrupts virtually disabled | ||
45 | * - ICC2.C: F if interrupts really disabled | ||
46 | * - if Z==1 upon interrupt: | ||
47 | * - C is set to 0 | ||
48 | * - interrupts are really disabled | ||
49 | * - entry.S returns immediately | ||
50 | * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts | ||
51 | * - if taken, the trap: | ||
52 | * - sets ICC2.C | ||
53 | * - enables interrupts | ||
43 | */ | 54 | */ |
44 | #define local_irq_disable() \ | 55 | #define local_irq_disable() \ |
56 | do { \ | ||
57 | /* set Z flag, but don't change the C flag */ \ | ||
58 | asm volatile(" andcc gr0,gr0,gr0,icc2 \n" \ | ||
59 | : \ | ||
60 | : \ | ||
61 | : "memory", "icc2" \ | ||
62 | ); \ | ||
63 | } while(0) | ||
64 | |||
65 | #define local_irq_enable() \ | ||
66 | do { \ | ||
67 | /* clear Z flag and then test the C flag */ \ | ||
68 | asm volatile(" oricc gr0,#1,gr0,icc2 \n" \ | ||
69 | " tihi icc2,gr0,#2 \n" \ | ||
70 | : \ | ||
71 | : \ | ||
72 | : "memory", "icc2" \ | ||
73 | ); \ | ||
74 | } while(0) | ||
75 | |||
76 | #define local_save_flags(flags) \ | ||
77 | do { \ | ||
78 | typecheck(unsigned long, flags); \ | ||
79 | asm volatile("movsg ccr,%0" \ | ||
80 | : "=r"(flags) \ | ||
81 | : \ | ||
82 | : "memory"); \ | ||
83 | \ | ||
84 | /* shift ICC2.Z to bit 0 */ \ | ||
85 | flags >>= 26; \ | ||
86 | \ | ||
87 | /* make flags 1 if interrupts disabled, 0 otherwise */ \ | ||
88 | flags &= 1UL; \ | ||
89 | } while(0) | ||
90 | |||
91 | #define irqs_disabled() \ | ||
92 | ({unsigned long flags; local_save_flags(flags); flags; }) | ||
93 | |||
94 | #define local_irq_save(flags) \ | ||
95 | do { \ | ||
96 | typecheck(unsigned long, flags); \ | ||
97 | local_save_flags(flags); \ | ||
98 | local_irq_disable(); \ | ||
99 | } while(0) | ||
100 | |||
101 | #define local_irq_restore(flags) \ | ||
102 | do { \ | ||
103 | typecheck(unsigned long, flags); \ | ||
104 | \ | ||
105 | /* load the Z flag by turning 1 if disabled into 0 if disabled \ | ||
106 | * and thus setting the Z flag but not the C flag */ \ | ||
107 | asm volatile(" xoricc %0,#1,gr0,icc2 \n" \ | ||
108 | /* then test Z=0 and C=0 */ \ | ||
109 | " tihi icc2,gr0,#2 \n" \ | ||
110 | : \ | ||
111 | : "r"(flags) \ | ||
112 | : "memory", "icc2" \ | ||
113 | ); \ | ||
114 | \ | ||
115 | } while(0) | ||
116 | |||
117 | /* | ||
118 | * real interrupt flag manipulation | ||
119 | */ | ||
120 | #define __local_irq_disable() \ | ||
45 | do { \ | 121 | do { \ |
46 | unsigned long psr; \ | 122 | unsigned long psr; \ |
47 | asm volatile(" movsg psr,%0 \n" \ | 123 | asm volatile(" movsg psr,%0 \n" \ |
@@ -53,7 +129,7 @@ do { \ | |||
53 | : "memory"); \ | 129 | : "memory"); \ |
54 | } while(0) | 130 | } while(0) |
55 | 131 | ||
56 | #define local_irq_enable() \ | 132 | #define __local_irq_enable() \ |
57 | do { \ | 133 | do { \ |
58 | unsigned long psr; \ | 134 | unsigned long psr; \ |
59 | asm volatile(" movsg psr,%0 \n" \ | 135 | asm volatile(" movsg psr,%0 \n" \ |
@@ -64,7 +140,7 @@ do { \ | |||
64 | : "memory"); \ | 140 | : "memory"); \ |
65 | } while(0) | 141 | } while(0) |
66 | 142 | ||
67 | #define local_save_flags(flags) \ | 143 | #define __local_save_flags(flags) \ |
68 | do { \ | 144 | do { \ |
69 | typecheck(unsigned long, flags); \ | 145 | typecheck(unsigned long, flags); \ |
70 | asm("movsg psr,%0" \ | 146 | asm("movsg psr,%0" \ |
@@ -73,7 +149,7 @@ do { \ | |||
73 | : "memory"); \ | 149 | : "memory"); \ |
74 | } while(0) | 150 | } while(0) |
75 | 151 | ||
76 | #define local_irq_save(flags) \ | 152 | #define __local_irq_save(flags) \ |
77 | do { \ | 153 | do { \ |
78 | unsigned long npsr; \ | 154 | unsigned long npsr; \ |
79 | typecheck(unsigned long, flags); \ | 155 | typecheck(unsigned long, flags); \ |
@@ -86,7 +162,7 @@ do { \ | |||
86 | : "memory"); \ | 162 | : "memory"); \ |
87 | } while(0) | 163 | } while(0) |
88 | 164 | ||
89 | #define local_irq_restore(flags) \ | 165 | #define __local_irq_restore(flags) \ |
90 | do { \ | 166 | do { \ |
91 | typecheck(unsigned long, flags); \ | 167 | typecheck(unsigned long, flags); \ |
92 | asm volatile(" movgs %0,psr \n" \ | 168 | asm volatile(" movgs %0,psr \n" \ |
@@ -95,7 +171,7 @@ do { \ | |||
95 | : "memory"); \ | 171 | : "memory"); \ |
96 | } while(0) | 172 | } while(0) |
97 | 173 | ||
98 | #define irqs_disabled() \ | 174 | #define __irqs_disabled() \ |
99 | ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) | 175 | ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) |
100 | 176 | ||
101 | /* | 177 | /* |
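With the split above, local_irq_disable()/local_irq_enable() only touch ICC2, and the TIHI trap does the expensive PSR work lazily when an interrupt really arrived; the __local_* variants keep the old PSR.PIL behaviour for code that needs it. Callers are unchanged either way, as in this kernel-style fragment (a sketch, not a standalone program):

static int counter;

static void bump_counter(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* virtually disables: sets ICC2.Z */
	counter++;			/* critical section */
	local_irq_restore(flags);	/* TIHI #2 traps only if an IRQ was pending */
}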
diff --git a/include/asm-frv/uaccess.h b/include/asm-frv/uaccess.h index b6bcbe01f6ee..a1d140438863 100644 --- a/include/asm-frv/uaccess.h +++ b/include/asm-frv/uaccess.h | |||
@@ -306,7 +306,4 @@ extern long strnlen_user(const char *src, long count); | |||
306 | 306 | ||
307 | extern unsigned long search_exception_table(unsigned long addr); | 307 | extern unsigned long search_exception_table(unsigned long addr); |
308 | 308 | ||
309 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len) | ||
310 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len) | ||
311 | |||
312 | #endif /* _ASM_UACCESS_H */ | 309 | #endif /* _ASM_UACCESS_H */ |
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h index 4d994d2e99e3..322531caa484 100644 --- a/include/asm-frv/unistd.h +++ b/include/asm-frv/unistd.h | |||
@@ -295,13 +295,29 @@ | |||
295 | #define __NR_add_key 286 | 295 | #define __NR_add_key 286 |
296 | #define __NR_request_key 287 | 296 | #define __NR_request_key 287 |
297 | #define __NR_keyctl 288 | 297 | #define __NR_keyctl 288 |
298 | #define __NR_vperfctr_open 289 | 298 | #define __NR_ioprio_set 289 |
299 | #define __NR_vperfctr_control (__NR_perfctr_info+1) | 299 | #define __NR_ioprio_get 290 |
300 | #define __NR_vperfctr_unlink (__NR_perfctr_info+2) | 300 | #define __NR_inotify_init 291 |
301 | #define __NR_vperfctr_iresume (__NR_perfctr_info+3) | 301 | #define __NR_inotify_add_watch 292 |
302 | #define __NR_vperfctr_read (__NR_perfctr_info+4) | 302 | #define __NR_inotify_rm_watch 293 |
303 | #define __NR_migrate_pages 294 | ||
304 | #define __NR_openat 295 | ||
305 | #define __NR_mkdirat 296 | ||
306 | #define __NR_mknodat 297 | ||
307 | #define __NR_fchownat 298 | ||
308 | #define __NR_futimesat 299 | ||
309 | #define __NR_newfstatat 300 | ||
310 | #define __NR_unlinkat 301 | ||
311 | #define __NR_renameat 302 | ||
312 | #define __NR_linkat 303 | ||
313 | #define __NR_symlinkat 304 | ||
314 | #define __NR_readlinkat 305 | ||
315 | #define __NR_fchmodat 306 | ||
316 | #define __NR_faccessat 307 | ||
317 | #define __NR_pselect6 308 | ||
318 | #define __NR_ppoll 309 | ||
303 | 319 | ||
304 | #define NR_syscalls 294 | 320 | #define NR_syscalls 310 |
305 | 321 | ||
306 | /* | 322 | /* |
307 | * process the return value of a syscall, consigning it to one of two possible fates | 323 | * process the return value of a syscall, consigning it to one of two possible fates |
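Until libc wrappers exist, the freshly wired numbers above can be exercised through syscall(2). A small sketch using __NR_inotify_init; it assumes a toolchain whose <asm/unistd.h> already carries the new definitions.

#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
#ifdef __NR_inotify_init
	long fd = syscall(__NR_inotify_init);

	if (fd < 0)
		perror("inotify_init");
	else
		printf("inotify fd = %ld\n", fd);
#endif
	return 0;
}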
diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h index 744a8fb485c2..ac0346f7d11d 100644 --- a/include/asm-h8300/mman.h +++ b/include/asm-h8300/mman.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
37 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 37 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
39 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
40 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
39 | 41 | ||
40 | /* compatibility flags */ | 42 | /* compatibility flags */ |
41 | #define MAP_ANON MAP_ANONYMOUS | 43 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h index ba4941e6f643..ab2339a1d807 100644 --- a/include/asm-i386/mman.h +++ b/include/asm-i386/mman.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
37 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 37 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
39 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
40 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
39 | 41 | ||
40 | /* compatibility flags */ | 42 | /* compatibility flags */ |
41 | #define MAP_ANON MAP_ANONYMOUS | 43 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index af503a122b23..aa958c6ee83e 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h | |||
@@ -27,7 +27,7 @@ | |||
27 | #ifndef _ASM_I386_TOPOLOGY_H | 27 | #ifndef _ASM_I386_TOPOLOGY_H |
28 | #define _ASM_I386_TOPOLOGY_H | 28 | #define _ASM_I386_TOPOLOGY_H |
29 | 29 | ||
30 | #ifdef CONFIG_SMP | 30 | #ifdef CONFIG_X86_HT |
31 | #define topology_physical_package_id(cpu) \ | 31 | #define topology_physical_package_id(cpu) \ |
32 | (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) | 32 | (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu]) |
33 | #define topology_core_id(cpu) \ | 33 | #define topology_core_id(cpu) \ |
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h index e1b6cd63f49e..03d00faf03b5 100644 --- a/include/asm-ia64/machvec_sn2.h +++ b/include/asm-ia64/machvec_sn2.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. | 2 | * Copyright (c) 2002-2003, 2006 Silicon Graphics, Inc. All Rights Reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of version 2 of the GNU General Public License | 5 | * under the terms of version 2 of the GNU General Public License |
@@ -20,11 +20,6 @@ | |||
20 | * License along with this program; if not, write the Free Software | 20 | * License along with this program; if not, write the Free Software |
21 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | 21 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
22 | * | 22 | * |
23 | * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, | ||
24 | * Mountain View, CA 94043, or: | ||
25 | * | ||
26 | * http://www.sgi.com | ||
27 | * | ||
28 | * For further information regarding this notice, see: | 23 | * For further information regarding this notice, see: |
29 | * | 24 | * |
30 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan | 25 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan |
diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h index 828beb24a20e..357ebb780cc0 100644 --- a/include/asm-ia64/mman.h +++ b/include/asm-ia64/mman.h | |||
@@ -44,6 +44,8 @@ | |||
44 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 44 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
45 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 45 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
46 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 46 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
47 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
48 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
47 | 49 | ||
48 | /* compatibility flags */ | 50 | /* compatibility flags */ |
49 | #define MAP_ANON MAP_ANONYMOUS | 51 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h index 1a3831c04af6..91c31be87b13 100644 --- a/include/asm-ia64/sn/arch.h +++ b/include/asm-ia64/sn/arch.h | |||
@@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); | |||
70 | * Compact node ID to nasid mappings kept in the per-cpu data areas of each | 70 | * Compact node ID to nasid mappings kept in the per-cpu data areas of each |
71 | * cpu. | 71 | * cpu. |
72 | */ | 72 | */ |
73 | DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); | 73 | DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); |
74 | #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) | 74 | #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) |
75 | 75 | ||
76 | 76 | ||
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h index 01e5b4103235..5335d87ca5f8 100644 --- a/include/asm-ia64/sn/bte.h +++ b/include/asm-ia64/sn/bte.h | |||
@@ -46,7 +46,7 @@ | |||
46 | #define BTES_PER_NODE (is_shub2() ? 4 : 2) | 46 | #define BTES_PER_NODE (is_shub2() ? 4 : 2) |
47 | #define MAX_BTES_PER_NODE 4 | 47 | #define MAX_BTES_PER_NODE 4 |
48 | 48 | ||
49 | #define BTE2OFF_CTRL (0) | 49 | #define BTE2OFF_CTRL 0 |
50 | #define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0) | 50 | #define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0) |
51 | #define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0) | 51 | #define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0) |
52 | #define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0) | 52 | #define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0) |
@@ -75,11 +75,11 @@ | |||
75 | : base + (BTEOFF_NOTIFY/8)) | 75 | : base + (BTEOFF_NOTIFY/8)) |
76 | 76 | ||
77 | /* Define hardware modes */ | 77 | /* Define hardware modes */ |
78 | #define BTE_NOTIFY (IBCT_NOTIFY) | 78 | #define BTE_NOTIFY IBCT_NOTIFY |
79 | #define BTE_NORMAL BTE_NOTIFY | 79 | #define BTE_NORMAL BTE_NOTIFY |
80 | #define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE) | 80 | #define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE) |
81 | /* Use a reserved bit to let the caller specify a wait for any BTE */ | 81 | /* Use a reserved bit to let the caller specify a wait for any BTE */ |
82 | #define BTE_WACQUIRE (0x4000) | 82 | #define BTE_WACQUIRE 0x4000 |
83 | /* Use the BTE on the node with the destination memory */ | 83 | /* Use the BTE on the node with the destination memory */ |
84 | #define BTE_USE_DEST (BTE_WACQUIRE << 1) | 84 | #define BTE_USE_DEST (BTE_WACQUIRE << 1) |
85 | /* Use any available BTE interface on any node for the transfer */ | 85 | /* Use any available BTE interface on any node for the transfer */ |
diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h index 9334078b089a..a601d3af39b6 100644 --- a/include/asm-ia64/sn/pcibr_provider.h +++ b/include/asm-ia64/sn/pcibr_provider.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | #ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H | 8 | #ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H |
9 | #define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H | 9 | #define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H |
@@ -115,18 +115,6 @@ struct pcibus_info { | |||
115 | spinlock_t pbi_lock; | 115 | spinlock_t pbi_lock; |
116 | }; | 116 | }; |
117 | 117 | ||
118 | /* | ||
119 | * pcibus_info structure locking macros | ||
120 | */ | ||
121 | inline static unsigned long | ||
122 | pcibr_lock(struct pcibus_info *pcibus_info) | ||
123 | { | ||
124 | unsigned long flag; | ||
125 | spin_lock_irqsave(&pcibus_info->pbi_lock, flag); | ||
126 | return(flag); | ||
127 | } | ||
128 | #define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag) | ||
129 | |||
130 | extern int pcibr_init_provider(void); | 118 | extern int pcibr_init_provider(void); |
131 | extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *); | 119 | extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *); |
132 | extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t); | 120 | extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t); |
diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h index 9ca642cad338..ff33e3bd3f8e 100644 --- a/include/asm-ia64/sn/sn_feature_sets.h +++ b/include/asm-ia64/sn/sn_feature_sets.h | |||
@@ -12,9 +12,6 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | 14 | ||
15 | #include <asm/types.h> | ||
16 | #include <asm/bitops.h> | ||
17 | |||
18 | /* --------------------- PROM Features -----------------------------*/ | 15 | /* --------------------- PROM Features -----------------------------*/ |
19 | extern int sn_prom_feature_available(int id); | 16 | extern int sn_prom_feature_available(int id); |
20 | 17 | ||
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h index 0c36928ffd8b..df7f5f4f3cde 100644 --- a/include/asm-ia64/sn/xpc.h +++ b/include/asm-ia64/sn/xpc.h | |||
@@ -508,19 +508,24 @@ struct xpc_channel { | |||
508 | #define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ | 508 | #define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ |
509 | 509 | ||
510 | #define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ | 510 | #define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ |
511 | #define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */ | 511 | #define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ |
512 | #define XPC_C_CONNECTED 0x00000080 /* local channel is connected */ | 512 | #define XPC_C_CONNECTEDCALLOUT_MADE \ |
513 | #define XPC_C_CONNECTING 0x00000100 /* channel is being connected */ | 513 | 0x00000080 /* connected callout completed */ |
514 | 514 | #define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ | |
515 | #define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */ | 515 | #define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ |
516 | #define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */ | 516 | |
517 | #define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */ | 517 | #define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ |
518 | #define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */ | 518 | #define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ |
519 | 519 | #define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ | |
520 | #define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */ | 520 | #define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ |
521 | #define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */ | 521 | |
522 | #define XPC_C_DISCONNECTCALLOUT 0x00008000 /* chan disconnected callout made */ | 522 | #define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ |
523 | #define XPC_C_WDISCONNECT 0x00010000 /* waiting for channel disconnect */ | 523 | #define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ |
524 | #define XPC_C_DISCONNECTINGCALLOUT \ | ||
525 | 0x00010000 /* disconnecting callout initiated */ | ||
526 | #define XPC_C_DISCONNECTINGCALLOUT_MADE \ | ||
527 | 0x00020000 /* disconnecting callout completed */ | ||
528 | #define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ | ||
524 | 529 | ||
525 | 530 | ||
526 | 531 | ||
diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h index 414aae060440..05a6baf8a472 100644 --- a/include/asm-ia64/timex.h +++ b/include/asm-ia64/timex.h | |||
@@ -15,6 +15,8 @@ | |||
15 | 15 | ||
16 | typedef unsigned long cycles_t; | 16 | typedef unsigned long cycles_t; |
17 | 17 | ||
18 | extern void (*ia64_udelay)(unsigned long usecs); | ||
19 | |||
18 | /* | 20 | /* |
19 | * For performance reasons, we don't want to define CLOCK_TICK_TRATE as | 21 | * For performance reasons, we don't want to define CLOCK_TICK_TRATE as |
20 | * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George | 22 | * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George |
diff --git a/include/asm-m32r/mman.h b/include/asm-m32r/mman.h index 12e29747bc84..6b02fe3fcff2 100644 --- a/include/asm-m32r/mman.h +++ b/include/asm-m32r/mman.h | |||
@@ -38,6 +38,8 @@ | |||
38 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 38 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
39 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 39 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
40 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 40 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
41 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
42 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
41 | 43 | ||
42 | /* compatibility flags */ | 44 | /* compatibility flags */ |
43 | #define MAP_ANON MAP_ANONYMOUS | 45 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-m68k/mman.h b/include/asm-m68k/mman.h index ea262ab88b3b..efd12bc4ccb7 100644 --- a/include/asm-m68k/mman.h +++ b/include/asm-m68k/mman.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
37 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 37 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
39 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
40 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
39 | 41 | ||
40 | /* compatibility flags */ | 42 | /* compatibility flags */ |
41 | #define MAP_ANON MAP_ANONYMOUS | 43 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h index 934e063e79f1..818b9a97e214 100644 --- a/include/asm-mips/cpu.h +++ b/include/asm-mips/cpu.h | |||
@@ -204,9 +204,9 @@ | |||
204 | */ | 204 | */ |
205 | #define MIPS_CPU_ISA_I 0x00000001 | 205 | #define MIPS_CPU_ISA_I 0x00000001 |
206 | #define MIPS_CPU_ISA_II 0x00000002 | 206 | #define MIPS_CPU_ISA_II 0x00000002 |
207 | #define MIPS_CPU_ISA_III 0x00000003 | 207 | #define MIPS_CPU_ISA_III 0x00000004 |
208 | #define MIPS_CPU_ISA_IV 0x00000004 | 208 | #define MIPS_CPU_ISA_IV 0x00000008 |
209 | #define MIPS_CPU_ISA_V 0x00000005 | 209 | #define MIPS_CPU_ISA_V 0x00000010 |
210 | #define MIPS_CPU_ISA_M32R1 0x00000020 | 210 | #define MIPS_CPU_ISA_M32R1 0x00000020 |
211 | #define MIPS_CPU_ISA_M32R2 0x00000040 | 211 | #define MIPS_CPU_ISA_M32R2 0x00000040 |
212 | #define MIPS_CPU_ISA_M64R1 0x00000080 | 212 | #define MIPS_CPU_ISA_M64R1 0x00000080 |
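The renumbering above turns the ISA levels from consecutive integers into individual bits, so a CPU can advertise every level it implements as an OR-ed mask and callers test membership with a single AND instead of relying on numeric ordering. A small illustration with made-up values (not the MIPS_CPU_ISA_* constants themselves):

#include <stdio.h>

#define ISA_I   0x01
#define ISA_II  0x02
#define ISA_III 0x04
#define ISA_IV  0x08

int main(void)
{
	unsigned int isa = ISA_I | ISA_II | ISA_III;	/* e.g. an R4000-class CPU */

	if (isa & ISA_III)
		printf("level III instructions available\n");
	if (!(isa & ISA_IV))
		printf("level IV instructions not available\n");
	return 0;
}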
diff --git a/include/asm-mips/gcc/sgidefs.h b/include/asm-mips/gcc/sgidefs.h deleted file mode 100644 index 05994371a2af..000000000000 --- a/include/asm-mips/gcc/sgidefs.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* | ||
2 | * include/sgidefs.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1996 by Ralf Baechle | ||
9 | * | ||
10 | * This file is here to satisfy GCC's expectations. | ||
11 | */ | ||
12 | #ifndef __SGIDEFS_H | ||
13 | #define __SGIDEFS_H | ||
14 | |||
15 | #include <asm/sgidefs.h> | ||
16 | |||
17 | #endif /* __SGIDEFS_H */ | ||
diff --git a/include/asm-mips/mach-generic/timex.h b/include/asm-mips/mach-generic/timex.h index c6a2e5f0574a..48b4cfaa0d50 100644 --- a/include/asm-mips/mach-generic/timex.h +++ b/include/asm-mips/mach-generic/timex.h | |||
@@ -3,20 +3,11 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2003 by Ralf Baechle | 6 | * Copyright (C) 2003, 2005 by Ralf Baechle |
7 | */ | 7 | */ |
8 | #ifndef __ASM_MACH_GENERIC_TIMEX_H | 8 | #ifndef __ASM_MACH_GENERIC_TIMEX_H |
9 | #define __ASM_MACH_GENERIC_TIMEX_H | 9 | #define __ASM_MACH_GENERIC_TIMEX_H |
10 | 10 | ||
11 | #include <linux/config.h> | ||
12 | |||
13 | /* | ||
14 | * Last remaining user of the i8254 PIC, will be converted, too ... | ||
15 | */ | ||
16 | #ifdef CONFIG_SNI_RM200_PCI | ||
17 | #define CLOCK_TICK_RATE 1193182 | ||
18 | #else | ||
19 | #define CLOCK_TICK_RATE 500000 | 11 | #define CLOCK_TICK_RATE 500000 |
20 | #endif | ||
21 | 12 | ||
22 | #endif /* __ASM_MACH_GENERIC_TIMEX_H */ | 13 | #endif /* __ASM_MACH_GENERIC_TIMEX_H */ |
diff --git a/include/asm-mips/mach-rm200/timex.h b/include/asm-mips/mach-rm200/timex.h new file mode 100644 index 000000000000..11ff6cb0f214 --- /dev/null +++ b/include/asm-mips/mach-rm200/timex.h | |||
@@ -0,0 +1,13 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003, 2005 by Ralf Baechle | ||
7 | */ | ||
8 | #ifndef __ASM_MACH_RM200_TIMEX_H | ||
9 | #define __ASM_MACH_RM200_TIMEX_H | ||
10 | |||
11 | #define CLOCK_TICK_RATE 1193182 | ||
12 | |||
13 | #endif /* __ASM_MACH_RM200_TIMEX_H */ | ||
diff --git a/include/asm-mips/mman.h b/include/asm-mips/mman.h index dd17c8bd62a1..6d01e26830fa 100644 --- a/include/asm-mips/mman.h +++ b/include/asm-mips/mman.h | |||
@@ -66,6 +66,8 @@ | |||
66 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 66 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
67 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 67 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
68 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 68 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
69 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
70 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
69 | 71 | ||
70 | /* compatibility flags */ | 72 | /* compatibility flags */ |
71 | #define MAP_ANON MAP_ANONYMOUS | 73 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h index cc53196efa40..9632c27dad15 100644 --- a/include/asm-mips/r4kcache.h +++ b/include/asm-mips/r4kcache.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <asm/asm.h> | 15 | #include <asm/asm.h> |
16 | #include <asm/cacheops.h> | 16 | #include <asm/cacheops.h> |
17 | #include <asm/cpu-features.h> | ||
17 | 18 | ||
18 | /* | 19 | /* |
19 | * This macro return a properly sign-extended address suitable as base address | 20 | * This macro return a properly sign-extended address suitable as base address |
@@ -78,22 +79,25 @@ static inline void flush_scache_line(unsigned long addr) | |||
78 | cache_op(Hit_Writeback_Inv_SD, addr); | 79 | cache_op(Hit_Writeback_Inv_SD, addr); |
79 | } | 80 | } |
80 | 81 | ||
82 | #define protected_cache_op(op,addr) \ | ||
83 | __asm__ __volatile__( \ | ||
84 | " .set push \n" \ | ||
85 | " .set noreorder \n" \ | ||
86 | " .set mips3 \n" \ | ||
87 | "1: cache %0, (%1) \n" \ | ||
88 | "2: .set pop \n" \ | ||
89 | " .section __ex_table,\"a\" \n" \ | ||
90 | " "STR(PTR)" 1b, 2b \n" \ | ||
91 | " .previous" \ | ||
92 | : \ | ||
93 | : "i" (op), "r" (addr)) | ||
94 | |||
81 | /* | 95 | /* |
82 | * The next two are for badland addresses like signal trampolines. | 96 | * The next two are for badland addresses like signal trampolines. |
83 | */ | 97 | */ |
84 | static inline void protected_flush_icache_line(unsigned long addr) | 98 | static inline void protected_flush_icache_line(unsigned long addr) |
85 | { | 99 | { |
86 | __asm__ __volatile__( | 100 | protected_cache_op(Hit_Invalidate_I, addr); |
87 | " .set push \n" | ||
88 | " .set noreorder \n" | ||
89 | " .set mips3 \n" | ||
90 | "1: cache %0, (%1) \n" | ||
91 | "2: .set pop \n" | ||
92 | " .section __ex_table,\"a\" \n" | ||
93 | " "STR(PTR)" 1b, 2b \n" | ||
94 | " .previous" | ||
95 | : | ||
96 | : "i" (Hit_Invalidate_I), "r" (addr)); | ||
97 | } | 101 | } |
98 | 102 | ||
99 | /* | 103 | /* |
@@ -104,32 +108,12 @@ static inline void protected_flush_icache_line(unsigned long addr) | |||
104 | */ | 108 | */ |
105 | static inline void protected_writeback_dcache_line(unsigned long addr) | 109 | static inline void protected_writeback_dcache_line(unsigned long addr) |
106 | { | 110 | { |
107 | __asm__ __volatile__( | 111 | protected_cache_op(Hit_Writeback_Inv_D, addr); |
108 | " .set push \n" | ||
109 | " .set noreorder \n" | ||
110 | " .set mips3 \n" | ||
111 | "1: cache %0, (%1) \n" | ||
112 | "2: .set pop \n" | ||
113 | " .section __ex_table,\"a\" \n" | ||
114 | " "STR(PTR)" 1b, 2b \n" | ||
115 | " .previous" | ||
116 | : | ||
117 | : "i" (Hit_Writeback_Inv_D), "r" (addr)); | ||
118 | } | 112 | } |
119 | 113 | ||
120 | static inline void protected_writeback_scache_line(unsigned long addr) | 114 | static inline void protected_writeback_scache_line(unsigned long addr) |
121 | { | 115 | { |
122 | __asm__ __volatile__( | 116 | protected_cache_op(Hit_Writeback_Inv_SD, addr); |
123 | " .set push \n" | ||
124 | " .set noreorder \n" | ||
125 | " .set mips3 \n" | ||
126 | "1: cache %0, (%1) \n" | ||
127 | "2: .set pop \n" | ||
128 | " .section __ex_table,\"a\" \n" | ||
129 | " "STR(PTR)" 1b, 2b \n" | ||
130 | " .previous" | ||
131 | : | ||
132 | : "i" (Hit_Writeback_Inv_SD), "r" (addr)); | ||
133 | } | 117 | } |
134 | 118 | ||
135 | /* | 119 | /* |
@@ -295,4 +279,28 @@ __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) | |||
295 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) | 279 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) |
296 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) | 280 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) |
297 | 281 | ||
282 | /* build blast_xxx_range, protected_blast_xxx_range */ | ||
283 | #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \ | ||
284 | static inline void prot##blast_##pfx##cache##_range(unsigned long start, \ | ||
285 | unsigned long end) \ | ||
286 | { \ | ||
287 | unsigned long lsize = cpu_##desc##_line_size(); \ | ||
288 | unsigned long addr = start & ~(lsize - 1); \ | ||
289 | unsigned long aend = (end - 1) & ~(lsize - 1); \ | ||
290 | while (1) { \ | ||
291 | prot##cache_op(hitop, addr); \ | ||
292 | if (addr == aend) \ | ||
293 | break; \ | ||
294 | addr += lsize; \ | ||
295 | } \ | ||
296 | } | ||
297 | |||
298 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) | ||
299 | __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_) | ||
300 | __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_) | ||
301 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, ) | ||
302 | __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, ) | ||
303 | /* blast_inv_dcache_range */ | ||
304 | __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, ) | ||
305 | |||
298 | #endif /* _ASM_R4KCACHE_H */ | 306 | #endif /* _ASM_R4KCACHE_H */ |
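
For reference, the range helpers generated by __BUILD_BLAST_CACHE_RANGE above are plain inline functions; a rough sketch of the protected D-cache instantiation, assuming cpu_dcache_line_size() comes from the newly included <asm/cpu-features.h>, would look like this (illustrative expansion, not literal preprocessor output):

	static inline void protected_blast_dcache_range(unsigned long start,
							unsigned long end)
	{
		/* Round both ends down to cache-line boundaries. */
		unsigned long lsize = cpu_dcache_line_size();
		unsigned long addr = start & ~(lsize - 1);
		unsigned long aend = (end - 1) & ~(lsize - 1);

		/* Writeback+invalidate one line at a time; protected_cache_op()
		 * carries an __ex_table fixup, so a fault on a bad address is
		 * skipped instead of taking the kernel down. */
		while (1) {
			protected_cache_op(Hit_Writeback_Inv_D, addr);
			if (addr == aend)
				break;
			addr += lsize;
		}
	}
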
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h index 91d813a37823..7a553e9d44d3 100644 --- a/include/asm-mips/uaccess.h +++ b/include/asm-mips/uaccess.h | |||
@@ -266,6 +266,8 @@ do { \ | |||
266 | */ | 266 | */ |
267 | #define __get_user_asm_ll32(val, addr) \ | 267 | #define __get_user_asm_ll32(val, addr) \ |
268 | { \ | 268 | { \ |
269 | unsigned long long __gu_tmp; \ | ||
270 | \ | ||
269 | __asm__ __volatile__( \ | 271 | __asm__ __volatile__( \ |
270 | "1: lw %1, (%3) \n" \ | 272 | "1: lw %1, (%3) \n" \ |
271 | "2: lw %D1, 4(%3) \n" \ | 273 | "2: lw %D1, 4(%3) \n" \ |
@@ -280,8 +282,9 @@ do { \ | |||
280 | " " __UA_ADDR " 1b, 4b \n" \ | 282 | " " __UA_ADDR " 1b, 4b \n" \ |
281 | " " __UA_ADDR " 2b, 4b \n" \ | 283 | " " __UA_ADDR " 2b, 4b \n" \ |
282 | " .previous \n" \ | 284 | " .previous \n" \ |
283 | : "=r" (__gu_err), "=&r" (val) \ | 285 | : "=r" (__gu_err), "=&r" (__gu_tmp) \ |
284 | : "0" (0), "r" (addr), "i" (-EFAULT)); \ | 286 | : "0" (0), "r" (addr), "i" (-EFAULT)); \ |
287 | (val) = __gu_tmp; \ | ||
285 | } | 288 | } |
286 | 289 | ||
287 | /* | 290 | /* |
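
The point of routing the asm output through the new __gu_tmp is that the two lw instructions always fill a genuine 64-bit object before the result is assigned to the caller's lvalue, whatever that lvalue's type. A hedged usage sketch (uaddr and value are illustrative names only):

	long long value;
	int err;

	/* On a 32-bit MIPS kernel an 8-byte access goes through
	 * __get_user_asm_ll32(): the pair of lw instructions fills
	 * __gu_tmp, which is then copied into 'value'. */
	err = __get_user(value, (long long __user *)uaddr);
	if (err)
		return -EFAULT;
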
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h index e7ff9b187783..769305d20108 100644 --- a/include/asm-mips/unistd.h +++ b/include/asm-mips/unistd.h | |||
@@ -1184,10 +1184,8 @@ type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \ | |||
1184 | #define __ARCH_WANT_SYS_SIGPENDING | 1184 | #define __ARCH_WANT_SYS_SIGPENDING |
1185 | #define __ARCH_WANT_SYS_SIGPROCMASK | 1185 | #define __ARCH_WANT_SYS_SIGPROCMASK |
1186 | #define __ARCH_WANT_SYS_RT_SIGACTION | 1186 | #define __ARCH_WANT_SYS_RT_SIGACTION |
1187 | # ifndef __mips64 | ||
1188 | # define __ARCH_WANT_STAT64 | ||
1189 | # endif | ||
1190 | # ifdef CONFIG_32BIT | 1187 | # ifdef CONFIG_32BIT |
1188 | # define __ARCH_WANT_STAT64 | ||
1191 | # define __ARCH_WANT_SYS_TIME | 1189 | # define __ARCH_WANT_SYS_TIME |
1192 | # endif | 1190 | # endif |
1193 | # ifdef CONFIG_MIPS32_O32 | 1191 | # ifdef CONFIG_MIPS32_O32 |
diff --git a/include/asm-parisc/mman.h b/include/asm-parisc/mman.h index 736b0abcac05..a381cf5c8f55 100644 --- a/include/asm-parisc/mman.h +++ b/include/asm-parisc/mman.h | |||
@@ -49,6 +49,8 @@ | |||
49 | #define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ | 49 | #define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ |
50 | #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ | 50 | #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ |
51 | #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ | 51 | #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ |
52 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
53 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
52 | 54 | ||
53 | /* compatibility flags */ | 55 | /* compatibility flags */ |
54 | #define MAP_ANON MAP_ANONYMOUS | 56 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h index a2e34c21b44f..fcff25d13f13 100644 --- a/include/asm-powerpc/mman.h +++ b/include/asm-powerpc/mman.h | |||
@@ -45,6 +45,8 @@ | |||
45 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 45 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
46 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 46 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
47 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 47 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
48 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
49 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
48 | 50 | ||
49 | /* compatibility flags */ | 51 | /* compatibility flags */ |
50 | #define MAP_ANON MAP_ANONYMOUS | 52 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h index c8d5409b5d56..d41ca1477010 100644 --- a/include/asm-s390/mman.h +++ b/include/asm-s390/mman.h | |||
@@ -44,6 +44,8 @@ | |||
44 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 44 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
45 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 45 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
46 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 46 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
47 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
48 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
47 | 49 | ||
48 | /* compatibility flags */ | 50 | /* compatibility flags */ |
49 | #define MAP_ANON MAP_ANONYMOUS | 51 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-sh/mman.h b/include/asm-sh/mman.h index 693bd55a3710..0e08d0573abc 100644 --- a/include/asm-sh/mman.h +++ b/include/asm-sh/mman.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
37 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 37 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 38 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
39 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
40 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
39 | 41 | ||
40 | /* compatibility flags */ | 42 | /* compatibility flags */ |
41 | #define MAP_ANON MAP_ANONYMOUS | 43 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-sparc/mman.h b/include/asm-sparc/mman.h index 98435ad8619e..4a298b2be859 100644 --- a/include/asm-sparc/mman.h +++ b/include/asm-sparc/mman.h | |||
@@ -55,6 +55,8 @@ | |||
55 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 55 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
56 | #define MADV_FREE 0x5 /* (Solaris) contents can be freed */ | 56 | #define MADV_FREE 0x5 /* (Solaris) contents can be freed */ |
57 | #define MADV_REMOVE 0x6 /* remove these pages & resources */ | 57 | #define MADV_REMOVE 0x6 /* remove these pages & resources */ |
58 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
59 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
58 | 60 | ||
59 | /* compatibility flags */ | 61 | /* compatibility flags */ |
60 | #define MAP_ANON MAP_ANONYMOUS | 62 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h index cb4b6156194d..d705ec92da8b 100644 --- a/include/asm-sparc64/mman.h +++ b/include/asm-sparc64/mman.h | |||
@@ -55,6 +55,8 @@ | |||
55 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 55 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
56 | #define MADV_FREE 0x5 /* (Solaris) contents can be freed */ | 56 | #define MADV_FREE 0x5 /* (Solaris) contents can be freed */ |
57 | #define MADV_REMOVE 0x6 /* remove these pages & resources */ | 57 | #define MADV_REMOVE 0x6 /* remove these pages & resources */ |
58 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
59 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
58 | 60 | ||
59 | /* compatibility flags */ | 61 | /* compatibility flags */ |
60 | #define MAP_ANON MAP_ANONYMOUS | 62 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-v850/mman.h b/include/asm-v850/mman.h index edc79965193a..7b851c310e41 100644 --- a/include/asm-v850/mman.h +++ b/include/asm-v850/mman.h | |||
@@ -33,6 +33,8 @@ | |||
33 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 33 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
34 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 34 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
35 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 35 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
36 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
37 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
36 | 38 | ||
37 | /* compatibility flags */ | 39 | /* compatibility flags */ |
38 | #define MAP_ANON MAP_ANONYMOUS | 40 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h index d0e97b74f735..b699a38c1c3c 100644 --- a/include/asm-x86_64/mman.h +++ b/include/asm-x86_64/mman.h | |||
@@ -37,6 +37,8 @@ | |||
37 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 37 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
38 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 38 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
39 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 39 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
40 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
41 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
40 | 42 | ||
41 | /* compatibility flags */ | 43 | /* compatibility flags */ |
42 | #define MAP_ANON MAP_ANONYMOUS | 44 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h index 082a7504925e..e2d7afb679c8 100644 --- a/include/asm-xtensa/mman.h +++ b/include/asm-xtensa/mman.h | |||
@@ -73,6 +73,8 @@ | |||
73 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | 73 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ |
74 | #define MADV_DONTNEED 0x4 /* discard these pages */ | 74 | #define MADV_DONTNEED 0x4 /* discard these pages */ |
75 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ | 75 | #define MADV_REMOVE 0x5 /* remove these pages & resources */ |
76 | #define MADV_DONTFORK 0x30 /* dont inherit across fork */ | ||
77 | #define MADV_DOFORK 0x31 /* do inherit across fork */ | ||
76 | 78 | ||
77 | /* compatibility flags */ | 79 | /* compatibility flags */ |
78 | #define MAP_ANON MAP_ANONYMOUS | 80 | #define MAP_ANON MAP_ANONYMOUS |
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 0fe4aa891ddc..41ee79962bb2 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
@@ -498,12 +498,6 @@ struct transaction_s | |||
498 | struct journal_head *t_checkpoint_list; | 498 | struct journal_head *t_checkpoint_list; |
499 | 499 | ||
500 | /* | 500 | /* |
501 | * Doubly-linked circular list of all buffers submitted for IO while | ||
502 | * checkpointing. [j_list_lock] | ||
503 | */ | ||
504 | struct journal_head *t_checkpoint_io_list; | ||
505 | |||
506 | /* | ||
507 | * Doubly-linked circular list of temporary buffers currently undergoing | 501 | * Doubly-linked circular list of temporary buffers currently undergoing |
508 | * IO in the log [j_list_lock] | 502 | * IO in the log [j_list_lock] |
509 | */ | 503 | */ |
@@ -852,7 +846,7 @@ extern void journal_commit_transaction(journal_t *); | |||
852 | 846 | ||
853 | /* Checkpoint list management */ | 847 | /* Checkpoint list management */ |
854 | int __journal_clean_checkpoint_list(journal_t *journal); | 848 | int __journal_clean_checkpoint_list(journal_t *journal); |
855 | int __journal_remove_checkpoint(struct journal_head *); | 849 | void __journal_remove_checkpoint(struct journal_head *); |
856 | void __journal_insert_checkpoint(struct journal_head *, transaction_t *); | 850 | void __journal_insert_checkpoint(struct journal_head *, transaction_t *); |
857 | 851 | ||
858 | /* Buffer IO */ | 852 | /* Buffer IO */ |
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 920766cea79c..ef21ed296039 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
@@ -149,7 +149,7 @@ struct nlm_rqst * nlmclnt_alloc_call(void); | |||
149 | int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl); | 149 | int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl); |
150 | void nlmclnt_finish_block(struct nlm_rqst *req); | 150 | void nlmclnt_finish_block(struct nlm_rqst *req); |
151 | long nlmclnt_block(struct nlm_rqst *req, long timeout); | 151 | long nlmclnt_block(struct nlm_rqst *req, long timeout); |
152 | u32 nlmclnt_grant(struct nlm_lock *); | 152 | u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *); |
153 | void nlmclnt_recovery(struct nlm_host *, u32); | 153 | void nlmclnt_recovery(struct nlm_host *, u32); |
154 | int nlmclnt_reclaim(struct nlm_host *, struct file_lock *); | 154 | int nlmclnt_reclaim(struct nlm_host *, struct file_lock *); |
155 | int nlmclnt_setgrantargs(struct nlm_rqst *, struct nlm_lock *); | 155 | int nlmclnt_setgrantargs(struct nlm_rqst *, struct nlm_lock *); |
@@ -204,7 +204,7 @@ nlmsvc_file_inode(struct nlm_file *file) | |||
204 | * Compare two host addresses (needs modifying for ipv6) | 204 | * Compare two host addresses (needs modifying for ipv6) |
205 | */ | 205 | */ |
206 | static __inline__ int | 206 | static __inline__ int |
207 | nlm_cmp_addr(struct sockaddr_in *sin1, struct sockaddr_in *sin2) | 207 | nlm_cmp_addr(const struct sockaddr_in *sin1, const struct sockaddr_in *sin2) |
208 | { | 208 | { |
209 | return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; | 209 | return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; |
210 | } | 210 | } |
@@ -214,7 +214,7 @@ nlm_cmp_addr(struct sockaddr_in *sin1, struct sockaddr_in *sin2) | |||
214 | * When the second lock is of type F_UNLCK, this acts like a wildcard. | 214 | * When the second lock is of type F_UNLCK, this acts like a wildcard. |
215 | */ | 215 | */ |
216 | static __inline__ int | 216 | static __inline__ int |
217 | nlm_compare_locks(struct file_lock *fl1, struct file_lock *fl2) | 217 | nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2) |
218 | { | 218 | { |
219 | return fl1->fl_pid == fl2->fl_pid | 219 | return fl1->fl_pid == fl2->fl_pid |
220 | && fl1->fl_start == fl2->fl_start | 220 | && fl1->fl_start == fl2->fl_start |
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index fdc4a9527343..43c09d790b83 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h | |||
@@ -79,7 +79,7 @@ enum nf_ip_hook_priorities { | |||
79 | 79 | ||
80 | #ifdef __KERNEL__ | 80 | #ifdef __KERNEL__ |
81 | extern int ip_route_me_harder(struct sk_buff **pskb); | 81 | extern int ip_route_me_harder(struct sk_buff **pskb); |
82 | 82 | extern int ip_xfrm_me_harder(struct sk_buff **pskb); | |
83 | #endif /*__KERNEL__*/ | 83 | #endif /*__KERNEL__*/ |
84 | 84 | ||
85 | #endif /*__LINUX_IP_NETFILTER_H*/ | 85 | #endif /*__LINUX_IP_NETFILTER_H*/ |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 9d5cd106b344..0d36750fc0f1 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -84,6 +84,7 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us | |||
84 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); | 84 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); |
85 | extern int ptrace_attach(struct task_struct *tsk); | 85 | extern int ptrace_attach(struct task_struct *tsk); |
86 | extern int ptrace_detach(struct task_struct *, unsigned int); | 86 | extern int ptrace_detach(struct task_struct *, unsigned int); |
87 | extern void __ptrace_detach(struct task_struct *, unsigned int); | ||
87 | extern void ptrace_disable(struct task_struct *); | 88 | extern void ptrace_disable(struct task_struct *); |
88 | extern int ptrace_check_attach(struct task_struct *task, int kill); | 89 | extern int ptrace_check_attach(struct task_struct *task, int kill); |
89 | extern int ptrace_request(struct task_struct *child, long request, long addr, long data); | 90 | extern int ptrace_request(struct task_struct *child, long request, long addr, long data); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c1da0269a18..b6f51e3a38ec 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -697,12 +697,9 @@ struct task_struct { | |||
697 | 697 | ||
698 | int lock_depth; /* BKL lock depth */ | 698 | int lock_depth; /* BKL lock depth */ |
699 | 699 | ||
700 | #if defined(CONFIG_SMP) | 700 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
701 | int last_waker_cpu; /* CPU that last woke this task up */ | ||
702 | #if defined(__ARCH_WANT_UNLOCKED_CTXSW) | ||
703 | int oncpu; | 701 | int oncpu; |
704 | #endif | 702 | #endif |
705 | #endif | ||
706 | int prio, static_prio; | 703 | int prio, static_prio; |
707 | struct list_head run_list; | 704 | struct list_head run_list; |
708 | prio_array_t *array; | 705 | prio_array_t *array; |
diff --git a/kernel/fork.c b/kernel/fork.c index 8e88b374cee9..fbea12d7a943 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1123,8 +1123,8 @@ static task_t *copy_process(unsigned long clone_flags, | |||
1123 | p->real_parent = current; | 1123 | p->real_parent = current; |
1124 | p->parent = p->real_parent; | 1124 | p->parent = p->real_parent; |
1125 | 1125 | ||
1126 | spin_lock(¤t->sighand->siglock); | ||
1126 | if (clone_flags & CLONE_THREAD) { | 1127 | if (clone_flags & CLONE_THREAD) { |
1127 | spin_lock(¤t->sighand->siglock); | ||
1128 | /* | 1128 | /* |
1129 | * Important: if an exit-all has been started then | 1129 | * Important: if an exit-all has been started then |
1130 | * do not create this new thread - the whole thread | 1130 | * do not create this new thread - the whole thread |
@@ -1162,8 +1162,6 @@ static task_t *copy_process(unsigned long clone_flags, | |||
1162 | */ | 1162 | */ |
1163 | p->it_prof_expires = jiffies_to_cputime(1); | 1163 | p->it_prof_expires = jiffies_to_cputime(1); |
1164 | } | 1164 | } |
1165 | |||
1166 | spin_unlock(¤t->sighand->siglock); | ||
1167 | } | 1165 | } |
1168 | 1166 | ||
1169 | /* | 1167 | /* |
@@ -1175,8 +1173,6 @@ static task_t *copy_process(unsigned long clone_flags, | |||
1175 | if (unlikely(p->ptrace & PT_PTRACED)) | 1173 | if (unlikely(p->ptrace & PT_PTRACED)) |
1176 | __ptrace_link(p, current->parent); | 1174 | __ptrace_link(p, current->parent); |
1177 | 1175 | ||
1178 | attach_pid(p, PIDTYPE_PID, p->pid); | ||
1179 | attach_pid(p, PIDTYPE_TGID, p->tgid); | ||
1180 | if (thread_group_leader(p)) { | 1176 | if (thread_group_leader(p)) { |
1181 | p->signal->tty = current->signal->tty; | 1177 | p->signal->tty = current->signal->tty; |
1182 | p->signal->pgrp = process_group(current); | 1178 | p->signal->pgrp = process_group(current); |
@@ -1186,9 +1182,12 @@ static task_t *copy_process(unsigned long clone_flags, | |||
1186 | if (p->pid) | 1182 | if (p->pid) |
1187 | __get_cpu_var(process_counts)++; | 1183 | __get_cpu_var(process_counts)++; |
1188 | } | 1184 | } |
1185 | attach_pid(p, PIDTYPE_TGID, p->tgid); | ||
1186 | attach_pid(p, PIDTYPE_PID, p->pid); | ||
1189 | 1187 | ||
1190 | nr_threads++; | 1188 | nr_threads++; |
1191 | total_forks++; | 1189 | total_forks++; |
1190 | spin_unlock(¤t->sighand->siglock); | ||
1192 | write_unlock_irq(&tasklist_lock); | 1191 | write_unlock_irq(&tasklist_lock); |
1193 | proc_fork_connector(p); | 1192 | proc_fork_connector(p); |
1194 | return p; | 1193 | return p; |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 2b6e1757aedd..5ae51f1bc7c8 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -418,8 +418,19 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) | |||
418 | /* Switch the timer base, if necessary: */ | 418 | /* Switch the timer base, if necessary: */ |
419 | new_base = switch_hrtimer_base(timer, base); | 419 | new_base = switch_hrtimer_base(timer, base); |
420 | 420 | ||
421 | if (mode == HRTIMER_REL) | 421 | if (mode == HRTIMER_REL) { |
422 | tim = ktime_add(tim, new_base->get_time()); | 422 | tim = ktime_add(tim, new_base->get_time()); |
423 | /* | ||
424 | * CONFIG_TIME_LOW_RES is a temporary way for architectures | ||
425 | * to signal that they simply return xtime in | ||
426 | * do_gettimeoffset(). In this case we want to round up by | ||
427 | * resolution when starting a relative timer, to avoid short | ||
428 | * timeouts. This will go away with the GTOD framework. | ||
429 | */ | ||
430 | #ifdef CONFIG_TIME_LOW_RES | ||
431 | tim = ktime_add(tim, base->resolution); | ||
432 | #endif | ||
433 | } | ||
423 | timer->expires = tim; | 434 | timer->expires = tim; |
424 | 435 | ||
425 | enqueue_hrtimer(timer, new_base); | 436 | enqueue_hrtimer(timer, new_base); |
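
A worked numeric example of why the rounding avoids short timeouts (all numbers illustrative, assuming a 10 ms tick and an xtime-based get_time()):

	/* Ticks update xtime at t = 40, 50, 60 ms.  Real time is 49.9 ms,
	 * but get_time() still reports 40 ms.  A caller asks for a 1 ms
	 * relative timer. */
	long long now_reported = 40;	/* ms, == xtime           */
	long long resolution   = 10;	/* ms, one tick           */
	long long requested    =  1;	/* ms, relative timeout   */

	long long naive   = now_reported + requested;	/* 41 ms */
	long long rounded = naive + resolution;		/* 51 ms */

	/* 'naive' already lies before the next tick at 50 ms, so the timer
	 * could fire almost immediately in real time.  'rounded' cannot
	 * expire before the tick after that, so at least the requested
	 * delay really elapses. */
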
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 5f33cdb6fff5..d95a72c9279d 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -72,8 +72,8 @@ void ptrace_untrace(task_t *child) | |||
72 | */ | 72 | */ |
73 | void __ptrace_unlink(task_t *child) | 73 | void __ptrace_unlink(task_t *child) |
74 | { | 74 | { |
75 | if (!child->ptrace) | 75 | BUG_ON(!child->ptrace); |
76 | BUG(); | 76 | |
77 | child->ptrace = 0; | 77 | child->ptrace = 0; |
78 | if (!list_empty(&child->ptrace_list)) { | 78 | if (!list_empty(&child->ptrace_list)) { |
79 | list_del_init(&child->ptrace_list); | 79 | list_del_init(&child->ptrace_list); |
@@ -184,22 +184,27 @@ bad: | |||
184 | return retval; | 184 | return retval; |
185 | } | 185 | } |
186 | 186 | ||
187 | void __ptrace_detach(struct task_struct *child, unsigned int data) | ||
188 | { | ||
189 | child->exit_code = data; | ||
190 | /* .. re-parent .. */ | ||
191 | __ptrace_unlink(child); | ||
192 | /* .. and wake it up. */ | ||
193 | if (child->exit_state != EXIT_ZOMBIE) | ||
194 | wake_up_process(child); | ||
195 | } | ||
196 | |||
187 | int ptrace_detach(struct task_struct *child, unsigned int data) | 197 | int ptrace_detach(struct task_struct *child, unsigned int data) |
188 | { | 198 | { |
189 | if (!valid_signal(data)) | 199 | if (!valid_signal(data)) |
190 | return -EIO; | 200 | return -EIO; |
191 | 201 | ||
192 | /* Architecture-specific hardware disable .. */ | 202 | /* Architecture-specific hardware disable .. */ |
193 | ptrace_disable(child); | 203 | ptrace_disable(child); |
194 | 204 | ||
195 | /* .. re-parent .. */ | ||
196 | child->exit_code = data; | ||
197 | |||
198 | write_lock_irq(&tasklist_lock); | 205 | write_lock_irq(&tasklist_lock); |
199 | __ptrace_unlink(child); | 206 | if (child->ptrace) |
200 | /* .. and wake it up. */ | 207 | __ptrace_detach(child, data); |
201 | if (child->exit_state != EXIT_ZOMBIE) | ||
202 | wake_up_process(child); | ||
203 | write_unlock_irq(&tasklist_lock); | 208 | write_unlock_irq(&tasklist_lock); |
204 | 209 | ||
205 | return 0; | 210 | return 0; |
@@ -242,8 +247,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in | |||
242 | if (write) { | 247 | if (write) { |
243 | copy_to_user_page(vma, page, addr, | 248 | copy_to_user_page(vma, page, addr, |
244 | maddr + offset, buf, bytes); | 249 | maddr + offset, buf, bytes); |
245 | if (!PageCompound(page)) | 250 | set_page_dirty_lock(page); |
246 | set_page_dirty_lock(page); | ||
247 | } else { | 251 | } else { |
248 | copy_from_user_page(vma, page, addr, | 252 | copy_from_user_page(vma, page, addr, |
249 | buf, maddr + offset, bytes); | 253 | buf, maddr + offset, bytes); |
diff --git a/kernel/sched.c b/kernel/sched.c index 87d93be336a1..66d957227de9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1204,9 +1204,6 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync) | |||
1204 | } | 1204 | } |
1205 | } | 1205 | } |
1206 | 1206 | ||
1207 | if (p->last_waker_cpu != this_cpu) | ||
1208 | goto out_set_cpu; | ||
1209 | |||
1210 | if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) | 1207 | if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) |
1211 | goto out_set_cpu; | 1208 | goto out_set_cpu; |
1212 | 1209 | ||
@@ -1277,8 +1274,6 @@ out_set_cpu: | |||
1277 | cpu = task_cpu(p); | 1274 | cpu = task_cpu(p); |
1278 | } | 1275 | } |
1279 | 1276 | ||
1280 | p->last_waker_cpu = this_cpu; | ||
1281 | |||
1282 | out_activate: | 1277 | out_activate: |
1283 | #endif /* CONFIG_SMP */ | 1278 | #endif /* CONFIG_SMP */ |
1284 | if (old_state == TASK_UNINTERRUPTIBLE) { | 1279 | if (old_state == TASK_UNINTERRUPTIBLE) { |
@@ -1360,12 +1355,9 @@ void fastcall sched_fork(task_t *p, int clone_flags) | |||
1360 | #ifdef CONFIG_SCHEDSTATS | 1355 | #ifdef CONFIG_SCHEDSTATS |
1361 | memset(&p->sched_info, 0, sizeof(p->sched_info)); | 1356 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
1362 | #endif | 1357 | #endif |
1363 | #if defined(CONFIG_SMP) | 1358 | #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
1364 | p->last_waker_cpu = cpu; | ||
1365 | #if defined(__ARCH_WANT_UNLOCKED_CTXSW) | ||
1366 | p->oncpu = 0; | 1359 | p->oncpu = 0; |
1367 | #endif | 1360 | #endif |
1368 | #endif | ||
1369 | #ifdef CONFIG_PREEMPT | 1361 | #ifdef CONFIG_PREEMPT |
1370 | /* Want to start with kernel preemption disabled. */ | 1362 | /* Want to start with kernel preemption disabled. */ |
1371 | task_thread_info(p)->preempt_count = 1; | 1363 | task_thread_info(p)->preempt_count = 1; |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 67f29516662a..508707704d2c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -85,7 +85,7 @@ void free_huge_page(struct page *page) | |||
85 | BUG_ON(page_count(page)); | 85 | BUG_ON(page_count(page)); |
86 | 86 | ||
87 | INIT_LIST_HEAD(&page->lru); | 87 | INIT_LIST_HEAD(&page->lru); |
88 | page[1].mapping = NULL; | 88 | page[1].lru.next = NULL; /* reset dtor */ |
89 | 89 | ||
90 | spin_lock(&hugetlb_lock); | 90 | spin_lock(&hugetlb_lock); |
91 | enqueue_huge_page(page); | 91 | enqueue_huge_page(page); |
@@ -105,7 +105,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr) | |||
105 | } | 105 | } |
106 | spin_unlock(&hugetlb_lock); | 106 | spin_unlock(&hugetlb_lock); |
107 | set_page_count(page, 1); | 107 | set_page_count(page, 1); |
108 | page[1].mapping = (void *)free_huge_page; | 108 | page[1].lru.next = (void *)free_huge_page; /* set dtor */ |
109 | for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i) | 109 | for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i) |
110 | clear_user_highpage(&page[i], addr); | 110 | clear_user_highpage(&page[i], addr); |
111 | return page; | 111 | return page; |
diff --git a/mm/madvise.c b/mm/madvise.c index ae0ae3ea299a..af3d573b0141 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
@@ -22,16 +22,23 @@ static long madvise_behavior(struct vm_area_struct * vma, | |||
22 | struct mm_struct * mm = vma->vm_mm; | 22 | struct mm_struct * mm = vma->vm_mm; |
23 | int error = 0; | 23 | int error = 0; |
24 | pgoff_t pgoff; | 24 | pgoff_t pgoff; |
25 | int new_flags = vma->vm_flags & ~VM_READHINTMASK; | 25 | int new_flags = vma->vm_flags; |
26 | 26 | ||
27 | switch (behavior) { | 27 | switch (behavior) { |
28 | case MADV_NORMAL: | ||
29 | new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; | ||
30 | break; | ||
28 | case MADV_SEQUENTIAL: | 31 | case MADV_SEQUENTIAL: |
29 | new_flags |= VM_SEQ_READ; | 32 | new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; |
30 | break; | 33 | break; |
31 | case MADV_RANDOM: | 34 | case MADV_RANDOM: |
32 | new_flags |= VM_RAND_READ; | 35 | new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; |
33 | break; | 36 | break; |
34 | default: | 37 | case MADV_DONTFORK: |
38 | new_flags |= VM_DONTCOPY; | ||
39 | break; | ||
40 | case MADV_DOFORK: | ||
41 | new_flags &= ~VM_DONTCOPY; | ||
35 | break; | 42 | break; |
36 | } | 43 | } |
37 | 44 | ||
@@ -177,6 +184,12 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, | |||
177 | long error; | 184 | long error; |
178 | 185 | ||
179 | switch (behavior) { | 186 | switch (behavior) { |
187 | case MADV_DOFORK: | ||
188 | if (vma->vm_flags & VM_IO) { | ||
189 | error = -EINVAL; | ||
190 | break; | ||
191 | } | ||
192 | case MADV_DONTFORK: | ||
180 | case MADV_NORMAL: | 193 | case MADV_NORMAL: |
181 | case MADV_SEQUENTIAL: | 194 | case MADV_SEQUENTIAL: |
182 | case MADV_RANDOM: | 195 | case MADV_RANDOM: |
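
MADV_DONTFORK/MADV_DOFORK are a userspace-visible madvise(2) interface on top of VM_DONTCOPY; a hedged usage sketch (mapping size, flags and the helper name are illustrative only) keeps a buffer out of child address spaces across fork():

	#include <stddef.h>
	#include <sys/mman.h>

	/* Illustrative helper: map a buffer that fork()ed children never see. */
	static void *map_nofork_buffer(size_t len)
	{
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return NULL;
		madvise(buf, len, MADV_DONTFORK);	/* VMA gains VM_DONTCOPY */
		return buf;
	}

	/* madvise(buf, len, MADV_DOFORK) later clears VM_DONTCOPY again. */
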
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dde04ff4be31..62c122528587 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -56,6 +56,7 @@ long nr_swap_pages; | |||
56 | int percpu_pagelist_fraction; | 56 | int percpu_pagelist_fraction; |
57 | 57 | ||
58 | static void fastcall free_hot_cold_page(struct page *page, int cold); | 58 | static void fastcall free_hot_cold_page(struct page *page, int cold); |
59 | static void __free_pages_ok(struct page *page, unsigned int order); | ||
59 | 60 | ||
60 | /* | 61 | /* |
61 | * results with 256, 32 in the lowmem_reserve sysctl: | 62 | * results with 256, 32 in the lowmem_reserve sysctl: |
@@ -169,20 +170,23 @@ static void bad_page(struct page *page) | |||
169 | * All pages have PG_compound set. All pages have their ->private pointing at | 170 | * All pages have PG_compound set. All pages have their ->private pointing at |
170 | * the head page (even the head page has this). | 171 | * the head page (even the head page has this). |
171 | * | 172 | * |
172 | * The first tail page's ->mapping, if non-zero, holds the address of the | 173 | * The first tail page's ->lru.next holds the address of the compound page's |
173 | * compound page's put_page() function. | 174 | * put_page() function. Its ->lru.prev holds the order of allocation. |
174 | * | 175 | * This usage means that zero-order pages may not be compound. |
175 | * The order of the allocation is stored in the first tail page's ->index | ||
176 | * This is only for debug at present. This usage means that zero-order pages | ||
177 | * may not be compound. | ||
178 | */ | 176 | */ |
177 | |||
178 | static void free_compound_page(struct page *page) | ||
179 | { | ||
180 | __free_pages_ok(page, (unsigned long)page[1].lru.prev); | ||
181 | } | ||
182 | |||
179 | static void prep_compound_page(struct page *page, unsigned long order) | 183 | static void prep_compound_page(struct page *page, unsigned long order) |
180 | { | 184 | { |
181 | int i; | 185 | int i; |
182 | int nr_pages = 1 << order; | 186 | int nr_pages = 1 << order; |
183 | 187 | ||
184 | page[1].mapping = NULL; | 188 | page[1].lru.next = (void *)free_compound_page; /* set dtor */ |
185 | page[1].index = order; | 189 | page[1].lru.prev = (void *)order; |
186 | for (i = 0; i < nr_pages; i++) { | 190 | for (i = 0; i < nr_pages; i++) { |
187 | struct page *p = page + i; | 191 | struct page *p = page + i; |
188 | 192 | ||
@@ -196,7 +200,7 @@ static void destroy_compound_page(struct page *page, unsigned long order) | |||
196 | int i; | 200 | int i; |
197 | int nr_pages = 1 << order; | 201 | int nr_pages = 1 << order; |
198 | 202 | ||
199 | if (unlikely(page[1].index != order)) | 203 | if (unlikely((unsigned long)page[1].lru.prev != order)) |
200 | bad_page(page); | 204 | bad_page(page); |
201 | 205 | ||
202 | for (i = 0; i < nr_pages; i++) { | 206 | for (i = 0; i < nr_pages; i++) { |
@@ -40,7 +40,7 @@ static void put_compound_page(struct page *page) | |||
40 | if (put_page_testzero(page)) { | 40 | if (put_page_testzero(page)) { |
41 | void (*dtor)(struct page *page); | 41 | void (*dtor)(struct page *page); |
42 | 42 | ||
43 | dtor = (void (*)(struct page *))page[1].mapping; | 43 | dtor = (void (*)(struct page *))page[1].lru.next; |
44 | (*dtor)(page); | 44 | (*dtor)(page); |
45 | } | 45 | } |
46 | } | 46 | } |
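
Read together with the mm/hugetlb.c and prep_compound_page() hunks above, the destructor convention amounts to stashing a function pointer and the allocation order in the first tail page; the helpers below are an illustrative restatement of that convention, not kernel API:

	#include <linux/mm.h>

	/* Sketch only; the helper names are made up. */
	static void set_compound_dtor(struct page *page,
				      void (*dtor)(struct page *))
	{
		page[1].lru.next = (void *)dtor;	/* set dtor */
	}

	static void set_compound_order(struct page *page, unsigned long order)
	{
		page[1].lru.prev = (void *)order;
	}

	static void call_compound_dtor(struct page *page)
	{
		void (*dtor)(struct page *) =
			(void (*)(struct page *))page[1].lru.next;

		(*dtor)(page);
	}
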
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index cc047f7fb6ef..35cf3a074087 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
@@ -67,7 +67,7 @@ void br_stp_disable_bridge(struct net_bridge *br) | |||
67 | { | 67 | { |
68 | struct net_bridge_port *p; | 68 | struct net_bridge_port *p; |
69 | 69 | ||
70 | spin_lock(&br->lock); | 70 | spin_lock_bh(&br->lock); |
71 | list_for_each_entry(p, &br->port_list, list) { | 71 | list_for_each_entry(p, &br->port_list, list) { |
72 | if (p->state != BR_STATE_DISABLED) | 72 | if (p->state != BR_STATE_DISABLED) |
73 | br_stp_disable_port(p); | 73 | br_stp_disable_port(p); |
@@ -76,7 +76,7 @@ void br_stp_disable_bridge(struct net_bridge *br) | |||
76 | 76 | ||
77 | br->topology_change = 0; | 77 | br->topology_change = 0; |
78 | br->topology_change_detected = 0; | 78 | br->topology_change_detected = 0; |
79 | spin_unlock(&br->lock); | 79 | spin_unlock_bh(&br->lock); |
80 | 80 | ||
81 | del_timer_sync(&br->hello_timer); | 81 | del_timer_sync(&br->hello_timer); |
82 | del_timer_sync(&br->topology_change_timer); | 82 | del_timer_sync(&br->topology_change_timer); |
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 52a3d7c57907..ed42cdc57cd9 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -78,6 +78,47 @@ int ip_route_me_harder(struct sk_buff **pskb) | |||
78 | } | 78 | } |
79 | EXPORT_SYMBOL(ip_route_me_harder); | 79 | EXPORT_SYMBOL(ip_route_me_harder); |
80 | 80 | ||
81 | #ifdef CONFIG_XFRM | ||
82 | int ip_xfrm_me_harder(struct sk_buff **pskb) | ||
83 | { | ||
84 | struct flowi fl; | ||
85 | unsigned int hh_len; | ||
86 | struct dst_entry *dst; | ||
87 | |||
88 | if (IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED) | ||
89 | return 0; | ||
90 | if (xfrm_decode_session(*pskb, &fl, AF_INET) < 0) | ||
91 | return -1; | ||
92 | |||
93 | dst = (*pskb)->dst; | ||
94 | if (dst->xfrm) | ||
95 | dst = ((struct xfrm_dst *)dst)->route; | ||
96 | dst_hold(dst); | ||
97 | |||
98 | if (xfrm_lookup(&dst, &fl, (*pskb)->sk, 0) < 0) | ||
99 | return -1; | ||
100 | |||
101 | dst_release((*pskb)->dst); | ||
102 | (*pskb)->dst = dst; | ||
103 | |||
104 | /* Change in oif may mean change in hh_len. */ | ||
105 | hh_len = (*pskb)->dst->dev->hard_header_len; | ||
106 | if (skb_headroom(*pskb) < hh_len) { | ||
107 | struct sk_buff *nskb; | ||
108 | |||
109 | nskb = skb_realloc_headroom(*pskb, hh_len); | ||
110 | if (!nskb) | ||
111 | return -1; | ||
112 | if ((*pskb)->sk) | ||
113 | skb_set_owner_w(nskb, (*pskb)->sk); | ||
114 | kfree_skb(*pskb); | ||
115 | *pskb = nskb; | ||
116 | } | ||
117 | return 0; | ||
118 | } | ||
119 | EXPORT_SYMBOL(ip_xfrm_me_harder); | ||
120 | #endif | ||
121 | |||
81 | void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); | 122 | void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); |
82 | EXPORT_SYMBOL(ip_nat_decode_session); | 123 | EXPORT_SYMBOL(ip_nat_decode_session); |
83 | 124 | ||
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index 92c54999a19d..7c3f7d380240 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c | |||
@@ -235,19 +235,19 @@ ip_nat_out(unsigned int hooknum, | |||
235 | return NF_ACCEPT; | 235 | return NF_ACCEPT; |
236 | 236 | ||
237 | ret = ip_nat_fn(hooknum, pskb, in, out, okfn); | 237 | ret = ip_nat_fn(hooknum, pskb, in, out, okfn); |
238 | #ifdef CONFIG_XFRM | ||
238 | if (ret != NF_DROP && ret != NF_STOLEN | 239 | if (ret != NF_DROP && ret != NF_STOLEN |
239 | && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { | 240 | && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { |
240 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 241 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
241 | 242 | ||
242 | if (ct->tuplehash[dir].tuple.src.ip != | 243 | if (ct->tuplehash[dir].tuple.src.ip != |
243 | ct->tuplehash[!dir].tuple.dst.ip | 244 | ct->tuplehash[!dir].tuple.dst.ip |
244 | #ifdef CONFIG_XFRM | ||
245 | || ct->tuplehash[dir].tuple.src.u.all != | 245 | || ct->tuplehash[dir].tuple.src.u.all != |
246 | ct->tuplehash[!dir].tuple.dst.u.all | 246 | ct->tuplehash[!dir].tuple.dst.u.all |
247 | #endif | ||
248 | ) | 247 | ) |
249 | return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; | 248 | return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP; |
250 | } | 249 | } |
250 | #endif | ||
251 | return ret; | 251 | return ret; |
252 | } | 252 | } |
253 | 253 | ||